diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 0e4f995f697..00000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,228 +0,0 @@ -branches: - only: - - master - -environment: - - matrix: - - # For Python versions available on Appveyor, see - # http://www.appveyor.com/docs/installed-software#python - # The list here is complete at the time of writing. - - #- PYTHON_VERSION: 2.7 - # PYTHON: "C:\\Miniconda-x64" - # CATEGORY: "nightly" - - #- PYTHON_VERSION: 3.4 - # PYTHON: "C:\\Miniconda34-x64" - # CATEGORY: "nightly" - - #- PYTHON_VERSION: 3.5 - # PYTHON: "C:\\Miniconda35-x64" - # CATEGORY: "nightly" - - #- PYTHON_VERSION: 3.6 - # PYTHON: "C:\\Miniconda36-x64" - # CATEGORY: "nightly" - - - PYTHON_VERSION: 2.7 - PYTHON: "C:\\Miniconda" - CATEGORY: "nightly" - EXTRAS: YES - - #- PYTHON_VERSION: 3.4 - # PYTHON: "C:\\Miniconda34-x64" - # CATEGORY: "nightly" - # EXTRAS: YES - - - PYTHON_VERSION: 3.5 - PYTHON: "C:\\Miniconda35" - CATEGORY: "nightly" - EXTRAS: YES - - - PYTHON_VERSION: 3.6 - PYTHON: "C:\\Miniconda36" - CATEGORY: "nightly" - # [200316]: disable extras because of installation dependency - # issues on appveyor - #EXTRAS: YES - - - PYTHON_VERSION: 3.7 - PYTHON: "C:\\Miniconda37" - CATEGORY: "nightly" - # [191115]: disable extras because of installation dependency - # issues with Miniconda 3.7 on appveyor - #EXTRAS: YES - - -install: - - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PYTHON%\\Library\\bin;%PATH%" - - "where python" - - "where pip" - - python --version - - "SET PIP=%PYTHON%\\Scripts\\pip" - - "%PIP% --version" - # - # Set standardized ways to invoke conda for the various channels. We - # are seeing strange issues where conda-forge and cachemeorg are - # fighting with anaconda over the version of core packages (notably, - # conda). The following prevents conda-forge and cacheme.org from - # overriding anaconda. 
- # - - SET CONDA_INSTALL=conda install -q -y - - "SET ANACONDA=%CONDA_INSTALL% -c anaconda" - - "SET CONDAFORGE=%CONDA_INSTALL% -c conda-forge --no-update-deps" - # - # Determine if we will use Appveyor's Miniconda or install Anaconda - # (intermittently one or the other suffers from NumPy failing to load the - # MKL DLL; See #542, #577 - # - - SET USING_MINICONDA=1 - # - # Update conda, then force it to NOT update itself again - # - # Somehow, the update from anaconda stalls for Python 3.4. So we're not specifying the channel here. - # - - conda config --set always_yes yes - #- conda update -q -y conda - - conda config --set auto_update_conda false - # - # If we are using full Anaconda instead of Appveyor's MiniConda, - # install it - # - - IF NOT DEFINED USING_MINICONDA (conda install anaconda) - # - # Create a virtual environment for this build - # - #- conda create -n pyomo_test_env python=%PYTHON_VERSION% - #- activate pyomo_test_env - #- "SET CONDAENV=%PYTHON%\\envs\\pyomo_test_env" - - "echo %PATH%" - # - - "SET ADDITIONAL_CF_PKGS=setuptools coverage sphinx_rtd_theme" - # - # Install extra packages (formerly pyomo.extras) - # - # If we are using Miniconda, we need to install additional packages - # that usually come with the full Anaconda distribution - # - - SET MINICONDA_EXTRAS="" - - IF DEFINED USING_MINICONDA (SET MINICONDA_EXTRAS=numpy scipy ipython openpyxl sympy pyodbc pyyaml networkx xlrd pandas matplotlib dill seaborn) - # - - "IF DEFINED EXTRAS (SET ADDITIONAL_CF_PKGS=%ADDITIONAL_CF_PKGS% pymysql pyro4 pint pathos %MINICONDA_EXTRAS%)" - #- "IF DEFINED EXTRAS (%CONDAFORGE% mkl)" - # - # Finally, add any solvers we want to the list - # - - "SET ADDITIONAL_CF_PKGS=%ADDITIONAL_CF_PKGS% glpk ipopt" - # - # ...and install everything from conda-force in one go - # - - "%CONDAFORGE% %ADDITIONAL_CF_PKGS%" - # - # While we would like to install codecov using conda (for - # consistency), there are cases (most recently, in Python 3.5) where - # the 
installation is not reliable and codecov is not available after - # being installed. - # - - "python -m pip install --upgrade pip" - - "%PIP% --version" - - "%PIP% install codecov" - # - # Install GAMS - # - - ps: Start-FileDownload 'https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/windows/windows_x64_64.exe' - - windows_x64_64.exe /SP- /VERYSILENT /NORESTART /DIR=.\gams /NOICONS - - "SET cwd=%cd%" - - "cd gams\\apifiles\\Python" - - IF PYTHON_VERSION equ 2.7 (cd api \ python setup.py install ) - - IF PYTHON_VERSION equ 3.6 (cd api_36 \ python setup.py install ) - - IF PYTHON_VERSION equ 3.7 (cd api_37 \ python setup.py install ) - - "cd %cwd%" - # - # Add GAMS to PATH - # - - "SET PATH=%cd%\\gams;%PATH%" - # - # Clone but don't install pyomo-model-libraries - # - - "git clone https://github.com/Pyomo/pyomo-model-libraries.git" - - "%PIP% install git+https://github.com/PyUtilib/pyutilib" - - "python setup.py develop" - - # Set up python's coverage for covering subprocesses (important to do - # here because we want coverage of the download scripts below) - # - - "SET BUILD_DIR=%cd%" - - "SET COVERAGE_PROCESS_START=%BUILD_DIR%\\coveragerc" - - "copy %BUILD_DIR%\\.coveragerc %COVERAGE_PROCESS_START%" - - "echo data_file=%BUILD_DIR%\\.coverage >> %COVERAGE_PROCESS_START%" - - python -c "from distutils.sysconfig import get_python_lib; import os; FILE=open(os.path.join(get_python_lib(),'run_coverage_at_startup.pth'), 'w'); FILE.write('import coverage; coverage.process_startup()'); FILE.close()" - - # Configure Pyomo to put the configuration directory here (so that it - # is both writable, and will be cleared between test runs - - "SET PYOMO_CONFIG_DIR=%BUILD_DIR%\\config" - - # Fetch additional solvers - # - - "pyomo download-extensions" - - # Report relevant package versions - # - - "glpsol -v" - - "ipopt -v" - - python --version - -build: off - - -test_script: - # Put your test command here. 
- # If you don't need to build C extensions on 64-bit Python 3.3 or 3.4, - # you can remove "build.cmd" from the front of the command, as it's - # only needed to support those cases. - # Note that you must use the environment variable %PYTHON% to refer to - # the interpreter you're using - Appveyor does not do anything special - # to put the Python evrsion you want to use on PATH. - # - # This block of commands enable tracking of coverage for any - # subprocesses launched by tests - - "SET BUILD_DIR=%cd%" - - "SET COVERAGE_PROCESS_START=%BUILD_DIR%\\coveragerc" - # Configure Pyomo to put the configuration directory here (so that it - # is both writable, and will be cleared between test runs - - "SET PYOMO_CONFIG_DIR=%BUILD_DIR%\\config" - - # Run Pyomo tests - - "test.pyomo -v --cat=%CATEGORY% pyomo %BUILD_DIR%\\pyomo-model-libraries" - - # Run documentation tests - #- "nosetests -v --with-doctest --doctest-extension=.rst doc\\OnlineDocs" - - -#after_test: - # This step builds your wheels. - # Again, you only need build.cmd if you're building C extensions for - # 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct - # interpreter - #- "build.cmd %PYTHON%\\python.exe setup.py bdist_wheel" - - -#artifacts: - # bdist_wheel puts your built wheel in the dist directory - #- path: dist\* - - -on_success: - # You can use this step to upload your artifacts to a public website. - # See Appveyor's documentation for more details. Or you can simply - # access your wheels from the Appveyor "artifacts" tab for your build. - # - # Combine coverage reports over all subprocesses - - "cd %BUILD_DIR%" - - dir .cov* - - "coverage combine %BUILD_DIR%" - # On some appveyor platforms, the codecov script does not appear to be - # in the PATH. 
We will directly import the module (installed above) - - python -m codecov -X gcov diff --git a/.codecov.yml b/.codecov.yml index b2b447d21d4..39efc7e8fd5 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -12,3 +12,8 @@ coverage: threshold: 0% # ci: # - !ci.appveyor.com +codecov: + notify: + # GHA: 18, Travis: 13, Jenkins: 6 + after_n_builds: 33 + wait_for_ci: yes diff --git a/.coveragerc b/.coveragerc index e7d46592c37..34b0503f183 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,9 +1,6 @@ [report] omit = - */python?.?/* - */site-packages/nose/* - *__init__* - */setup.py + setup.py */tests/* */tmp/* @@ -11,3 +8,9 @@ omit = # "data_file" directive to the end of this file. [run] parallel = True +source = + pyomo + examples +omit = + # github actions creates a cache directory we don't want measured + cache/* diff --git a/.github/workflows/mpi_matrix_test.yml b/.github/workflows/mpi_matrix_test.yml deleted file mode 100644 index 2d306131206..00000000000 --- a/.github/workflows/mpi_matrix_test.yml +++ /dev/null @@ -1,164 +0,0 @@ -name: GitHub CI (mpi) - -on: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - build: - name: ${{ matrix.TARGET }}/py${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - strategy: - max-parallel: 1 - matrix: - os: [ubuntu-latest] - python-version: [3.7] - include: - - os: ubuntu-latest - TARGET: linux - - steps: - - uses: actions/checkout@v2 - - name: Setup conda environment - uses: s-weigand/setup-conda@v1 - with: - update-conda: true - python-version: ${{ matrix.python-version }} - conda-channels: anaconda, conda-forge - - - name: Install dependencies - run: | - echo "" - echo "Install conda packages" - echo "" - conda install mpi4py - echo "" - echo "Upgrade pip..." - echo "" - python -m pip install --upgrade pip - echo "" - echo "Install Pyomo dependencies..." 
- echo "" - pip install cython numpy scipy ipython openpyxl sympy pyyaml \ - pyodbc networkx xlrd pandas matplotlib dill seaborn pymysql \ - pyro4 pint pathos coverage nose - echo "" - echo "Install CPLEX Community Edition..." - echo "" - pip install cplex || echo "CPLEX Community Edition is not available for ${{ matrix.python-version }}" - echo "" - echo "Install BARON..." - echo "" - if [ ${{ matrix.TARGET }} == 'osx' ]; then - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-osx64.zip -O baron_installer.zip - else - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-lin64.zip -O baron_installer.zip - fi - unzip -q baron_installer.zip - mv baron-* baron-dir - BARON_DIR=$(pwd)/baron-dir - export PATH=$PATH:$BARON_DIR - echo "" - echo "Install IDAES Ipopt..." - echo "" - sudo apt-get install libopenblas-dev gfortran liblapack-dev - mkdir ipopt && cd ipopt - wget -q https://github.com/IDAES/idaes-ext/releases/download/2.0.0/idaes-solvers-ubuntu1804-64.tar.gz -O ipopt.tar.gz - tar -xzf ipopt.tar.gz - cd .. - export PATH=$PATH:$(pwd)/ipopt - echo "" - echo "Install GJH_ASL_JSON..." - echo "" - wget -q "https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master" -O gjh_asl_json.zip - unzip -q gjh_asl_json.zip - rm -rf gjh_asl_json.zip - cd gjh_asl_json-master/Thirdparty - ./get.ASL - cd .. - make - export PATH=$PATH:$(pwd)/bin - cd .. - echo "" - echo "Install GAMS..." 
- echo "" - if [ ${{ matrix.TARGET }} == 'osx' ]; then - wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/macosx/osx_x64_64_sfx.exe -O gams_installer.exe - else - wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/linux/linux_x64_64_sfx.exe -O gams_installer.exe - fi - chmod +x gams_installer.exe - ./gams_installer.exe -q -d gams - GAMS_DIR=`ls -d1 $(pwd)/gams/*/ | head -1` - export PATH=$PATH:$GAMS_DIR - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GAMS_DIR - export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$GAMS_DIR - cd $GAMS_DIR/apifiles/Python/ - py_ver=$(python -c 'import sys;print("%s%s" % sys.version_info[:2])') - gams_ver=api - for ver in api_*; do - if test ${ver:4} -le $py_ver; then - gams_ver=$ver - fi - done - cd $gams_ver - python setup.py -q install -noCheck - echo "" - echo "Pass key environment variables to subsequent steps" - echo "" - echo "::set-env name=PATH::$PATH" - echo "::set-env name=LD_LIBRARY_PATH::$LD_LIBRARY_PATH" - echo "::set-env name=DYLD_LIBRARY_PATH::$DYLD_LIBRARY_PATH" - - - name: Install Pyomo and extensions - run: | - echo "Clone Pyomo-model-libraries..." - git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git - echo "" - echo "Install PyUtilib..." - echo "" - pip install --quiet git+https://github.com/PyUtilib/pyutilib - echo "" - echo "Install Pyomo..." 
- echo "" - python setup.py develop - - - name: Set up coverage tracking - run: | - WORKSPACE=`pwd` - COVERAGE_PROCESS_START=${WORKSPACE}/coveragerc - echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_PROCESS_START" - cp ${WORKSPACE}/.coveragerc ${COVERAGE_PROCESS_START} - echo "data_file=${WORKSPACE}/.coverage" >> ${COVERAGE_PROCESS_START} - SITE_PACKAGES=`python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"` - if [ -z "$DISABLE_COVERAGE" ]; then - echo 'import coverage; coverage.process_startup()' \ - > ${SITE_PACKAGES}/run_coverage_at_startup.pth - fi - - - name: Download and install extensions - run: | - pyomo download-extensions - pyomo build-extensions - - - name: Run Pyomo tests - run: | - echo "Run Pyomo tests..." - # Manually invoke the DAT parser so that parse_table_datacmds.py is - # fully generated by a single process before invoking MPI - python -c "from pyomo.dataportal.parse_datacmds import parse_data_commands; parse_data_commands(data='')" - mpirun -np 3 --oversubscribe nosetests -v --eval-attr="mpi and (not fragile)" \ - pyomo `pwd`/pyomo-model-libraries - - - name: Upload coverage to codecov - env: - GITHUB_JOB_NAME: mpi/${{ matrix.TARGET }}/py${{ matrix.python-version }} - run: | - find . 
-maxdepth 10 -name ".cov*" - coverage combine - coverage report -i - bash <(curl -s https://codecov.io/bash) -X gcov -n "$GITHUB_JOB_NAME" diff --git a/.github/workflows/pr_master_test.yml b/.github/workflows/pr_master_test.yml new file mode 100644 index 00000000000..9adcbe52563 --- /dev/null +++ b/.github/workflows/pr_master_test.yml @@ -0,0 +1,411 @@ +name: GitHub CI + +on: + push: + branches: + - master + pull_request: + branches: + - master + +defaults: + run: + shell: bash -l {0} + +env: + PYTHONWARNINGS: ignore::UserWarning + PYTHON_BASE_PKGS: > + coverage cython dill ipython networkx nose openpyxl pathos + pint pymysql pyro4 pyyaml sphinx_rtd_theme sympy xlrd wheel + PYTHON_NUMPY_PKGS: > + numpy scipy pyodbc pandas matplotlib seaborn + +jobs: + pyomo-tests: + name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.NAME }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python: [2.7, 3.5, 3.6, 3.7, 3.8, pypy2, pypy3] + mpi: [0] + include: + - os: ubuntu-latest + TARGET: linux + PYENV: pip + + - os: macos-latest + TARGET: osx + PYENV: pip + + - os: windows-latest + TARGET: win + PYENV: conda + PACKAGES: glpk + + - os: ubuntu-latest + python: 3.7 + mpi: 3 + TARGET: linux + PYENV: conda + PACKAGES: mpi4py + NAME: /mpi + + exclude: + - {os: macos-latest, python: pypy2} + - {os: macos-latest, python: pypy3} + - {os: windows-latest, python: pypy2} + - {os: windows-latest, python: pypy3} + + + steps: + - uses: actions/checkout@v2 + + # Ideally we would cache the conda downloads; however, each cache is + # over 850MB, and with 5 python versions, that would consume 4.2 of + # the 5 GB GitHub allows. 
+ # + #- name: Conda package cache + # uses: actions/cache@v1 + # if: matrix.PYENV == 'conda' + # id: conda-cache + # with: + # path: cache/conda + # key: conda-v2-${{runner.os}}-${{matrix.python}} + + - name: Pip package cache + uses: actions/cache@v1 + if: matrix.PYENV == 'pip' + id: pip-cache + with: + path: cache/pip + key: pip-v2-${{runner.os}}-${{matrix.python}} + + - name: OS package cache + uses: actions/cache@v1 + id: os-cache + with: + path: cache/os + key: pkg-v2-${{runner.os}} + + - name: TPL package download cache + uses: actions/cache@v1 + id: download-cache + with: + path: cache/download + key: download-v3-${{runner.os}} + + - name: Update OSX + if: matrix.TARGET == 'osx' + run: | + mkdir -p ${GITHUB_WORKSPACE}/cache/os + export HOMEBREW_CACHE=${GITHUB_WORKSPACE}/cache/os + brew update + # Notes: + # - install glpk + # - pyodbc needs: gcc pkg-config unixodbc freetds + for pkg in bash pkg-config unixodbc freetds glpk; do + brew list $pkg || brew install $pkg + done + #brew link --overwrite gcc + + - name: Update Linux + if: matrix.TARGET == 'linux' + run: | + mkdir -p ${GITHUB_WORKSPACE}/cache/os + # Notes: + # - install glpk + # - ipopt needs: libopenblas-dev gfortran liblapack-dev + sudo apt-get -o Dir::Cache=${GITHUB_WORKSPACE}/cache/os \ + install libopenblas-dev gfortran liblapack-dev glpk-utils + sudo chmod -R 777 ${GITHUB_WORKSPACE}/cache/os + + - name: Set up Python ${{ matrix.python }} + if: matrix.PYENV == 'pip' + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + + - name: Set up Miniconda Python ${{ matrix.python }} + if: matrix.PYENV == 'conda' + uses: goanpeca/setup-miniconda@v1 + with: + auto-update-conda: true + python-version: ${{ matrix.python }} + + # GitHub actions is very fragile when it comes to setting up various + # Python interpreters, expecially the setup-miniconda interface. 
+ # Per the setup-miniconda documentation, it is important to always + # invoke bash as a login shell ('shell: bash -l {0}') so that the + # conda environment is properly activated. However, running within + # a login shell appears to foul up the link to python from + # setup-python. Further, we have anecdotal evidence that + # subprocesses invoked through $(python -c ...) and `python -c ...` + # will not pick up the python activated by setup-python on OSX. + # + # Our solution is to define a PYTHON_EXE environment variable that + # can be explicitly called within subprocess calls to reach the + # correct interpreter. Note that we must explicitly run in a *non* + # login shell to set up the environment variable for the + # setup-python environments. + + - name: Install Python Packages (pip) + if: matrix.PYENV == 'pip' + shell: bash + run: | + python -m pip install --cache-dir cache/pip --upgrade pip + # Note: pandas 1.0.3 causes gams 29.1.0 import to fail in python 3.8 + pip install --cache-dir cache/pip ${PYTHON_BASE_PKGS} \ + ${{matrix.PACKAGES}} + if [[ ${{matrix.python}} != pypy* ]]; then + # NumPy and derivatives either don't build under pypy, or if + # they do, the builds take forever. 
+ pip install --cache-dir cache/pip ${PYTHON_NUMPY_PKGS} + fi + pip install --cache-dir cache/pip cplex \ + || echo "WARNING: CPLEX Community Edition is not available" + pip install --cache-dir cache/pip xpress \ + || echo "WARNING: Xpress Community Edition is not available" + python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \ + % (sys.executable,))' + + - name: Install Python packages (conda) + if: matrix.PYENV == 'conda' + run: | + mkdir -p $GITHUB_WORKSPACE/cache/conda + conda config --set always_yes yes + conda config --set auto_update_conda false + conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda + conda info + conda config --show-sources + conda list --show-channel-urls + conda install -q -y -c conda-forge ${PYTHON_BASE_PKGS} \ + ${PYTHON_NUMPY_PKGS} ${{matrix.PACKAGES}} + # Note: CPLEX 12.9 (the last version in conda that supports + # Python 2.7) causes a seg fault in the tests. + conda install -q -y -c ibmdecisionoptimization cplex=12.10 \ + || echo "WARNING: CPLEX Community Edition is not available" + conda install -q -y -c fico-xpress xpress \ + || echo "WARNING: Xpress Community Edition is not available" + python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \ + % (sys.executable,))' + + - name: Setup TPL package directories + run: | + TPL_DIR="${GITHUB_WORKSPACE}/cache/tpl" + mkdir -p "$TPL_DIR" + DOWNLOAD_DIR="${GITHUB_WORKSPACE}/cache/download" + mkdir -p "$DOWNLOAD_DIR" + echo "::set-env name=TPL_DIR::$TPL_DIR" + echo "::set-env name=DOWNLOAD_DIR::$DOWNLOAD_DIR" + + - name: Install Ipopt + run: | + IPOPT_DIR=$TPL_DIR/ipopt + echo "::add-path::$IPOPT_DIR" + mkdir -p $IPOPT_DIR + IPOPT_TAR=${DOWNLOAD_DIR}/ipopt.tar.gz + if test ! 
-e $IPOPT_TAR; then + echo "...downloading Ipopt" + URL=https://github.com/IDAES/idaes-ext/releases/download/2.0.0 + if test "${{matrix.TARGET}}" == osx; then + echo "IDAES Ipopt not available on OSX" + exit 0 + elif test "${{matrix.TARGET}}" == linux; then + curl --retry 8 -L $URL/idaes-solvers-ubuntu1804-64.tar.gz \ + > $IPOPT_TAR + else + curl --retry 8 -L $URL/idaes-solvers-windows-64.tar.gz \ + $URL/idaes-lib-windows-64.tar.gz > $IPOPT_TAR + fi + fi + cd $IPOPT_DIR + tar -xzi < $IPOPT_TAR + + - name: Install GAMS + # We install using Powershell because the GAMS installer hangs + # when launched from bash on Windows + shell: pwsh + run: | + $GAMS_DIR="${env:TPL_DIR}/gams" + echo "::add-path::$GAMS_DIR" + echo "::set-env name=LD_LIBRARY_PATH::${env:LD_LIBRARY_PATH}:$GAMS_DIR" + echo "::set-env name=DYLD_LIBRARY_PATH::${env:DYLD_LIBRARY_PATH}:$GAMS_DIR" + $INSTALLER="${env:DOWNLOAD_DIR}/gams_install.exe" + $URL="https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0" + if ( "${{matrix.TARGET}}" -eq "win" ) { + $URL = "$URL/windows/windows_x64_64.exe" + } elseif ( "${{matrix.TARGET}}" -eq "osx" ) { + $URL = "$URL/macosx/osx_x64_64_sfx.exe" + } else { + $URL = "$URL/linux/linux_x64_64_sfx.exe" + } + if (-not (Test-Path "$INSTALLER" -PathType Leaf)) { + echo "...downloading GAMS" + Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER" + } + echo "...installing GAMS" + if ( "${{matrix.TARGET}}" -eq "win" ) { + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "/SP- /NORESTART /VERYSILENT /DIR=$GAMS_DIR /NOICONS" ` + -Wait + } else { + chmod 777 $INSTALLER + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "-q -d $GAMS_DIR" -Wait + mv $GAMS_DIR/*/* $GAMS_DIR/. 
+ } + + - name: Install GAMS Python bindings + run: | + GAMS_DIR="$TPL_DIR/gams" + py_ver=$($PYTHON_EXE -c 'import sys;v="_%s%s" % sys.version_info[:2] \ + ;print(v if v != "_27" else "")') + if test -e $GAMS_DIR/apifiles/Python/api$py_ver; then + echo "Installing GAMS Python bindings" + pushd $GAMS_DIR/apifiles/Python/api$py_ver + $PYTHON_EXE setup.py install + popd + fi + + - name: Install BARON + shell: pwsh + run: | + $BARON_DIR="${env:TPL_DIR}/baron" + echo "::add-path::$BARON_DIR" + $URL="https://www.minlp.com/downloads/xecs/baron/current/" + if ( "${{matrix.TARGET}}" -eq "win" ) { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe" + $URL += "baron-win64.exe" + } elseif ( "${{matrix.TARGET}}" -eq "osx" ) { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip" + $URL += "baron-osx64.zip" + } else { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip" + $URL += "baron-lin64.zip" + } + if (-not (Test-Path "$INSTALLER" -PathType Leaf)) { + echo "...downloading BARON ($URL)" + Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER" + } + echo "...installing BARON" + if ( "${{matrix.TARGET}}" -eq "win" ) { + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "/SP- /NORESTART /VERYSILENT /DIR=$BARON_DIR /NOICONS" ` + -Wait + } else { + unzip -q $INSTALLER + mv baron-* $BARON_DIR + } + + - name: Install GJH_ASL_JSON + if: matrix.TARGET != 'win' + run: | + GJH_DIR="$TPL_DIR/gjh" + echo "::add-path::${GJH_DIR}" + INSTALL_DIR="${DOWNLOAD_DIR}/gjh" + if test ! -e "$INSTALL_DIR/bin"; then + mkdir -p "$INSTALL_DIR" + INSTALLER="$INSTALL_DIR/gjh_asl_json.zip" + URL="https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master" + curl --retry 8 -L $URL > $INSTALLER + cd $INSTALL_DIR + unzip -q $INSTALLER + cd gjh_asl_json-master/Thirdparty + ./get.ASL + cd .. + make + mv bin "$INSTALL_DIR/bin" + fi + cp -rp "$INSTALL_DIR/bin" "$GJH_DIR" + + - name: Install Pyomo and PyUtilib + run: | + echo "" + echo "Clone Pyomo-model-libraries..." 
+ git clone https://github.com/Pyomo/pyomo-model-libraries.git + echo "" + echo "Install PyUtilib..." + echo "" + $PYTHON_EXE -m pip install git+https://github.com/PyUtilib/pyutilib + echo "" + echo "Install Pyomo..." + echo "" + $PYTHON_EXE setup.py develop + echo "" + echo "Set custom PYOMO_CONFIG_DIR" + echo "" + echo "::set-env name=PYOMO_CONFIG_DIR::${GITHUB_WORKSPACE}/config" + + - name: Set up coverage tracking + run: | + if test "${{matrix.TARGET}}" == win; then + COVERAGE_BASE=${GITHUB_WORKSPACE}\\.cover + else + COVERAGE_BASE=${GITHUB_WORKSPACE}/.cover + fi + COVERAGE_RC=${COVERAGE_BASE}_rc + echo "::set-env name=COVERAGE_RCFILE::$COVERAGE_RC" + echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_RC" + cp ${GITHUB_WORKSPACE}/.coveragerc ${COVERAGE_RC} + echo "data_file=${COVERAGE_BASE}age" >> ${COVERAGE_RC} + SITE_PACKAGES=$($PYTHON_EXE -c "from distutils.sysconfig import \ + get_python_lib; print(get_python_lib())") + echo "Python site-packages: $SITE_PACKAGES" + echo 'import coverage; coverage.process_startup()' \ + > ${SITE_PACKAGES}/run_coverage_at_startup.pth + + - name: Download and install extensions + run: | + echo "" + echo "Pyomo download-extensions" + echo "" + pyomo download-extensions + echo "" + echo "Pyomo build-extensions" + echo "" + pyomo build-extensions --parallel 2 + + - name: Report pyomo plugin information + run: | + pyomo help --solvers || exit 1 + pyomo help --transformations || exit 1 + pyomo help --writers || exit 1 + + - name: Run Pyomo tests + if: matrix.mpi == 0 + run: | + test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries + + - name: Run Pyomo MPI tests + if: matrix.mpi != 0 + run: | + # Manually invoke the DAT parser so that parse_table_datacmds.py + # is fully generated by a single process before invoking MPI + python -c "from pyomo.dataportal.parse_datacmds import \ + parse_data_commands; parse_data_commands(data='')" + mpirun -np ${{matrix.mpi}} --oversubscribe nosetests -v \ + --eval-attr="mpi and (not 
fragile)" \ + pyomo `pwd`/pyomo-model-libraries + + - name: Process code coverage report + env: + CODECOV_NAME: ${{matrix.TARGET}}/${{matrix.python}}${{matrix.NAME}} + run: | + coverage combine + coverage report -i + coverage xml -i + i=0 + while : ; do + curl --retry 8 -L https://codecov.io/bash -o codecov.sh + bash codecov.sh -Z -X gcov -f coverage.xml + if test $? == 0; then + break + elif test $i -ge 4; then + exit 1 + fi + DELAY=$(( RANDOM % 30 + 30)) + echo "Pausing $DELAY seconds before re-attempting upload" + sleep $DELAY + done diff --git a/.github/workflows/push_branch_test.yml b/.github/workflows/push_branch_test.yml index 1d14374c1e1..3e40bf528b4 100644 --- a/.github/workflows/push_branch_test.yml +++ b/.github/workflows/push_branch_test.yml @@ -1,114 +1,410 @@ -name: continuous-integration/github/push +name: GitHub Branch CI -on: push +on: + push: + branches-ignore: + - master + +defaults: + run: + shell: bash -l {0} + +env: + PYTHONWARNINGS: ignore::UserWarning + PYTHON_BASE_PKGS: > + coverage cython dill ipython networkx nose openpyxl pathos + pint pymysql pyro4 pyyaml sphinx_rtd_theme sympy xlrd wheel + PYTHON_NUMPY_PKGS: > + numpy scipy pyodbc pandas matplotlib seaborn jobs: - pyomo-linux-branch-test: - name: ${{ matrix.TARGET }}/py${{ matrix.python-version }} + pyomo-tests: + name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.NAME }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] + python: [3.8] + mpi: [0] include: - - os: ubuntu-18.04 + - os: ubuntu-latest + TARGET: linux + PYENV: pip + + - os: macos-latest + python: 2.7 + TARGET: osx + PYENV: pip + + - os: windows-latest + python: 3.5 + TARGET: win + PYENV: conda + PACKAGES: glpk + + - os: ubuntu-latest + python: 3.7 + mpi: 3 TARGET: linux - python-version: [3.7] + PYENV: conda + PACKAGES: mpi4py + NAME: /mpi + + exclude: + - {os: macos-latest, python: pypy2} + - {os: macos-latest, python: pypy3} + - {os: windows-latest, python: 
pypy2} + - {os: windows-latest, python: pypy3} + steps: - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} + + # Ideally we would cache the conda downloads; however, each cache is + # over 850MB, and with 5 python versions, that would consume 4.2 of + # the 5 GB GitHub allows. + # + #- name: Conda package cache + # uses: actions/cache@v1 + # if: matrix.PYENV == 'conda' + # id: conda-cache + # with: + # path: cache/conda + # key: conda-v2-${{runner.os}}-${{matrix.python}} + + - name: Pip package cache + uses: actions/cache@v1 + if: matrix.PYENV == 'pip' + id: pip-cache + with: + path: cache/pip + key: pip-v2-${{runner.os}}-${{matrix.python}} + + - name: OS package cache + uses: actions/cache@v1 + id: os-cache + with: + path: cache/os + key: pkg-v2-${{runner.os}} + + - name: TPL package download cache + uses: actions/cache@v1 + id: download-cache + with: + path: cache/download + key: download-v3-${{runner.os}} + + - name: Update OSX + if: matrix.TARGET == 'osx' + run: | + mkdir -p ${GITHUB_WORKSPACE}/cache/os + export HOMEBREW_CACHE=${GITHUB_WORKSPACE}/cache/os + brew update + # Notes: + # - install glpk + # - pyodbc needs: gcc pkg-config unixodbc freetds + for pkg in bash pkg-config unixodbc freetds glpk; do + brew list $pkg || brew install $pkg + done + #brew link --overwrite gcc + + - name: Update Linux + if: matrix.TARGET == 'linux' + run: | + mkdir -p ${GITHUB_WORKSPACE}/cache/os + # Notes: + # - install glpk + # - ipopt needs: libopenblas-dev gfortran liblapack-dev + sudo apt-get -o Dir::Cache=${GITHUB_WORKSPACE}/cache/os \ + install libopenblas-dev gfortran liblapack-dev glpk-utils + sudo chmod -R 777 ${GITHUB_WORKSPACE}/cache/os + + - name: Set up Python ${{ matrix.python }} + if: matrix.PYENV == 'pip' uses: actions/setup-python@v1 with: - python-version: ${{ matrix.python-version }} - - name: Install Pyomo dependencies + python-version: ${{ matrix.python }} + + - name: Set up Miniconda Python ${{ matrix.python }} + if: 
matrix.PYENV == 'conda' + uses: goanpeca/setup-miniconda@v1 + with: + auto-update-conda: true + python-version: ${{ matrix.python }} + + # GitHub actions is very fragile when it comes to setting up various + # Python interpreters, expecially the setup-miniconda interface. + # Per the setup-miniconda documentation, it is important to always + # invoke bash as a login shell ('shell: bash -l {0}') so that the + # conda environment is properly activated. However, running within + # a login shell appears to foul up the link to python from + # setup-python. Further, we have anecdotal evidence that + # subprocesses invoked through $(python -c ...) and `python -c ...` + # will not pick up the python activated by setup-python on OSX. + # + # Our solution is to define a PYTHON_EXE environment variable that + # can be explicitly called within subprocess calls to reach the + # correct interpreter. Note that we must explicitly run in a *non* + # login shell to set up the environment variable for the + # setup-python environments. + + - name: Install Python Packages (pip) + if: matrix.PYENV == 'pip' + shell: bash run: | - echo "Upgrade pip..." - python -m pip install --upgrade pip - echo "" - echo "Install Pyomo dependencies..." - echo "" - pip install cython numpy scipy ipython openpyxl sympy pyyaml pyodbc networkx xlrd pandas matplotlib dill seaborn pymysql pyro4 pint pathos nose - echo "" - echo "Install CPLEX Community Edition..." - echo "" - pip install cplex || echo "CPLEX Community Edition is not available for ${{ matrix.python-version }}" - echo "" - echo "Install BARON..." 
- echo "" - if [ ${{ matrix.TARGET }} == 'osx' ]; then - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-osx64.zip -O baron_installer.zip - else - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-lin64.zip -O baron_installer.zip + python -m pip install --cache-dir cache/pip --upgrade pip + # Note: pandas 1.0.3 causes gams 29.1.0 import to fail in python 3.8 + pip install --cache-dir cache/pip ${PYTHON_BASE_PKGS} \ + ${{matrix.PACKAGES}} + if [[ ${{matrix.python}} != pypy* ]]; then + # NumPy and derivatives either don't build under pypy, or if + # they do, the builds take forever. + pip install --cache-dir cache/pip ${PYTHON_NUMPY_PKGS} fi - unzip -q baron_installer.zip - mv baron-* baron-dir - BARON_DIR=$(pwd)/baron-dir - export PATH=$PATH:$BARON_DIR - echo "" - echo "Install IDAES Ipopt..." + pip install --cache-dir cache/pip cplex \ + || echo "WARNING: CPLEX Community Edition is not available" + pip install --cache-dir cache/pip xpress \ + || echo "WARNING: Xpress Community Edition is not available" + python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \ + % (sys.executable,))' + + - name: Install Python packages (conda) + if: matrix.PYENV == 'conda' + run: | + mkdir -p $GITHUB_WORKSPACE/cache/conda + conda config --set always_yes yes + conda config --set auto_update_conda false + conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda + conda info + conda config --show-sources + conda list --show-channel-urls + conda install -q -y -c conda-forge ${PYTHON_BASE_PKGS} \ + ${PYTHON_NUMPY_PKGS} ${{matrix.PACKAGES}} + # Note: CPLEX 12.9 (the last version in conda that supports + # Python 2.7) causes a seg fault in the tests. 
+ conda install -q -y -c ibmdecisionoptimization cplex=12.10 \ + || echo "WARNING: CPLEX Community Edition is not available" + conda install -q -y -c fico-xpress xpress \ + || echo "WARNING: Xpress Community Edition is not available" + python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \ + % (sys.executable,))' + + - name: Setup TPL package directories + run: | + TPL_DIR="${GITHUB_WORKSPACE}/cache/tpl" + mkdir -p "$TPL_DIR" + DOWNLOAD_DIR="${GITHUB_WORKSPACE}/cache/download" + mkdir -p "$DOWNLOAD_DIR" + echo "::set-env name=TPL_DIR::$TPL_DIR" + echo "::set-env name=DOWNLOAD_DIR::$DOWNLOAD_DIR" + + - name: Install Ipopt + run: | + IPOPT_DIR=$TPL_DIR/ipopt + echo "::add-path::$IPOPT_DIR" + mkdir -p $IPOPT_DIR + IPOPT_TAR=${DOWNLOAD_DIR}/ipopt.tar.gz + if test ! -e $IPOPT_TAR; then + echo "...downloading Ipopt" + URL=https://github.com/IDAES/idaes-ext/releases/download/2.0.0 + if test "${{matrix.TARGET}}" == osx; then + echo "IDAES Ipopt not available on OSX" + exit 0 + elif test "${{matrix.TARGET}}" == linux; then + curl --retry 8 -L $URL/idaes-solvers-ubuntu1804-64.tar.gz \ + > $IPOPT_TAR + else + curl --retry 8 -L $URL/idaes-solvers-windows-64.tar.gz \ + $URL/idaes-lib-windows-64.tar.gz > $IPOPT_TAR + fi + fi + cd $IPOPT_DIR + tar -xzi < $IPOPT_TAR + + - name: Install GAMS + # We install using Powershell because the GAMS installer hangs + # when launched from bash on Windows + shell: pwsh + run: | + $GAMS_DIR="${env:TPL_DIR}/gams" + echo "::add-path::$GAMS_DIR" + echo "::set-env name=LD_LIBRARY_PATH::${env:LD_LIBRARY_PATH}:$GAMS_DIR" + echo "::set-env name=DYLD_LIBRARY_PATH::${env:DYLD_LIBRARY_PATH}:$GAMS_DIR" + $INSTALLER="${env:DOWNLOAD_DIR}/gams_install.exe" + $URL="https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0" + if ( "${{matrix.TARGET}}" -eq "win" ) { + $URL = "$URL/windows/windows_x64_64.exe" + } elseif ( "${{matrix.TARGET}}" -eq "osx" ) { + $URL = "$URL/macosx/osx_x64_64_sfx.exe" + } else { + $URL = "$URL/linux/linux_x64_64_sfx.exe" + 
} + if (-not (Test-Path "$INSTALLER" -PathType Leaf)) { + echo "...downloading GAMS" + Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER" + } + echo "...installing GAMS" + if ( "${{matrix.TARGET}}" -eq "win" ) { + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "/SP- /NORESTART /VERYSILENT /DIR=$GAMS_DIR /NOICONS" ` + -Wait + } else { + chmod 777 $INSTALLER + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "-q -d $GAMS_DIR" -Wait + mv $GAMS_DIR/*/* $GAMS_DIR/. + } + + - name: Install GAMS Python bindings + run: | + GAMS_DIR="$TPL_DIR/gams" + py_ver=$($PYTHON_EXE -c 'import sys;v="_%s%s" % sys.version_info[:2] \ + ;print(v if v != "_27" else "")') + if test -e $GAMS_DIR/apifiles/Python/api$py_ver; then + echo "Installing GAMS Python bindings" + pushd $GAMS_DIR/apifiles/Python/api$py_ver + $PYTHON_EXE setup.py install + popd + fi + + - name: Install BARON + shell: pwsh + run: | + $BARON_DIR="${env:TPL_DIR}/baron" + echo "::add-path::$BARON_DIR" + $URL="https://www.minlp.com/downloads/xecs/baron/current/" + if ( "${{matrix.TARGET}}" -eq "win" ) { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe" + $URL += "baron-win64.exe" + } elseif ( "${{matrix.TARGET}}" -eq "osx" ) { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip" + $URL += "baron-osx64.zip" + } else { + $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip" + $URL += "baron-lin64.zip" + } + if (-not (Test-Path "$INSTALLER" -PathType Leaf)) { + echo "...downloading BARON ($URL)" + Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER" + } + echo "...installing BARON" + if ( "${{matrix.TARGET}}" -eq "win" ) { + Start-Process -FilePath "$INSTALLER" -ArgumentList ` + "/SP- /NORESTART /VERYSILENT /DIR=$BARON_DIR /NOICONS" ` + -Wait + } else { + unzip -q $INSTALLER + mv baron-* $BARON_DIR + } + + - name: Install GJH_ASL_JSON + if: matrix.TARGET != 'win' + run: | + GJH_DIR="$TPL_DIR/gjh" + echo "::add-path::${GJH_DIR}" + INSTALL_DIR="${DOWNLOAD_DIR}/gjh" + if test ! 
-e "$INSTALL_DIR/bin"; then + mkdir -p "$INSTALL_DIR" + INSTALLER="$INSTALL_DIR/gjh_asl_json.zip" + URL="https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master" + curl --retry 8 -L $URL > $INSTALLER + cd $INSTALL_DIR + unzip -q $INSTALLER + cd gjh_asl_json-master/Thirdparty + ./get.ASL + cd .. + make + mv bin "$INSTALL_DIR/bin" + fi + cp -rp "$INSTALL_DIR/bin" "$GJH_DIR" + + - name: Install Pyomo and PyUtilib + run: | echo "" - sudo apt-get install libopenblas-dev gfortran liblapack-dev - mkdir ipopt_solver && cd ipopt_solver - wget -q https://github.com/IDAES/idaes-ext/releases/download/2.0.0/idaes-solvers-ubuntu1804-64.tar.gz -O ipopt.tar.gz - tar -xzf ipopt.tar.gz - cd .. - export PATH=$PATH:$(pwd)/ipopt_solver + echo "Clone Pyomo-model-libraries..." + git clone https://github.com/Pyomo/pyomo-model-libraries.git echo "" - echo "Install GJH_ASL_JSON..." + echo "Install PyUtilib..." echo "" - wget -q "https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master" -O gjh_asl_json.zip - unzip -q gjh_asl_json.zip - rm -rf gjh_asl_json.zip - cd gjh_asl_json-master/Thirdparty - ./get.ASL - cd .. - make - export PATH=$PATH:$(pwd)/bin - cd .. + $PYTHON_EXE -m pip install git+https://github.com/PyUtilib/pyutilib echo "" - echo "Install GAMS..." + echo "Install Pyomo..." 
echo "" - wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/linux/linux_x64_64_sfx.exe -O gams_installer.exe - chmod +x gams_installer.exe - ./gams_installer.exe -q -d gams - GAMS_DIR=`ls -d1 $(pwd)/gams/*/ | head -1` - cd gams/*/apifiles/Python/ - py_ver=$(python -c 'import sys;print("%s%s" % sys.version_info[:2])') - gams_ver=api - for ver in api_*; do - if test ${ver:4} -le $py_ver; then - gams_ver=$ver - fi - done - cd $gams_ver - python setup.py -q install -noCheck - export PATH=$PATH:$GAMS_DIR - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GAMS_DIR + $PYTHON_EXE setup.py develop echo "" - echo "Pass key environment variables to subsequent steps" + echo "Set custom PYOMO_CONFIG_DIR" echo "" - echo "::set-env name=PATH::$PATH" - echo "::set-env name=LD_LIBRARY_PATH::$LD_LIBRARY_PATH" + echo "::set-env name=PYOMO_CONFIG_DIR::${GITHUB_WORKSPACE}/config" - - name: Install Pyomo and extensions + - name: Set up coverage tracking + run: | + if test "${{matrix.TARGET}}" == win; then + COVERAGE_BASE=${GITHUB_WORKSPACE}\\.cover + else + COVERAGE_BASE=${GITHUB_WORKSPACE}/.cover + fi + COVERAGE_RC=${COVERAGE_BASE}_rc + echo "::set-env name=COVERAGE_RCFILE::$COVERAGE_RC" + echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_RC" + cp ${GITHUB_WORKSPACE}/.coveragerc ${COVERAGE_RC} + echo "data_file=${COVERAGE_BASE}age" >> ${COVERAGE_RC} + SITE_PACKAGES=$($PYTHON_EXE -c "from distutils.sysconfig import \ + get_python_lib; print(get_python_lib())") + echo "Python site-packages: $SITE_PACKAGES" + echo 'import coverage; coverage.process_startup()' \ + > ${SITE_PACKAGES}/run_coverage_at_startup.pth + + - name: Download and install extensions run: | - echo "Clone Pyomo-model-libraries..." - git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git - echo "" - echo "Install PyUtilib..." - echo "" - pip install --quiet git+https://github.com/PyUtilib/pyutilib echo "" - echo "Install Pyomo..." 
+ echo "Pyomo download-extensions" echo "" - python setup.py develop + pyomo download-extensions echo "" - echo "Download and install extensions..." + echo "Pyomo build-extensions" echo "" - pyomo download-extensions - pyomo build-extensions - - name: Run nightly tests with test.pyomo + pyomo build-extensions --parallel 2 + + - name: Report pyomo plugin information + run: | + pyomo help --solvers || exit 1 + pyomo help --transformations || exit 1 + pyomo help --writers || exit 1 + + - name: Run Pyomo tests + if: matrix.mpi == 0 run: | - echo "Run test.pyomo..." test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries + + - name: Run Pyomo MPI tests + if: matrix.mpi != 0 + run: | + # Manually invoke the DAT parser so that parse_table_datacmds.py + # is fully generated by a single process before invoking MPI + python -c "from pyomo.dataportal.parse_datacmds import \ + parse_data_commands; parse_data_commands(data='')" + mpirun -np ${{matrix.mpi}} --oversubscribe nosetests -v \ + --eval-attr="mpi and (not fragile)" \ + pyomo `pwd`/pyomo-model-libraries + + - name: Process code coverage report + env: + CODECOV_NAME: ${{matrix.TARGET}}/${{matrix.python}}${{matrix.NAME}} + run: | + coverage combine + coverage report -i + coverage xml -i + i=0 + while : ; do + curl --retry 8 -L https://codecov.io/bash -o codecov.sh + bash codecov.sh -Z -X gcov -f coverage.xml + if test $? 
== 0; then + break + elif test $i -ge 4; then + exit 1 + fi + DELAY=$(( RANDOM % 30 + 30)) + echo "Pausing $DELAY seconds before re-attempting upload" + sleep $DELAY + done diff --git a/.github/workflows/release_wheel_creation.yml b/.github/workflows/release_wheel_creation.yml new file mode 100644 index 00000000000..f20d306dcc4 --- /dev/null +++ b/.github/workflows/release_wheel_creation.yml @@ -0,0 +1,110 @@ +name: Pyomo Release Distribution Creation + +on: + push: + tags: + - '*' + +jobs: + manylinux: + name: ${{ matrix.TARGET }}/wheel_creation + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + include: + - os: ubuntu-latest + TARGET: manylinux + python-version: [3.7] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install twine wheel setuptools + - name: Build manylinux Python wheels + uses: RalfG/python-wheels-manylinux-build@v0.2.2-manylinux2010_x86_64 + with: + python-versions: 'cp27-cp27mu cp35-cp35m cp36-cp36m cp37-cp37m cp38-cp38' + build-requirements: 'cython' + package-path: '' + pip-wheel-args: '' + # When locally testing, --no-deps flag is necessary (PyUtilib dependency will trigger an error otherwise) + - name: Delete linux wheels + run: | + sudo rm -rf wheelhouse/*-linux_x86_64.whl + - name: Upload artifact + uses: actions/upload-artifact@v1 + with: + name: manylinux-wheels + path: wheelhouse + osx: + name: ${{ matrix.TARGET }}py${{ matrix.python-version }}/wheel_creation + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [macos-latest] + include: + - os: macos-latest + TARGET: osx + python-version: [ 2.7, 3.5, 3.6, 3.7, 3.8 ] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: 
${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install twine wheel setuptools cython + - name: Build OSX Python wheels + run: | + python setup.py --with-cython sdist --format=gztar bdist_wheel + + - name: Upload artifact + uses: actions/upload-artifact@v1 + with: + name: osx-wheels + path: dist + + + windows: + name: ${{ matrix.TARGET }}py${{ matrix.python-version }}/wheel_creation + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [windows-latest] + include: + - os: windows-latest + TARGET: win + python-version: [ 3.6, 3.7, 3.8 ] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + shell: pwsh + run: | + $env:PYTHONWARNINGS="ignore::UserWarning" + Invoke-Expression "python -m pip install --upgrade pip" + Invoke-Expression "pip install setuptools twine wheel cython" + - name: Build Windows Python wheels + shell: pwsh + run: | + $env:PYTHONWARNINGS="ignore::UserWarning" + Invoke-Expression "python setup.py --with-cython sdist --format=gztar bdist_wheel" + - name: Upload artifact + uses: actions/upload-artifact@v1 + with: + name: win-wheels + path: dist diff --git a/.github/workflows/unix_python_matrix_test.yml b/.github/workflows/unix_python_matrix_test.yml deleted file mode 100644 index c323b843527..00000000000 --- a/.github/workflows/unix_python_matrix_test.yml +++ /dev/null @@ -1,169 +0,0 @@ -name: GitHub CI (unix) - -on: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - pyomo-unix-tests: - name: ${{ matrix.TARGET }}/py${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [macos-latest, ubuntu-latest] - include: - - os: macos-latest - TARGET: osx - - os: ubuntu-latest - TARGET: linux - python-version: [3.5, 3.6, 3.7, 3.8] - - steps: - - 
uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - if [ ${{ matrix.TARGET }} == 'osx' ]; then - echo "Install pre-dependencies for pyodbc..." - brew update - brew list bash || brew install bash - brew list gcc || brew install gcc - brew link --overwrite gcc - brew list pkg-config || brew install pkg-config - brew list unixodbc || brew install unixodbc - brew list freetds || brew install freetds - fi - echo "" - echo "Upgrade pip..." - echo "" - python -m pip install --upgrade pip - echo "" - echo "Install Pyomo dependencies..." - echo "" - # Note: pandas 1.0.3 causes gams 29.1.0 import to fail in python 3.8 - pip install cython numpy scipy ipython openpyxl sympy pyyaml \ - pyodbc networkx xlrd pandas matplotlib dill seaborn pymysql \ - pyro4 pint pathos coverage nose - echo "" - echo "Install CPLEX Community Edition..." - echo "" - pip install cplex || echo "CPLEX Community Edition is not available for ${{ matrix.python-version }}" - echo "" - echo "Install BARON..." - echo "" - if [ ${{ matrix.TARGET }} == 'osx' ]; then - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-osx64.zip -O baron_installer.zip - else - wget -q https://www.minlp.com/downloads/xecs/baron/current/baron-lin64.zip -O baron_installer.zip - fi - unzip -q baron_installer.zip - mv baron-* baron-dir - BARON_DIR=$(pwd)/baron-dir - export PATH=$PATH:$BARON_DIR - echo "" - echo "Install IDAES Ipopt (Linux only)..." - echo "" - if [ ${{ matrix.TARGET }} == 'linux' ]; then - sudo apt-get install libopenblas-dev gfortran liblapack-dev - mkdir ipopt_solver && cd ipopt_solver - wget -q https://github.com/IDAES/idaes-ext/releases/download/2.0.0/idaes-solvers-ubuntu1804-64.tar.gz -O ipopt.tar.gz - tar -xzf ipopt.tar.gz - cd .. - export PATH=$PATH:$(pwd)/ipopt_solver - fi - echo "" - echo "Install GJH_ASL_JSON..." 
- echo "" - wget -q "https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master" -O gjh_asl_json.zip - unzip -q gjh_asl_json.zip - rm -rf gjh_asl_json.zip - cd gjh_asl_json-master/Thirdparty - ./get.ASL - cd .. - make - export PATH=$PATH:$(pwd)/bin - cd .. - echo "" - echo "Install GAMS..." - echo "" - if [ ${{ matrix.TARGET }} == 'osx' ]; then - wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/macosx/osx_x64_64_sfx.exe -O gams_installer.exe - else - wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/linux/linux_x64_64_sfx.exe -O gams_installer.exe - fi - chmod +x gams_installer.exe - ./gams_installer.exe -q -d gams - GAMS_DIR=`ls -d1 $(pwd)/gams/*/ | head -1` - export PATH=$PATH:$GAMS_DIR - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GAMS_DIR - export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$GAMS_DIR - cd $GAMS_DIR/apifiles/Python/ - py_ver=$(python -c 'import sys;print("%s%s" % sys.version_info[:2])') - gams_ver=api - for ver in api_*; do - if test ${ver:4} -le $py_ver; then - gams_ver=$ver - fi - done - cd $gams_ver - python setup.py -q install -noCheck - echo "" - echo "Pass key environment variables to subsequent steps" - echo "" - echo "::set-env name=PATH::$PATH" - echo "::set-env name=LD_LIBRARY_PATH::$LD_LIBRARY_PATH" - echo "::set-env name=DYLD_LIBRARY_PATH::$DYLD_LIBRARY_PATH" - - - name: Install Pyomo and extensions - run: | - echo "Clone Pyomo-model-libraries..." - git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git - echo "" - echo "Install PyUtilib..." - echo "" - pip install --quiet git+https://github.com/PyUtilib/pyutilib - echo "" - echo "Install Pyomo..." 
- echo "" - python setup.py develop - - - name: Set up coverage tracking - run: | - WORKSPACE=`pwd` - COVERAGE_PROCESS_START=${WORKSPACE}/coveragerc - echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_PROCESS_START" - cp ${WORKSPACE}/.coveragerc ${COVERAGE_PROCESS_START} - echo "data_file=${WORKSPACE}/.coverage" >> ${COVERAGE_PROCESS_START} - SITE_PACKAGES=`python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"` - if [ -z "$DISABLE_COVERAGE" ]; then - echo 'import coverage; coverage.process_startup()' \ - > ${SITE_PACKAGES}/run_coverage_at_startup.pth - fi - - - name: Download and install extensions - run: | - pyomo download-extensions - pyomo build-extensions - - - name: Run Pyomo tests - run: | - echo "Run test.pyomo..." - test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries - - - name: Upload coverage to codecov - env: - GITHUB_JOB_NAME: unix/${{ matrix.TARGET }}/py${{ matrix.python-version }} - run: | - find . -maxdepth 10 -name ".cov*" - coverage combine - coverage report -i - bash <(curl -s https://codecov.io/bash) -X gcov -n "$GITHUB_JOB_NAME" diff --git a/.github/workflows/win_python_matrix_test.yml b/.github/workflows/win_python_matrix_test.yml deleted file mode 100644 index 719d6b886a5..00000000000 --- a/.github/workflows/win_python_matrix_test.yml +++ /dev/null @@ -1,159 +0,0 @@ -name: GitHub CI (win) - -on: - pull_request: - branches: - - master - -jobs: - pyomo-tests: - name: win/py${{ matrix.python-version }} - runs-on: windows-latest - strategy: - fail-fast: false # This flag causes all of the matrix to continue to run, even if one matrix option fails - matrix: - python-version: [2.7, 3.5, 3.6, 3.7, 3.8] - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} with Miniconda - uses: goanpeca/setup-miniconda@v1 # Using an action created by user goanpeca to set up different Python Miniconda environments - with: - auto-update-conda: true - python-version: ${{ 
matrix.python-version }} - - name: Install Pyomo dependencies - shell: pwsh - run: | - $env:PYTHONWARNINGS="ignore::UserWarning" - Write-Host ("Current Enviroment variables: ") - gci env:Path | Sort Name - Write-Host ("") - Write-Host ("Update conda, then force it to NOT update itself again...") - Write-Host ("") - Invoke-Expression "conda config --set always_yes yes" - Invoke-Expression "conda config --set auto_update_conda false" - conda info - conda config --show-sources - conda list --show-channel-urls - Write-Host ("") - Write-Host ("Setting Conda Env Vars... ") - Write-Host ("") - $env:CONDA_INSTALL = "conda install -q -y " - $env:ANACONDA = $env:CONDA_INSTALL + " -c anaconda " - $env:CONDAFORGE = $env:CONDA_INSTALL + " -c conda-forge --no-update-deps " - $env:USING_MINICONDA = 1 - $env:ADDITIONAL_CF_PKGS="setuptools pip coverage sphinx_rtd_theme " - $env:MINICONDA_EXTRAS="" - $env:MINICONDA_EXTRAS="numpy scipy ipython openpyxl sympy pyodbc pyyaml networkx xlrd pandas matplotlib dill seaborn " - $env:ADDITIONAL_CF_PKGS=$env:ADDITIONAL_CF_PKGS + "pymysql pyro4 pint pathos " + $env:MINICONDA_EXTRAS - $env:ADDITIONAL_CF_PKGS=$env:ADDITIONAL_CF_PKGS + " glpk " - $env:EXP = $env:CONDAFORGE + $env:ADDITIONAL_CF_PKGS - Invoke-Expression $env:EXP - $env:CPLEX = $env:CONDAFORGE + "-c ibmdecisionoptimization cplex=12.10" - Write-Host ("") - Write-Host ("Try to install CPLEX...") - Write-Host ("") - try - { - Invoke-Expression $env:CPLEX - } - catch - { - Write-Host ("##########################################################################") - Write-Host ("WARNING: CPLEX Community Edition is not available for Python ${{ matrix.python-version }}") - Write-Host ("##########################################################################") - conda deactivate - conda activate test - } - $env:PYNUMERO = $env:CONDAFORGE + " pynumero_libraries" - Write-Host ("") - Write-Host ("Try to install Pynumero_libraries...") - Write-Host ("") - try - { - Invoke-Expression 
$env:PYNUMERO - } - catch - { - Write-Host ("##############################################################################") - Write-Host ("WARNING: Python ${{matrix.python-version}}: Pynumero_libraries not available. ") - Write-Host ("##############################################################################") - conda deactivate - conda activate test - } - conda list --show-channel-urls - Write-Host ("") - Write-Host ("Installing BARON") - Write-Host ("") - Invoke-WebRequest -Uri 'https://www.minlp.com/downloads/xecs/baron/current/baron-win64.exe' -OutFile 'baron-win64.exe' - Start-Process -FilePath 'baron-win64.exe' -ArgumentList '/SP- /VERYSILENT /NORESTART /DIR=.\bar_solver /NOICONS' -Wait - Write-Host ("") - Write-Host ("Installing IDAES Ipopt") - Write-Host ("") - New-Item -Path . -Name "ipopt_solver" -ItemType "directory" - cd ipopt_solver - Invoke-WebRequest -Uri 'https://github.com/IDAES/idaes-ext/releases/download/2.0.0/idaes-solvers-windows-64.tar.gz' -OutFile 'ipopt1.tar.gz' - Invoke-Expression 'tar -xzf ipopt1.tar.gz' - Invoke-WebRequest -Uri 'https://github.com/IDAES/idaes-ext/releases/download/2.0.0/idaes-lib-windows-64.tar.gz' -OutFile 'ipopt2.tar.gz' - Invoke-Expression 'tar -xzf ipopt2.tar.gz' - Remove-Item *.tar.gz -Force - cd .. 
- Write-Host ("") - Write-Host ("Installing GAMS") - Write-Host ("") - Invoke-WebRequest -Uri 'https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0/windows/windows_x64_64.exe' -OutFile 'windows_x64_64.exe' - Start-Process -FilePath 'windows_x64_64.exe' -ArgumentList '/SP- /VERYSILENT /NORESTART /DIR=.\gams /NOICONS' -Wait - cd gams\apifiles\Python\ - if(${{matrix.python-version}} -eq 2.7) { - cd api - python setup.py -q install - }elseif(${{matrix.python-version}} -eq 3.6) { - Write-Host ("PYTHON ${{matrix.python-version}}") - cd api_36 - python setup.py -q install - }elseif(${{matrix.python-version}} -eq 3.7) { - Write-Host ("PYTHON ${{matrix.python-version}}") - cd api_37 - python setup.py -q install -noCheck - }else { - Write-Host ("########################################################################") - Write-Host ("WARNING: Python ${{matrix.python-version}}: GAMS Bindings not supported.") - Write-Host ("########################################################################") - } - cd $env:CWD - Remove-Item *.exe -Force - Write-Host ("") - Write-Host ("New Shell Environment: ") - gci env: | Sort Name - - - name: Install Pyomo and extensions - shell: pwsh - run: | - $env:PYTHONWARNINGS="ignore::UserWarning" - Write-Host ("") - Write-Host ("Clone model library and install PyUtilib...") - Write-Host ("") - git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git - git clone --quiet https://github.com/PyUtilib/pyutilib.git - cd pyutilib - python setup.py develop - cd .. 
- Write-Host ("") - Write-Host ("Install Pyomo...") - Write-Host ("") - python setup.py develop - Write-Host ("") - Write-Host "Pyomo download-extensions" - Write-Host ("") - Invoke-Expression "pyomo download-extensions" - - - name: Run nightly tests with test.pyomo - shell: pwsh - run: | - $env:PYTHONWARNINGS="ignore::UserWarning" - Write-Host "Setup and run nosetests" - $env:BUILD_DIR = $(Get-Location).Path - $env:PATH += ';' + $(Get-Location).Path + "\gams" - $env:PATH += ';' + $(Get-Location).Path + "\ipopt_solver" - $env:PATH += ';' + $(Get-Location).Path + "\bar_solver" - $env:EXP = "test.pyomo -v --cat='nightly' pyomo " + $env:BUILD_DIR + "\pyomo-model-libraries" - Invoke-Expression $env:EXP diff --git a/.jenkins.sh b/.jenkins.sh index 893af305425..7f716779c6b 100644 --- a/.jenkins.sh +++ b/.jenkins.sh @@ -102,8 +102,8 @@ if test -z "$MODE" -o "$MODE" == setup; then # Set up coverage for this build export COVERAGE_PROCESS_START=${WORKSPACE}/coveragerc cp ${WORKSPACE}/pyomo/.coveragerc ${COVERAGE_PROCESS_START} - echo "source=${WORKSPACE}/pyomo" >> ${COVERAGE_PROCESS_START} - echo "data_file=${WORKSPACE}/pyomo/.coverage" >> ${COVERAGE_PROCESS_START} + echo "data_file=${WORKSPACE}/pyomo/.coverage" \ + >> ${COVERAGE_PROCESS_START} echo 'import coverage; coverage.process_startup()' \ > "${LOCAL_SITE_PACKAGES}/run_coverage_at_startup.pth" fi @@ -193,7 +193,7 @@ if test -z "$MODE" -o "$MODE" == test; then | tee .cover.upload if test $? == 0 -a `grep -i error .cover.upload | wc -l` -eq 0; then break - elif test $i -ge 3; then + elif test $i -ge 4; then exit 1 fi DELAY=$(( RANDOM % 30 + 15)) diff --git a/.travis.yml b/.travis.yml index 928e75da88c..0e61f8f3419 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,9 +104,25 @@ after_success: # Combine coverage reports over all subprocesses and upload - ${DOC} find . 
-maxdepth 10 -name ".cov*" - ${DOC} coverage combine - - ${DOC} codecov --env TAG -X gcov - # Trigger PyomoGallery build, but only when building the master branch - # Note: this is disabled unless a token is injected through an - # environment variable + - ${DOC} coverage report -i + - ${DOC} coverage xml -i + - | + i=0 + while : ; do + i=$[$i+1] + echo "Uploading coverage to codecov (attempt $i)" + ${DOC} codecov --env TAG -X gcov -X s3 + if test $? == 0; then + break + elif test $i -ge 4; then + exit 1 + fi + DELAY=$(( RANDOM % 30 + 30)) + echo "Pausing $DELAY seconds before re-attempting upload" + sleep $DELAY + done + # Trigger PyomoGallery build, but only when building the master branch + # Note: this is disabled unless a token is injected through an + # environment variable - "if [ -n \"${SECRET_TRAVIS_TOKEN}\" -a -n \"${KEY_JOB}\" -a \"${TRAVIS_PULL_REQUEST}\" == false ]; then curl -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Travis-API-Version: 3' -H 'Authorization: token ${SECRET_TRAVIS_TOKEN}' -d '{\"request\": {\"branch\": \"master\"}}' https://api.travis-ci.org/repo/Pyomo%2FPyomoGallery/requests; fi" diff --git a/README.md b/README.md index 28f716baa82..dea07ed9db3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ - +[![Github Actions Status](https://github.com/Pyomo/pyomo/workflows/GitHub%20CI/badge.svg?event=push)](https://github.com/Pyomo/pyomo/actions?query=event%3Apush+workflow%3A%22GitHub+CI%22) [![Travis Status](https://img.shields.io/travis/com/Pyomo/pyomo/master?logo=travis)](https://travis-ci.com/Pyomo/pyomo) -[![Appveyor Status](https://ci.appveyor.com/api/projects/status/km08tbkv05ik14n9/branch/master?svg=true)](https://ci.appveyor.com/project/WilliamHart/pyomo/branch/master) [![Jenkins Status](https://img.shields.io/jenkins/s/https/software.sandia.gov/downloads/pub/pyomo/jenkins/Pyomo_trunk.svg?logo=jenkins&logoColor=white)](https://jenkins-srn.sandia.gov/job/Pyomo_trunk) 
[![codecov](https://codecov.io/gh/Pyomo/pyomo/branch/master/graph/badge.svg)](https://codecov.io/gh/Pyomo/pyomo) [![Documentation Status](https://readthedocs.org/projects/pyomo/badge/?version=latest)](http://pyomo.readthedocs.org/en/latest/) diff --git a/doc/OnlineDocs/README.txt b/doc/OnlineDocs/README.txt index 84ef11ea75c..237dc8d3fcf 100644 --- a/doc/OnlineDocs/README.txt +++ b/doc/OnlineDocs/README.txt @@ -3,7 +3,7 @@ GETTING STARTED 0. Install Sphinx - pip install sphinx + pip install sphinx sphinx_rtd_theme 1. Edit documentation diff --git a/doc/OnlineDocs/contributed_packages/mindtpy.rst b/doc/OnlineDocs/contributed_packages/mindtpy.rst index f6fc8ca665d..c7a2773fec1 100644 --- a/doc/OnlineDocs/contributed_packages/mindtpy.rst +++ b/doc/OnlineDocs/contributed_packages/mindtpy.rst @@ -7,12 +7,12 @@ These decomposition algorithms usually rely on the solution of Mixed-Intger Line (MILP) and Nonlinear Programs (NLP). MindtPy currently implements the Outer Approximation (OA) algorithm originally described in -`Duran & Grossmann`_. Usage and implementation +`Duran & Grossmann, 1986`_. Usage and implementation details for MindtPy can be found in the PSE 2018 paper Bernal et al., (`ref `_, `preprint `_). -.. _Duran & Grossmann: https://dx.doi.org/10.1007/BF02592064 +.. _Duran & Grossmann, 1986: https://dx.doi.org/10.1007/BF02592064 Usage of MindtPy to solve a Pyomo concrete model involves: @@ -33,7 +33,7 @@ An example which includes the modeling approach may be found below. 
>>> model.x = Var(bounds=(1.0,10.0),initialize=5.0) >>> model.y = Var(within=Binary) - >>> model.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y)) + >>> model.c1 = Constraint(expr=(model.x-4.0)**2 - model.x <= 50.0*(1-model.y)) >>> model.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y)) >>> model.objective = Objective(expr=model.x, sense=minimize) @@ -58,6 +58,49 @@ The solution may then be displayed by using the commands >>> SolverFactory('mindtpy').solve(model, mip_solver='glpk', nlp_solver='ipopt', tee=True) +Single tree implementation +--------------------------------------------- + +MindtPy also supports single tree implementation of Outer Approximation (OA) algorithm, which is known as LP/NLP algorithm originally described in `Quesada & Grossmann`_. +The LP/NLP algorithm in MindtPy is implemented based on the LazyCallback function in commercial solvers. + +.. _Quesada & Grossmann: https://www.sciencedirect.com/science/article/abs/pii/0098135492800288 + + +.. Note:: + +The single tree implementation currently only works with CPLEX. +To use LazyCallback function of CPLEX from Pyomo, the `CPLEX Python API`_ is required. +This means both IBM ILOG CPLEX Optimization Studio and the CPLEX-Python modules should be installed on your computer. + + +.. _CPLEX Python API: https://www.ibm.com/support/knowledgecenter/SSSA5P_12.7.1/ilog.odms.cplex.help/CPLEX/GettingStarted/topics/set_up/Python_setup.html + + +A usage example for single tree is as follows: + +.. 
code:: + + >>> import pyomo.environ as pyo + >>> model = pyo.ConcreteModel() + + >>> model.x = pyo.Var(bounds=(1.0, 10.0), initialize=5.0) + >>> model.y = pyo.Var(within=Binary) + + >>> model.c1 = Constraint(expr=(model.x-4.0)**2 - model.x <= 50.0*(1-model.y)) + >>> model.c2 = pyo.Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y)) + + >>> model.objective = pyo.Objective(expr=model.x, sense=pyo.minimize) + + Solve the model using single tree implementation in MindtPy + >>> pyo.SolverFactory('mindtpy').solve( + ... model, strategy='OA', + ... mip_solver='cplex_persistent', nlp_solver='ipopt', single_tree=True) + >>> model.objective.display() + + + + MindtPy implementation and optional arguments --------------------------------------------- diff --git a/doc/OnlineDocs/contributed_packages/parmest/driver.rst b/doc/OnlineDocs/contributed_packages/parmest/driver.rst index 696079f46f4..840fa1b61bd 100644 --- a/doc/OnlineDocs/contributed_packages/parmest/driver.rst +++ b/doc/OnlineDocs/contributed_packages/parmest/driver.rst @@ -72,7 +72,16 @@ Section. >>> import pyomo.contrib.parmest.parmest as parmest >>> pest = parmest.Estimator(model_function, data, theta_names, objective_function) - + +Optionally, solver options can be supplied, e.g., + +.. doctest:: + :skipif: not __import__('pyomo.contrib.parmest.parmest').contrib.parmest.parmest.parmest_available + + >>> solver_options = {"max_iter": 6000} + >>> pest = parmest.Estimator(model_function, data, theta_names, objective_function, solver_options) + + Model function -------------- diff --git a/doc/OnlineDocs/contributed_packages/parmest/index.rst b/doc/OnlineDocs/contributed_packages/parmest/index.rst index 0d8a3eca5dc..3052b4e5dbf 100644 --- a/doc/OnlineDocs/contributed_packages/parmest/index.rst +++ b/doc/OnlineDocs/contributed_packages/parmest/index.rst @@ -16,6 +16,7 @@ confidence regions and subsequent creation of scenarios for PySP. 
 examples.rst parallel.rst api.rst + scencreate.rst Indices and Tables ------------------ diff --git a/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst b/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst new file mode 100644 index 00000000000..e9ce28c89eb --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst @@ -0,0 +1,34 @@ +Scenario Creation +================= + +In addition to model-based parameter estimation, parmest can create +scenarios for use in optimization under uncertainty. To do this, one +first creates an ``Estimator`` object, then a ``ScenarioCreator`` +object, which has methods to add ``ParmestScen`` scenario objects to a +``ScenarioSet`` object, which can write them to a csv file or output them +via an iterator method. + +Example +------- + +This example is in the semibatch subdirectory of the examples directory in +the file ``scencreate.py``. It creates a csv file with scenarios that +correspond one-to-one with the experiments used as input data. It also +creates a few scenarios using the bootstrap methods and prints the +scenarios to the screen, accessing them via the ``ScensItator`` and a ``print`` + +.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/semibatch/scencreate.py + :language: python + +.. note:: + This example may produce an error message if your version of Ipopt is not based + on a good linear solver. + + +API +--- + +.. 
automodule:: pyomo.contrib.parmest.scenariocreator + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/OnlineDocs/contribution_guide.rst b/doc/OnlineDocs/contribution_guide.rst index 7117dfb6d64..3439a96ff88 100644 --- a/doc/OnlineDocs/contribution_guide.rst +++ b/doc/OnlineDocs/contribution_guide.rst @@ -234,7 +234,48 @@ these changes to the master branch on your fork, :: git push my-fork master + + +Setting up your development environment ++++++++++++++++++++++++++++++++++++++++ + +After cloning your fork, you will want to install Pyomo from source. + +Step 1 (recommended): Create a new conda environment. + +:: + + conda create --name pyomodev + +You may change the environment name from ``pyomodev`` as you see fit. Then activate the environment: + +:: + conda activate pyomodev + +Step 2: Install PyUtilib + +You will likely need the master branch of PyUtilib to contribute to Pyomo. Clone a copy of the repository in a new directory: + +:: + + git clone https://github.com/PyUtilib/pyutilib + +Then in the directory containing the clone of PyUtilib run: + +:: + + python setup.py develop + +Step 3: Install Pyomo + +Finally, move to the directory containing the clone of your Pyomo fork and run: + +:: + + python setup.py develop + +These commands register the cloned code with the active python environment (``pyomodev``). This way, your changes to the source code for ``pyomo`` and ``pyutilib`` are automatically used by the active environment. You can create another conda environment to switch to alternate versions of pyomo (e.g., stable). Review Process -------------- diff --git a/doc/OnlineDocs/modeling_extensions/gdp.rst b/doc/OnlineDocs/modeling_extensions/gdp.rst index a3e066d2cb1..9fb6feeb03a 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp.rst @@ -62,15 +62,15 @@ Transformation To use standard commercial solvers, you must convert the disjunctive model to a standard MIP/MINLP model. 
The two classical strategies for doing so are the (included) Big-M and Hull reformulations. -From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm`` or ``--transform pyomo.gdp.chull``. +From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm`` or ``--transform pyomo.gdp.hull``. If you are using a Python script, ``TransformationFactory`` accomplishes the same functionality: - ``TransformationFactory('gdp.bigm').apply_to(model)`` -- ``TransformationFactory('gdp.chull').apply_to(model)`` +- ``TransformationFactory('gdp.hull').apply_to(model)`` .. note:: - - all variables that appear in disjuncts need upper and lower bounds for chull + - all variables that appear in disjuncts need upper and lower bounds for hull - for linear models, the BigM transform can estimate reasonably tight M values for you if variables are bounded. diff --git a/doc/attic/GettingStarted/current/pyomo.txt b/doc/attic/GettingStarted/current/pyomo.txt index acd07b9bed8..027dcd58afc 100644 --- a/doc/attic/GettingStarted/current/pyomo.txt +++ b/doc/attic/GettingStarted/current/pyomo.txt @@ -1042,7 +1042,7 @@ In order to use the solvers currently avaialbe, one must convert the disjunctive model to a standard MIP/MINLP model. The easiest way to do that is using the (included) BigM or Convex Hull transformations. From the Pyomo command line, include the option +--transform pyomo.gdp.bigm+ -or +--transform pyomo.gdp.chull+ +or +--transform pyomo.gdp.hull+ === Notes === diff --git a/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst b/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst index d7992b93d5e..49649012825 100644 --- a/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst +++ b/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst @@ -47,7 +47,7 @@ In order to use the solvers currently available, one must convert the disjunctive model to a standard MIP/MINLP model. 
The easiest way to do that is using the (included) BigM or Convex Hull transformations. From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm`` -or ``--transform pyomo.gdp.chull`` +or ``--transform pyomo.gdp.hull`` Notes ----- diff --git a/examples/gdp/medTermPurchasing_Literal.py b/examples/gdp/medTermPurchasing_Literal.py index e6d3d2a6b03..41058a0deb6 100755 --- a/examples/gdp/medTermPurchasing_Literal.py +++ b/examples/gdp/medTermPurchasing_Literal.py @@ -606,7 +606,7 @@ def FD_contract(model, j, t): if __name__ == "__main__": - m = build_model().create_instance('medTermPurchasing_Literal_Chull.dat') + m = build_model().create_instance('medTermPurchasing_Literal_Hull.dat') TransformationFactory('gdp.bigm').apply_to(m) SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;']) m.profit.display() diff --git a/examples/gdp/small_lit/basic_step.py b/examples/gdp/small_lit/basic_step.py index fd62921e06b..89cf0ffc0b0 100644 --- a/examples/gdp/small_lit/basic_step.py +++ b/examples/gdp/small_lit/basic_step.py @@ -39,14 +39,14 @@ def disjunctions(model,i): def solve_base_model(): m_base = build_gdp_model() - m_chull = TransformationFactory('gdp.chull').create_using(m_base) + m_hull = TransformationFactory('gdp.hull').create_using(m_base) #m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100) solver = SolverFactory('gams') - solver.solve(m_chull, solver='baron') - #m_chull.pprint() - m_chull.objective.display() - m_chull.x1.display() - m_chull.x2.display() + solver.solve(m_hull, solver='baron') + #m_hull.pprint() + m_hull.objective.display() + m_hull.x1.display() + m_hull.x2.display() def solve_basic_step_model(): @@ -57,7 +57,7 @@ def solve_basic_step_model(): #with open('pprint.log','w') as outputfile: # m_base.disjunctions.pprint(outputfile) - #m_bs_chull = TransformationFactory('gdp.chull').create_using(m_base) + #m_bs_hull = TransformationFactory('gdp.hull').create_using(m_base) m_bigm = 
TransformationFactory('gdp.bigm').create_using(m_base, bigM=100) m_bigm.pprint() diff --git a/examples/gdp/small_lit/nonconvex_HEN.py b/examples/gdp/small_lit/nonconvex_HEN.py index 1dd276d4dc7..1c3cb9f4e84 100644 --- a/examples/gdp/small_lit/nonconvex_HEN.py +++ b/examples/gdp/small_lit/nonconvex_HEN.py @@ -76,7 +76,7 @@ def exchanger_disjunction(m, disjctn): # Decide whether to reformulate as MINLP and what method to use reformulation = True - reformulation_method = 'chull' + reformulation_method = 'hull' model = build_gdp_model() model.pprint() @@ -84,8 +84,8 @@ def exchanger_disjunction(m, disjctn): if reformulation: if reformulation_method == 'bigm': TransformationFactory('gdp.bigm').apply_to(model,bigM=600*(50**0.6)+2*46500) - elif reformulation_method == 'chull': - TransformationFactory('gdp.chull').apply_to(model) + elif reformulation_method == 'hull': + TransformationFactory('gdp.hull').apply_to(model) res = SolverFactory('gams').solve(model, tee=True, solver='baron', add_options=['option optcr = 0;'], keepfiles=True) else: # Note: MC++ needs to be properly installed to use strategy GLOA diff --git a/examples/gdp/strip_packing/strip_packing_8rect.py b/examples/gdp/strip_packing/strip_packing_8rect.py index 7b7c0344459..9fb96500f03 100644 --- a/examples/gdp/strip_packing/strip_packing_8rect.py +++ b/examples/gdp/strip_packing/strip_packing_8rect.py @@ -88,6 +88,6 @@ def no_overlap(m, i, j): if __name__ == "__main__": model = build_rect_strip_packing_model() - TransformationFactory('gdp.chull').apply_to(model) + TransformationFactory('gdp.hull').apply_to(model) opt = SolverFactory('gurobi') results = opt.solve(model, tee=True) diff --git a/pyomo/contrib/pynumero/extensions/__init__.py b/pyomo/common/collections/__init__.py similarity index 91% rename from pyomo/contrib/pynumero/extensions/__init__.py rename to pyomo/common/collections/__init__.py index cd6b0b75748..2ba62ce0e56 100644 --- a/pyomo/contrib/pynumero/extensions/__init__.py +++ 
b/pyomo/common/collections/__init__.py @@ -7,3 +7,5 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + +from .orderedset import OrderedDict, OrderedSet diff --git a/pyomo/common/collections/orderedset.py b/pyomo/common/collections/orderedset.py new file mode 100644 index 00000000000..6740069deb5 --- /dev/null +++ b/pyomo/common/collections/orderedset.py @@ -0,0 +1,84 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import six +from six import itervalues, iteritems + +if six.PY3: + from collections.abc import MutableSet as collections_MutableSet +else: + from collections import MutableSet as collections_MutableSet +try: + from collections import OrderedDict +except: + from ordereddict import OrderedDict + +class OrderedSet(collections_MutableSet): + __slots__ = ('_dict') + + def __init__(self, iterable=None): + self._dict = OrderedDict() + if iterable is not None: + self.update(iterable) + + def __str__(self): + """String representation of the mapping.""" + return "OrderedSet(%s)" % (', '.join(repr(x) for x in self)) + + + def update(self, iterable): + for val in iterable: + self.add(val) + + # + # This method must be defined for deepcopy/pickling + # because this class relies on Python ids. 
+ # + def __setstate__(self, state): + self._dict = state + + def __getstate__(self): + return self._dict + + # + # Implement MutableSet abstract methods + # + + def __contains__(self, val): + return val in self._dict + + def __iter__(self): + return iter(self._dict) + + def __len__(self): + return len(self._dict) + + def add(self, val): + """Add an element.""" + if val not in self._dict: + self._dict[val] = None + + def discard(self, val): + """Remove an element. Do not raise an exception if absent.""" + if val in self._dict: + del self._dict[val] + + # + # The remaining MutableSet methods have slow default + # implementations. + # + + def clear(self): + """Remove all elements from this set.""" + self._dict.clear() + + def remove(self, val): + """Remove an element. If not a member, raise a KeyError.""" + del self._dict[val] diff --git a/pyomo/common/config.py b/pyomo/common/config.py index 64912988c4d..3e57b28c7ff 100644 --- a/pyomo/common/config.py +++ b/pyomo/common/config.py @@ -11,6 +11,7 @@ import os import platform +import enum import six from pyutilib.misc.config import ConfigBlock, ConfigList, ConfigValue @@ -157,3 +158,13 @@ def add_docstring_list(docstring, configblock, indent_by=4): indent_spacing=0, width=256 ).splitlines(True)) + + +class ConfigEnum(enum.Enum): + @classmethod + def from_enum_or_string(cls, arg): + if type(arg) is str: + return cls[arg] + else: + # Handles enum or integer inputs + return cls(arg) diff --git a/pyomo/common/tests/test_config.py b/pyomo/common/tests/test_config.py index 530a5afbf05..e21b6856d29 100644 --- a/pyomo/common/tests/test_config.py +++ b/pyomo/common/tests/test_config.py @@ -15,7 +15,7 @@ ConfigBlock, ConfigList, ConfigValue, PositiveInt, NegativeInt, NonPositiveInt, NonNegativeInt, PositiveFloat, NegativeFloat, NonPositiveFloat, NonNegativeFloat, - In, Path, PathList + In, Path, PathList, ConfigEnum ) class TestConfig(unittest.TestCase): @@ -338,3 +338,15 @@ def norm(x): c.a = () self.assertEqual(len(c.a), 0) 
self.assertIs(type(c.a), list) + + def test_ConfigEnum(self): + class TestEnum(ConfigEnum): + ITEM_ONE = 1 + ITEM_TWO = 2 + + self.assertEqual(TestEnum.from_enum_or_string(1), + TestEnum.ITEM_ONE) + self.assertEqual(TestEnum.from_enum_or_string( + TestEnum.ITEM_TWO), TestEnum.ITEM_TWO) + self.assertEqual(TestEnum.from_enum_or_string('ITEM_ONE'), + TestEnum.ITEM_ONE) diff --git a/pyomo/common/tests/test_orderedset.py b/pyomo/common/tests/test_orderedset.py new file mode 100644 index 00000000000..d43460c6c9c --- /dev/null +++ b/pyomo/common/tests/test_orderedset.py @@ -0,0 +1,70 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +import pickle +import pyutilib.th as unittest + +from pyomo.common.collections import OrderedSet + +class testOrderedSet(unittest.TestCase): + def test_constructor(self): + a = OrderedSet() + self.assertEqual(len(a), 0) + self.assertEqual(list(a), []) + self.assertEqual(str(a), 'OrderedSet()') + + ref = [1,9,'a',4,2,None] + a = OrderedSet(ref) + self.assertEqual(len(a), 6) + self.assertEqual(list(a), ref) + self.assertEqual(str(a), "OrderedSet(1, 9, 'a', 4, 2, None)") + + def test_in_add(self): + a = OrderedSet() + self.assertNotIn(1, a) + self.assertNotIn(None, a) + + a.add(None) + self.assertNotIn(1, a) + self.assertIn(None, a) + + a.add(1) + self.assertIn(1, a) + self.assertIn(None, a) + + a.add(0) + self.assertEqual(list(a), [None,1,0]) + + # Adding a member alrady in the set does not change the ordering + a.add(1) + self.assertEqual(list(a), [None,1,0]) + + def test_discard_remove_clear(self): + a = OrderedSet([1,3,2,4]) + a.discard(3) + self.assertEqual(list(a), [1,2,4]) + a.discard(3) + self.assertEqual(list(a), [1,2,4]) + + a.remove(2) + self.assertEqual(list(a), [1,4]) + with self.assertRaisesRegex(KeyError,'2'): + a.remove(2) + + a.clear() + self.assertEqual(list(a), []) + + def test_pickle(self): + ref = [1,9,'a',4,2,None] + a = OrderedSet(ref) + b = pickle.loads(pickle.dumps(a)) + self.assertEqual(a, b) + self.assertIsNot(a, b) + self.assertIsNot(a._dict, b._dict) diff --git a/pyomo/contrib/fbbt/fbbt.py b/pyomo/contrib/fbbt/fbbt.py index 79a3026fbac..52e5b251ae3 100644 --- a/pyomo/contrib/fbbt/fbbt.py +++ b/pyomo/contrib/fbbt/fbbt.py @@ -319,7 +319,7 @@ def _prop_bnds_leaf_to_root_asin(node, bnds_dict, feasibility_tol): assert len(node.args) == 1 arg = node.args[0] lb1, ub1 = bnds_dict[arg] - bnds_dict[node] = interval.asin(lb1, ub1, -interval.inf, interval.inf) + bnds_dict[node] = interval.asin(lb1, ub1, -interval.inf, interval.inf, feasibility_tol) def 
_prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol): @@ -339,7 +339,7 @@ def _prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol): assert len(node.args) == 1 arg = node.args[0] lb1, ub1 = bnds_dict[arg] - bnds_dict[node] = interval.acos(lb1, ub1, -interval.inf, interval.inf) + bnds_dict[node] = interval.acos(lb1, ub1, -interval.inf, interval.inf, feasibility_tol) def _prop_bnds_leaf_to_root_atan(node, bnds_dict, feasibility_tol): @@ -809,7 +809,7 @@ def _prop_bnds_root_to_leaf_sin(node, bnds_dict, feasibility_tol): arg = node.args[0] lb0, ub0 = bnds_dict[node] lb1, ub1 = bnds_dict[arg] - _lb1, _ub1 = interval.asin(lb0, ub0, lb1, ub1) + _lb1, _ub1 = interval.asin(lb0, ub0, lb1, ub1, feasibility_tol) if _lb1 > lb1: lb1 = _lb1 if _ub1 < ub1: @@ -835,7 +835,7 @@ def _prop_bnds_root_to_leaf_cos(node, bnds_dict, feasibility_tol): arg = node.args[0] lb0, ub0 = bnds_dict[node] lb1, ub1 = bnds_dict[arg] - _lb1, _ub1 = interval.acos(lb0, ub0, lb1, ub1) + _lb1, _ub1 = interval.acos(lb0, ub0, lb1, ub1, feasibility_tol) if _lb1 > lb1: lb1 = _lb1 if _ub1 < ub1: diff --git a/pyomo/contrib/fbbt/interval.py b/pyomo/contrib/fbbt/interval.py index ee4b59f29b9..df305cfde93 100644 --- a/pyomo/contrib/fbbt/interval.py +++ b/pyomo/contrib/fbbt/interval.py @@ -418,7 +418,7 @@ def tan(xl, xu): return lb, ub -def asin(xl, xu, yl, yu): +def asin(xl, xu, yl, yu, feasibility_tol): """ y = asin(x); propagate bounds from x to y x = sin(y) @@ -471,7 +471,7 @@ def asin(xl, xu, yl, yu): # satisfies xl = sin(y) lb1 = i1 + dist lb2 = i2 + dist - if lb1 >= yl: + if lb1 >= yl - feasibility_tol: lb = lb1 else: lb = lb2 @@ -486,7 +486,7 @@ def asin(xl, xu, yl, yu): dist = pi / 2 - y_tmp lb1 = i1 + dist lb2 = i2 + dist - if lb1 >= yl: + if lb1 >= yl - feasibility_tol: lb = lb1 else: lb = lb2 @@ -506,7 +506,7 @@ def asin(xl, xu, yl, yu): dist = pi / 2 - y_tmp ub1 = i1 - dist ub2 = i2 - dist - if ub1 <= yu: + if ub1 <= yu + feasibility_tol: ub = ub1 else: ub = ub2 @@ -521,7 +521,7 @@ 
def asin(xl, xu, yl, yu): dist = y_tmp - (-pi / 2) ub1 = i1 - dist ub2 = i2 - dist - if ub1 <= yu: + if ub1 <= yu + feasibility_tol: ub = ub1 else: ub = ub2 @@ -529,7 +529,7 @@ def asin(xl, xu, yl, yu): return lb, ub -def acos(xl, xu, yl, yu): +def acos(xl, xu, yl, yu, feasibility_tol): """ y = acos(x); propagate bounds from x to y x = cos(y) @@ -582,7 +582,7 @@ def acos(xl, xu, yl, yu): # satisfies xl = sin(y) lb1 = i1 + dist lb2 = i2 + dist - if lb1 >= yl: + if lb1 >= yl - feasibility_tol: lb = lb1 else: lb = lb2 @@ -598,7 +598,7 @@ def acos(xl, xu, yl, yu): dist = y_tmp lb1 = i1 + dist lb2 = i2 + dist - if lb1 >= yl: + if lb1 >= yl - feasibility_tol: lb = lb1 else: lb = lb2 @@ -618,7 +618,7 @@ def acos(xl, xu, yl, yu): dist = y_tmp ub1 = i1 - dist ub2 = i2 - dist - if ub1 <= yu: + if ub1 <= yu + feasibility_tol: ub = ub1 else: ub = ub2 @@ -633,7 +633,7 @@ def acos(xl, xu, yl, yu): dist = pi - y_tmp ub1 = i1 - dist ub2 = i2 - dist - if ub1 <= yu: + if ub1 <= yu + feasibility_tol: ub = ub1 else: ub = ub2 diff --git a/pyomo/contrib/fbbt/tests/test_fbbt.py b/pyomo/contrib/fbbt/tests/test_fbbt.py index 9ac426f0706..8c96d26f10b 100644 --- a/pyomo/contrib/fbbt/tests/test_fbbt.py +++ b/pyomo/contrib/fbbt/tests/test_fbbt.py @@ -797,3 +797,22 @@ def test_encountered_bugs2(self): self.assertEqual(m.x.ub, None) self.assertEqual(m.y.lb, None) self.assertEqual(m.y.ub, None) + + def test_encountered_bugs3(self): + xl = 0.033689710575092756 + xu = 0.04008169994804723 + yl = 0.03369608678342047 + yu = 0.04009243987444148 + + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(xl, xu)) + m.y = pe.Var(bounds=(yl, yu)) + + m.c = pe.Constraint(expr=m.x == pe.sin(m.y)) + + fbbt(m.c) + + self.assertAlmostEqual(m.x.lb, xl) + self.assertAlmostEqual(m.x.ub, xu) + self.assertAlmostEqual(m.y.lb, yl) + self.assertAlmostEqual(m.y.ub, yu) diff --git a/pyomo/contrib/fbbt/tests/test_interval.py b/pyomo/contrib/fbbt/tests/test_interval.py index b2f0bfebd6d..0160c7163e7 100644 --- 
a/pyomo/contrib/fbbt/tests/test_interval.py +++ b/pyomo/contrib/fbbt/tests/test_interval.py @@ -252,55 +252,55 @@ def test_tan(self): @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_asin(self): - yl, yu = interval.asin(-0.5, 0.5, -interval.inf, interval.inf) + yl, yu = interval.asin(-0.5, 0.5, -interval.inf, interval.inf, feasibility_tol=1e-8) self.assertEqual(yl, -interval.inf) self.assertEqual(yu, interval.inf) - yl, yu = interval.asin(-0.5, 0.5, -math.pi, math.pi) + yl, yu = interval.asin(-0.5, 0.5, -math.pi, math.pi, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -math.pi, 12) self.assertAlmostEqual(yu, math.pi, 12) - yl, yu = interval.asin(-0.5, 0.5, -math.pi/2, math.pi/2) + yl, yu = interval.asin(-0.5, 0.5, -math.pi/2, math.pi/2, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = interval.asin(-0.5, 0.5, -math.pi/2-0.1, math.pi/2+0.1) + yl, yu = interval.asin(-0.5, 0.5, -math.pi/2-0.1, math.pi/2+0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = interval.asin(-0.5, 0.5, -math.pi/2+0.1, math.pi/2-0.1) + yl, yu = interval.asin(-0.5, 0.5, -math.pi/2+0.1, math.pi/2-0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi, 1.5*math.pi) + yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi, 1.5*math.pi, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) - yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi-0.1, 1.5*math.pi+0.1) + yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi-0.1, 1.5*math.pi+0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) - yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi+0.1, 1.5*math.pi-0.1) + yl, yu = interval.asin(-0.5, 0.5, 
-1.5*math.pi+0.1, 1.5*math.pi-0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_acos(self): - yl, yu = interval.acos(-0.5, 0.5, -interval.inf, interval.inf) + yl, yu = interval.acos(-0.5, 0.5, -interval.inf, interval.inf, feasibility_tol=1e-8) self.assertEqual(yl, -interval.inf) self.assertEqual(yu, interval.inf) - yl, yu = interval.acos(-0.5, 0.5, -0.5*math.pi, 0.5*math.pi) + yl, yu = interval.acos(-0.5, 0.5, -0.5*math.pi, 0.5*math.pi, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -0.5*math.pi, 12) self.assertAlmostEqual(yu, 0.5*math.pi, 12) - yl, yu = interval.acos(-0.5, 0.5, 0, math.pi) + yl, yu = interval.acos(-0.5, 0.5, 0, math.pi, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) - yl, yu = interval.acos(-0.5, 0.5, 0-0.1, math.pi+0.1) + yl, yu = interval.acos(-0.5, 0.5, 0-0.1, math.pi+0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) - yl, yu = interval.acos(-0.5, 0.5, 0+0.1, math.pi-0.1) + yl, yu = interval.acos(-0.5, 0.5, 0+0.1, math.pi-0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) - yl, yu = interval.acos(-0.5, 0.5, -math.pi, 0) + yl, yu = interval.acos(-0.5, 0.5, -math.pi, 0, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) self.assertAlmostEqual(yu, -math.acos(0.5), 12) - yl, yu = interval.acos(-0.5, 0.5, -math.pi-0.1, 0+0.1) + yl, yu = interval.acos(-0.5, 0.5, -math.pi-0.1, 0+0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) self.assertAlmostEqual(yu, -math.acos(0.5), 12) - yl, yu = interval.acos(-0.5, 0.5, -math.pi+0.1, 0-0.1) + yl, yu = interval.acos(-0.5, 0.5, -math.pi+0.1, 0-0.1, feasibility_tol=1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) 
self.assertAlmostEqual(yu, -math.acos(0.5), 12) diff --git a/pyomo/contrib/fme/__init__.py b/pyomo/contrib/fme/__init__.py index 7f6aba5f78c..e69de29bb2d 100644 --- a/pyomo/contrib/fme/__init__.py +++ b/pyomo/contrib/fme/__init__.py @@ -1 +0,0 @@ -import pyomo.contrib.fme.fourier_motzkin_elimination diff --git a/pyomo/contrib/fme/fourier_motzkin_elimination.py b/pyomo/contrib/fme/fourier_motzkin_elimination.py index c7d38606686..ac45e34a7d8 100644 --- a/pyomo/contrib/fme/fourier_motzkin_elimination.py +++ b/pyomo/contrib/fme/fourier_motzkin_elimination.py @@ -10,7 +10,7 @@ from pyomo.core import (Var, Block, Constraint, Param, Set, Suffix, Expression, Objective, SortComponents, value, ConstraintList) -from pyomo.core.base import (TransformationFactory, _VarData) +from pyomo.core.base import TransformationFactory, _VarData from pyomo.core.base.block import _BlockData from pyomo.core.base.param import _ParamData from pyomo.core.base.constraint import _ConstraintData @@ -20,6 +20,34 @@ from pyomo.repn.standard_repn import generate_standard_repn from pyomo.core.kernel.component_map import ComponentMap from pyomo.core.kernel.component_set import ComponentSet +from pyomo.opt import TerminationCondition + +import logging + +from six import iteritems +import inspect + +logger = logging.getLogger('pyomo.contrib.fourier_motzkin_elimination') + +def _check_var_bounds_filter(constraint): + """Check if the constraint is already implied by the variable bounds""" + # this is one of our constraints, so we know that it is >=. + min_lhs = 0 + for v, coef in iteritems(constraint['map']): + if coef > 0: + if v.lb is None: + return True # we don't have var bounds with which to imply the + # constraint... + min_lhs += coef*v.lb + elif coef < 0: + if v.ub is None: + return True # we don't have var bounds with which to imply the + # constraint... + min_lhs += coef*v.ub + # we do need value here since we didn't control v.lb and v.ub above. 
+ if value(min_lhs) >= constraint['lower']: + return False # constraint implied by var bounds + return True def vars_to_eliminate_list(x): if isinstance(x, (Var, _VarData)): @@ -68,6 +96,20 @@ class Fourier_Motzkin_Elimination_Transformation(Transformation): Note that these variables must all be continuous and the model must be linear.""" )) + CONFIG.declare('constraint_filtering_callback', ConfigValue( + default=_check_var_bounds_filter, + description="A callback that determines whether or not new " + "constraints generated by Fourier-Motzkin elimination are added " + "to the model", + doc=""" + Specify None in order for no constraint filtering to occur during the + transformation. + + Specify a function that accepts a constraint (represented in the >= + dictionary form used in this transformation) and returns a Boolean + indicating whether or not to add it to the model. + """ + )) def __init__(self): """Initialize transformation object""" @@ -77,6 +119,7 @@ def _apply_to(self, instance, **kwds): config = self.CONFIG(kwds.pop('options', {})) config.set_value(kwds) vars_to_eliminate = config.vars_to_eliminate + self.constraint_filter = config.constraint_filtering_callback if vars_to_eliminate is None: raise RuntimeError("The Fourier-Motzkin Elimination transformation " "requires the argument vars_to_eliminate, a " @@ -100,13 +143,13 @@ def _apply_to(self, instance, **kwds): descend_into=Block, sort=SortComponents.deterministic, active=True): - if obj.type() in ctypes_not_to_transform: + if obj.ctype in ctypes_not_to_transform: continue - elif obj.type() is Constraint: + elif obj.ctype is Constraint: cons_list = self._process_constraint(obj) constraints.extend(cons_list) obj.deactivate() # the truth will be on our transformation block - elif obj.type() is Var: + elif obj.ctype is Var: # variable bounds are constraints, but we only need them if this # is a variable we are projecting out if obj not in vars_to_eliminate: @@ -126,13 +169,23 @@ def _apply_to(self, 
instance, **kwds): "handle purely algebraic models. That is, only " "Sets, Params, Vars, Constraints, Expressions, Blocks, " "and Objectives may be active on the model." % (obj.name, - obj.type())) + obj.ctype)) new_constraints = self._fourier_motzkin_elimination(constraints, vars_to_eliminate) # put the new constraints on the transformation block for cons in new_constraints: + if self.constraint_filter is not None: + try: + keep = self.constraint_filter(cons) + except: + logger.error("Problem calling constraint filter callback " + "on constraint with right-hand side %s and " + "body:\n%s" % (cons['lower'], cons['body'])) + raise + if not keep: + continue body = cons['body'] lhs = sum(coef*var for (coef, var) in zip(body.linear_coefs, body.linear_vars)) + \ @@ -153,7 +206,7 @@ def _apply_to(self, instance, **kwds): projected_constraints.add(lhs >= lower) def _process_constraint(self, constraint): - """Transforms a pyomo Constraint objective into a list of dictionaries + """Transforms a pyomo Constraint object into a list of dictionaries representing only >= constraints. That is, if the constraint has both an ub and a lb, it is transformed into two constraints. Otherwise it is flipped if it is <=. 
Each dictionary contains the keys 'lower', @@ -163,10 +216,12 @@ def _process_constraint(self, constraint): """ body = constraint.body std_repn = generate_standard_repn(body) - cons_dict = {'lower': constraint.lower, + # make sure that we store the lower bound's value so that we need not + # worry again during the transformation + cons_dict = {'lower': value(constraint.lower), 'body': std_repn } - upper = constraint.upper + upper = value(constraint.upper) constraints_to_add = [cons_dict] if upper is not None: # if it has both bounds @@ -191,14 +246,18 @@ def _move_constant_and_add_map(self, cons_dict): and moves the constant to the RHS """ body = cons_dict['body'] - constant = body.constant + constant = value(body.constant) cons_dict['lower'] -= constant body.constant = 0 # store a map of vars to coefficients. We can't use this in place of # standard repn because determinism, but this will save a lot of linear - # time searches later. - cons_dict['map'] = ComponentMap(zip(body.linear_vars, body.linear_coefs)) + # time searches later. Note also that we will take the value of the + # coeficient here so that we never have to worry about it again during + # the transformation. + cons_dict['map'] = ComponentMap(zip(body.linear_vars, + [value(coef) for coef in + body.linear_coefs])) def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): """Performs FME on the constraint list in the argument @@ -322,3 +381,89 @@ def _add_linear_constraints(self, cons1, cons2): ans['lower'] = cons1['lower'] + cons2['lower'] return ans + + def post_process_fme_constraints(self, m, solver_factory, tolerance=0): + """Function that solves a sequence of LPs problems to check if + constraints are implied by each other. Deletes any that are. + + Parameters + ---------------- + m: A model, already transformed with FME. Note that if constraints + have been added, activated, or deactivated, we will check for + redundancy against the whole active part of the model. 
If you call + this straight after FME, you are only checking within the projected + constraints, but otherwise it is up to the user. + solver_factory: A SolverFactory object (constructed with a solver + which can solve the continuous relaxation of the + active constraints on the model. That is, if you + had nonlinear constraints unrelated to the variables + being projected, you need to either deactivate them or + provide a solver which will do the right thing.) + tolerance: Tolerance at which we decide a constraint is implied by the + others. Default is 0, meaning we remove the constraint if + the LP solve finds the constraint can be tight but not + violated. Setting this to a small positive value would + remove constraints more conservatively. Setting it to a + negative value would result in a relaxed problem. + """ + # make sure m looks like what we expect + if not hasattr(m, "_pyomo_contrib_fme_transformation"): + raise RuntimeError("It looks like model %s has not been " + "transformed with the " + "fourier_motzkin_elimination transformation!" + % m.name) + transBlock = m._pyomo_contrib_fme_transformation + constraints = transBlock.projected_constraints + + # relax integrality so that we can do this with LP solves. + TransformationFactory('core.relax_integer_vars').apply_to( + m, transform_deactivated_blocks=True) + # deactivate any active objectives on the model, and save what we did so + # we can undo it after. + active_objs = [] + for obj in m.component_data_objects(Objective, descend_into=True): + if obj.active: + active_objs.append(obj) + obj.deactivate() + # add placeholder for our own objective + obj_name = unique_component_name(m, '_fme_post_process_obj') + obj = Objective(expr=0) + m.add_component(obj_name, obj) + for i in constraints: + # If someone wants us to ignore it and leave it in the model, we + # can. 
+ if not constraints[i].active: + continue + # deactivate the constraint + constraints[i].deactivate() + m.del_component(obj) + # make objective to maximize its infeasibility + obj = Objective(expr=constraints[i].body - constraints[i].lower) + m.add_component(obj_name, obj) + results = solver_factory.solve(m) + print(results.solver.termination_condition) + if results.solver.termination_condition == \ + TerminationCondition.unbounded: + obj_val = -float('inf') + elif results.solver.termination_condition != \ + TerminationCondition.optimal: + raise RuntimeError("Unsuccessful subproblem solve when checking" + "constraint %s.\n\t" + "Termination Condition: %s" % + (constraints[i].name, + results.solver.termination_condition)) + else: + obj_val = value(obj) + # if we couldn't make it infeasible, it's useless + if obj_val >= tolerance: + m.del_component(constraints[i]) + del constraints[i] + else: + constraints[i].activate() + + # clean up + m.del_component(obj) + for obj in active_objs: + obj.activate() + # undo relax integrality + TransformationFactory('core.relax_integer_vars').apply_to(m, undo=True) diff --git a/pyomo/contrib/fme/plugins.py b/pyomo/contrib/fme/plugins.py index ef739700808..73e6acc24ce 100644 --- a/pyomo/contrib/fme/plugins.py +++ b/pyomo/contrib/fme/plugins.py @@ -1,7 +1,2 @@ -from pyomo.core.base import TransformationFactory -from .fourier_motzkin_elimination import \ - Fourier_Motzkin_Elimination_Transformation - def load(): - TransformationFactory.register('contrib.fourier_motzkin_elimination')( - Fourier_Motzkin_Elimination_Transformation) + import pyomo.contrib.fme.fourier_motzkin_elimination diff --git a/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py index 21e23278fc4..731ff374cf9 100644 --- a/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py +++ b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py @@ -13,15 +13,29 @@ currdir = 
dirname(abspath(__file__))+os.sep import pyutilib.th as unittest +from pyomo.common.log import LoggingIntercept from pyomo.core import (Var, Constraint, Param, ConcreteModel, NonNegativeReals, - Binary, value, Block) + Binary, value, Block, Objective) from pyomo.core.base import TransformationFactory from pyomo.core.expr.current import log from pyomo.gdp import Disjunction, Disjunct from pyomo.repn.standard_repn import generate_standard_repn from pyomo.core.kernel.component_set import ComponentSet +from pyomo.opt import SolverFactory, check_available_solvers +import pyomo.contrib.fme.fourier_motzkin_elimination + +from six import StringIO +import logging +import random + +solvers = check_available_solvers('glpk') class TestFourierMotzkinElimination(unittest.TestCase): + def setUp(self): + # will need this so we know transformation block names in the test that + # includes hull transformation + random.seed(666) + @staticmethod def makeModel(): """ @@ -59,10 +73,13 @@ def test_no_vars_specified(self): apply_to, m) - def check_projected_constraints(self, m): + unfiltered_indices = [1, 2, 3, 6] + filtered_indices = [1, 2, 3, 4] + + def check_projected_constraints(self, m, indices): constraints = m._pyomo_contrib_fme_transformation.projected_constraints # x - 0.01y <= 1 - cons = constraints[1] + cons = constraints[indices[0]] self.assertEqual(value(cons.lower), -1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -76,7 +93,7 @@ def check_projected_constraints(self, m): self.assertEqual(coefs[1], 0.01) # y <= 1000*(1 - u_1) - cons = constraints[2] + cons = constraints[indices[1]] self.assertEqual(value(cons.lower), -1000) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -89,7 +106,7 @@ def check_projected_constraints(self, m): self.assertEqual(coefs[1], -1000) # -x + 0.01y + 1 <= 1000*(1 - u_2) - cons = constraints[3] + cons = constraints[indices[2]] self.assertEqual(value(cons.lower), -999) self.assertIsNone(cons.upper) 
body = generate_standard_repn(cons.body) @@ -104,7 +121,7 @@ def check_projected_constraints(self, m): self.assertEqual(coefs[2], -1000) # u_2 + 100u_1 >= 1 - cons = constraints[6] + cons = constraints[indices[3]] self.assertEqual(value(cons.lower), 1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -120,27 +137,44 @@ def test_transformed_constraints_indexed_var_arg(self): m = self.makeModel() TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( m, - vars_to_eliminate = m.lamb) - + vars_to_eliminate = m.lamb, + constraint_filtering_callback=None) # we get some trivial constraints too, but let's check that the ones # that should be there really are - self.check_projected_constraints(m) + self.check_projected_constraints(m, self.unfiltered_indices) def test_transformed_constraints_varData_list_arg(self): m = self.makeModel() TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( m, - vars_to_eliminate = [m.lamb[1], m.lamb[2]]) + vars_to_eliminate = [m.lamb[1], m.lamb[2]], + constraint_filtering_callback=None) - self.check_projected_constraints(m) + self.check_projected_constraints(m, self.unfiltered_indices) def test_transformed_constraints_indexedVar_list(self): m = self.makeModel() TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( m, - vars_to_eliminate = [m.lamb]) + vars_to_eliminate = [m.lamb], + constraint_filtering_callback=None) - self.check_projected_constraints(m) + self.check_projected_constraints(m, self.unfiltered_indices) + + def test_default_constraint_filtering(self): + # We will filter constraints which are trivial based on variable bounds + # during the transformation. This checks that we removed the constraints + # we expect. 
+ m = self.makeModel() + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, + vars_to_eliminate = m.lamb) + + # we still have all the right constraints + self.check_projected_constraints(m, self.filtered_indices) + # but now we *only* have the right constraints + constraints = m._pyomo_contrib_fme_transformation.projected_constraints + self.assertEqual(len(constraints), 4) def test_original_constraints_deactivated(self): m = self.makeModel() @@ -218,6 +252,44 @@ def test_components_we_do_not_understand_error(self): m, vars_to_eliminate=m.x) + def test_bad_constraint_filtering_callback_error(self): + m = self.makeModel() + def not_a_callback(cons): + raise RuntimeError("I don't know how to do my job.") + fme = TransformationFactory('contrib.fourier_motzkin_elimination') + log = StringIO() + with LoggingIntercept(log, 'pyomo.contrib.fourier_motzkin_elimination', + logging.ERROR): + self.assertRaisesRegexp( + RuntimeError, + "I don't know how to do my job.", + fme.apply_to, + m, + vars_to_eliminate=m.x, + constraint_filtering_callback=not_a_callback) + self.assertRegexpMatches( + log.getvalue(), + "Problem calling constraint filter callback " + "on constraint with right-hand side -1.0 and body:*") + + def test_constraint_filtering_callback_not_callable_error(self): + m = self.makeModel() + fme = TransformationFactory('contrib.fourier_motzkin_elimination') + log = StringIO() + with LoggingIntercept(log, 'pyomo.contrib.fourier_motzkin_elimination', + logging.ERROR): + self.assertRaisesRegexp( + TypeError, + "'int' object is not callable", + fme.apply_to, + m, + vars_to_eliminate=m.x, + constraint_filtering_callback=5) + self.assertRegexpMatches( + log.getvalue(), + "Problem calling constraint filter callback " + "on constraint with right-hand side -1.0 and body:*") + def test_combine_three_inequalities_and_flatten_blocks(self): m = ConcreteModel() m.x = Var() @@ -242,48 +314,9 @@ def test_combine_three_inequalities_and_flatten_blocks(self): 
self.assertIsNone(cons.upper) self.assertIs(cons.body, m.x) - def test_project_disaggregated_vars(self): - """This is a little bit more of an integration test with GDP, - but also an example of why FME is 'useful.' We will give a GDP, - take chull relaxation, and then project out the disaggregated - variables.""" - - m = ConcreteModel() - m.p = Var([1, 2], bounds=(0, 10)) - m.time1 = Disjunction(expr=[m.p[1] >= 1, m.p[1] == 0]) - - m.on = Disjunct() - m.on.above_min = Constraint(expr=m.p[2] >= 1) - m.on.ramping = Constraint(expr=m.p[2] - m.p[1] <= 3) - m.on.on_before = Constraint(expr=m.p[1] >= 1) - - m.startup = Disjunct() - m.startup.startup_limit = Constraint(expr=(1, m.p[2], 2)) - m.startup.off_before = Constraint(expr=m.p[1] == 0) - - m.off = Disjunct() - m.off.off = Constraint(expr=m.p[2] == 0) - m.time2 = Disjunction(expr=[m.on, m.startup, m.off]) - - TransformationFactory('gdp.chull').apply_to(m) - relaxationBlocks = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - disaggregatedVars = ComponentSet([relaxationBlocks[0].component("p[1]"), - relaxationBlocks[1].component("p[1]"), - relaxationBlocks[2].component("p[1]"), - relaxationBlocks[2].component("p[2]"), - relaxationBlocks[3].component("p[1]"), - relaxationBlocks[3].component("p[2]"), - relaxationBlocks[4].component("p[1]"), - relaxationBlocks[4].component("p[2]")]) - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, vars_to_eliminate=disaggregatedVars) - - constraints = m._pyomo_contrib_fme_transformation.projected_constraints - # we of course get tremendous amounts of garbage, but we make sure that - # what should be here is: - + def check_hull_projected_constraints(self, m, constraints, indices): # p[1] >= on.ind_var - cons = constraints[22] + cons = constraints[indices[0]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -296,7 +329,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[1], 
-1) # p[1] <= 10*on.ind_var + 10*off.ind_var - cons = constraints[20] + cons = constraints[indices[1]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -311,7 +344,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[2], -1) # p[1] >= time1_disjuncts[0].ind_var - cons = constraints[58] + cons = constraints[indices[2]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -324,7 +357,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[0], 1) # p[1] <= 10*time1_disjuncts[0].ind_var - cons = constraints[61] + cons = constraints[indices[3]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -337,7 +370,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[1], -1) # p[2] - p[1] <= 3*on.ind_var + 2*startup.ind_var - cons = constraints[56] + cons = constraints[indices[4]] self.assertEqual(value(cons.lower), 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -354,7 +387,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[2], 2) # p[2] >= on.ind_var + startup.ind_var - cons = constraints[38] + cons = constraints[indices[5]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -369,7 +402,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[2], -1) # p[2] <= 10*on.ind_var + 2*startup.ind_var - cons = constraints[32] + cons = constraints[indices[6]] self.assertEqual(cons.lower, 0) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -384,7 +417,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[2], -1) # 1 <= time1_disjuncts[0].ind_var + time_1.disjuncts[1].ind_var - cons = constraints[1] + cons = constraints[indices[7]] 
self.assertEqual(cons.lower, 1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -397,7 +430,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[1], 1) # 1 >= time1_disjuncts[0].ind_var + time_1.disjuncts[1].ind_var - cons = constraints[2] + cons = constraints[indices[8]] self.assertEqual(cons.lower, -1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -410,7 +443,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[1], -1) # 1 <= on.ind_var + startup.ind_var + off.ind_var - cons = constraints[3] + cons = constraints[indices[9]] self.assertEqual(cons.lower, 1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -425,7 +458,7 @@ def test_project_disaggregated_vars(self): self.assertEqual(body.linear_coefs[2], 1) # 1 >= on.ind_var + startup.ind_var + off.ind_var - cons = constraints[4] + cons = constraints[indices[10]] self.assertEqual(cons.lower, -1) self.assertIsNone(cons.upper) body = generate_standard_repn(cons.body) @@ -439,6 +472,105 @@ def test_project_disaggregated_vars(self): self.assertIs(body.linear_vars[2], m.off.indicator_var) self.assertEqual(body.linear_coefs[2], -1) + def create_hull_model(self): + m = ConcreteModel() + m.p = Var([1, 2], bounds=(0, 10)) + m.time1 = Disjunction(expr=[m.p[1] >= 1, m.p[1] == 0]) + + m.on = Disjunct() + m.on.above_min = Constraint(expr=m.p[2] >= 1) + m.on.ramping = Constraint(expr=m.p[2] - m.p[1] <= 3) + m.on.on_before = Constraint(expr=m.p[1] >= 1) + + m.startup = Disjunct() + m.startup.startup_limit = Constraint(expr=(1, m.p[2], 2)) + m.startup.off_before = Constraint(expr=m.p[1] == 0) + + m.off = Disjunct() + m.off.off = Constraint(expr=m.p[2] == 0) + m.time2 = Disjunction(expr=[m.on, m.startup, m.off]) + + m.obj = Objective(expr=m.p[1] + m.p[2]) + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + disaggregatedVars = ComponentSet( + [hull.get_disaggregated_var(m.p[1], 
m.time1.disjuncts[0]), + hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[1]), + hull.get_disaggregated_var(m.p[1], m.on), + hull.get_disaggregated_var(m.p[2], m.on), + hull.get_disaggregated_var(m.p[1], m.startup), + hull.get_disaggregated_var(m.p[2], m.startup), + hull.get_disaggregated_var(m.p[1], m.off), + hull.get_disaggregated_var(m.p[2], m.off) + ]) + + # from nose.tools import set_trace + # set_trace() + # disaggregatedVars = ComponentSet([relaxationBlocks[0].component("p[1]"), + # relaxationBlocks[1].component("p[1]"), + # relaxationBlocks[2].component("p[1]"), + # relaxationBlocks[2].component("p[2]"), + # relaxationBlocks[3].component("p[1]"), + # relaxationBlocks[3].component("p[2]"), + # relaxationBlocks[4].component("p[1]"), + # relaxationBlocks[4].component("p[2]")]) + + return m, disaggregatedVars + + def test_project_disaggregated_vars(self): + """This is a little bit more of an integration test with GDP, + but also an example of why FME is 'useful.' We will give a GDP, + take hull relaxation, and then project out the disaggregated + variables.""" + m, disaggregatedVars = self.create_hull_model() + + filtered = TransformationFactory('contrib.fourier_motzkin_elimination').\ + create_using(m, vars_to_eliminate=disaggregatedVars) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, vars_to_eliminate=disaggregatedVars, + constraint_filtering_callback=None) + + constraints = m._pyomo_contrib_fme_transformation.projected_constraints + # we of course get tremendous amounts of garbage, but we make sure that + # what should be here is: + self.check_hull_projected_constraints(m, constraints, [22, 20, 58, 61, + 56, 38, 32, 1, 2, + 4, 5]) + # and when we filter, it's still there. 
+ constraints = filtered._pyomo_contrib_fme_transformation.\ + projected_constraints + self.check_hull_projected_constraints(filtered, constraints, [6, 5, 16, + 17, 15, + 11, 8, 1, + 2, 3, 4]) + + @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') + def test_post_processing(self): + m, disaggregatedVars = self.create_hull_model() + fme = TransformationFactory('contrib.fourier_motzkin_elimination') + fme.apply_to(m, vars_to_eliminate=disaggregatedVars) + # post-process + fme.post_process_fme_constraints(m, SolverFactory('glpk')) + + constraints = m._pyomo_contrib_fme_transformation.projected_constraints + self.assertEqual(len(constraints), 11) + + # They should be the same as the above, but now these are *all* the + # constraints + self.check_hull_projected_constraints(m, constraints, [6, 5, 16, 17, + 15, 11, 8, 1, 2, + 3, 4]) + + # and check that we didn't change the model + for disj in m.component_data_objects(Disjunct): + self.assertIs(disj.indicator_var.domain, Binary) + self.assertEqual(len([o for o in m.component_data_objects(Objective)]), + 1) + self.assertIsInstance(m.component("obj"), Objective) + self.assertTrue(m.obj.active) + + @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') def test_model_with_unrelated_nonlinear_expressions(self): m = ConcreteModel() m.x = Var([1, 2, 3], bounds=(0,3)) @@ -454,8 +586,9 @@ def cons(m, i): # This is vacuous, but I just want something that's not quadratic m.cons4 = Constraint(expr=m.x[3] <= log(m.y + 1)) - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to(m, vars_to_eliminate=m.x) + fme = TransformationFactory('contrib.fourier_motzkin_elimination') + fme.apply_to(m, vars_to_eliminate=m.x, + constraint_filtering_callback=None) constraints = m._pyomo_contrib_fme_transformation.projected_constraints # 0 <= y <= 3 @@ -524,3 +657,27 @@ def cons(m, i): for i in constraints: self.assertLessEqual(value(constraints[i].lower), value(constraints[i].body)) + m.y.fixed = False + 
m.z.fixed = False + + # check post process these are non-convex, so I don't want to deal with + it... (and this is a good test that I *don't* deal with it.) + constraints[4].deactivate() + constraints[3].deactivate() + constraints[1].deactivate() + # NOTE also that some of the subproblems in this test are unbounded: We + need to keep those constraints. + fme.post_process_fme_constraints(m, SolverFactory('glpk')) + # we needed all the constraints, so we kept them all + self.assertEqual(len(constraints), 6) + + # last check that if someone activates something on the model in + # between, we just use it. (I struggle to imagine why you would do this + # because why withhold the information *during* FME, but if there's some + # reason, we may as well use all the information we've got.) + m.some_new_cons = Constraint(expr=m.y <= 2) + fme.post_process_fme_constraints(m, SolverFactory('glpk')) + # now we should have lost one constraint + self.assertEqual(len(constraints), 5) + # and it should be the y <= 3 one... + self.assertIsNone(dict(constraints).get(5)) diff --git a/pyomo/contrib/gdpopt/GDPopt.py b/pyomo/contrib/gdpopt/GDPopt.py index c0ab2320d8b..0ccc1f7e225 100644 --- a/pyomo/contrib/gdpopt/GDPopt.py +++ b/pyomo/contrib/gdpopt/GDPopt.py @@ -105,7 +105,7 @@ def solve(self, model, **kwds): model (Block): a Pyomo model or block to be solved """ - config = self.CONFIG(kwds.pop('options', {})) + config = self.CONFIG(kwds.pop('options', {}), preserve_implicit=True) config.set_value(kwds) with setup_solver_environment(model, config) as solve_data: diff --git a/pyomo/contrib/gdpopt/branch_and_bound.py b/pyomo/contrib/gdpopt/branch_and_bound.py index bed740da6fd..70b5f063270 100644 --- a/pyomo/contrib/gdpopt/branch_and_bound.py +++ b/pyomo/contrib/gdpopt/branch_and_bound.py @@ -228,7 +228,7 @@ def _prescreen_node(node_data, node_model, solve_data): if node_data.node_count == 0: config.logger.info("Root node is not satisfiable. 
Problem is infeasible.") else: - config.debug.info("SAT solver pruned node %s" % node_data.node_count) + config.logger.info("SAT solver pruned node %s" % node_data.node_count) new_lb = new_ub = float('inf') else: # Solve model subproblem diff --git a/pyomo/contrib/gdpopt/cut_generation.py b/pyomo/contrib/gdpopt/cut_generation.py index 3090211882f..6b6db57e5df 100644 --- a/pyomo/contrib/gdpopt/cut_generation.py +++ b/pyomo/contrib/gdpopt/cut_generation.py @@ -1,7 +1,9 @@ """This module provides functions for cut generation.""" from __future__ import division +from collections import namedtuple from math import copysign, fabs +from six import iteritems from pyomo.contrib.gdp_bounds.info import disjunctive_bounds from pyomo.contrib.gdpopt.util import time_code, constraints_in_True_disjuncts from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error @@ -13,6 +15,8 @@ from pyomo.core.kernel.component_set import ComponentSet from pyomo.gdp import Disjunct +MAX_SYMBOLIC_DERIV_SIZE = 1000 +JacInfo = namedtuple('JacInfo', ['mode','vars','jac']) def add_subproblem_cuts(subprob_result, solve_data, config): if config.strategy == "LOA": @@ -60,19 +64,32 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config): "Adding OA cut for %s with dual value %s" % (constr.name, dual_value)) - # Cache jacobians - jacobians = GDPopt.jacobians.get(constr, None) - if jacobians is None: - constr_vars = list(identify_variables(constr.body, include_fixed=False)) - if len(constr_vars) >= 1000: + # Cache jacobian + jacobian = GDPopt.jacobians.get(constr, None) + if jacobian is None: + constr_vars = list(identify_variables( + constr.body, include_fixed=False)) + if len(constr_vars) >= MAX_SYMBOLIC_DERIV_SIZE: mode = differentiate.Modes.reverse_numeric else: mode = differentiate.Modes.sympy + try: + jac_list = differentiate( + constr.body, wrt_list=constr_vars, mode=mode) + jac_map = ComponentMap(zip(constr_vars, jac_list)) + except: + if mode is 
differentiate.Modes.reverse_numeric: + raise + mode = differentiate.Modes.reverse_numeric + jac_map = ComponentMap() + jacobian = JacInfo(mode=mode, vars=constr_vars, jac=jac_map) + GDPopt.jacobians[constr] = jacobian + # Recompute numeric derivatives + if not jacobian.jac: jac_list = differentiate( - constr.body, wrt_list=constr_vars, mode=mode) - jacobians = ComponentMap(zip(constr_vars, jac_list)) - GDPopt.jacobians[constr] = jacobians + constr.body, wrt_list=jacobian.vars, mode=jacobian.mode) + jacobian.jac.update(zip(jacobian.vars, jac_list)) # Create a block on which to put outer approximation cuts. oa_utils = parent_block.component('GDPopt_OA') @@ -92,11 +109,12 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config): new_oa_cut = ( copysign(1, sign_adjust * dual_value) * ( value(constr.body) - rhs + sum( - value(jacobians[var]) * (var - value(var)) - for var in jacobians)) - slack_var <= 0) + value(jac) * (var - value(var)) + for var, jac in iteritems(jacobian.jac)) + ) - slack_var <= 0) if new_oa_cut.polynomial_degree() not in (1, 0): - for var in jacobians: - print(var.name, value(jacobians[var])) + for var, jac in iteritems(jacobian.jac): + print(var.name, value(jac)) oa_cuts.add(expr=new_oa_cut) counter += 1 except ZeroDivisionError: @@ -106,6 +124,9 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config): % (constr.name,) ) # Simply continue on to the next constraint. 
+ # Clear out the numeric Jacobian values + if jacobian.mode is differentiate.Modes.reverse_numeric: + jacobian.jac.clear() config.logger.info('Added %s OA cuts' % counter) diff --git a/pyomo/contrib/gdpopt/tests/test_gdpopt.py b/pyomo/contrib/gdpopt/tests/test_gdpopt.py index 4b223612c3d..50a2f370cc2 100644 --- a/pyomo/contrib/gdpopt/tests/test_gdpopt.py +++ b/pyomo/contrib/gdpopt/tests/test_gdpopt.py @@ -5,7 +5,6 @@ from six import StringIO -import pyomo.core.base.symbolic import pyutilib.th as unittest from pyomo.common.log import LoggingIntercept from pyomo.contrib.gdpopt.GDPopt import GDPoptSolver @@ -148,8 +147,6 @@ def test_is_feasible_function(self): @unittest.skipIf(not LOA_solvers_available, "Required subsolvers %s are not available" % (LOA_solvers,)) -@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, - "Symbolic differentiation is not available") class TestGDPopt(unittest.TestCase): """Tests for the GDPopt solver plugin.""" diff --git a/pyomo/contrib/gdpopt/util.py b/pyomo/contrib/gdpopt/util.py index 3ea56c8c9db..389e96ae092 100644 --- a/pyomo/contrib/gdpopt/util.py +++ b/pyomo/contrib/gdpopt/util.py @@ -105,7 +105,7 @@ def presolve_lp_nlp(solve_data, config): return False, None -def process_objective(solve_data, config, move_linear_objective=False): +def process_objective(solve_data, config, move_linear_objective=False, use_mcpp=True): """Process model objective function. Check that the model has only 1 valid objective. @@ -144,10 +144,11 @@ def process_objective(solve_data, config, move_linear_objective=False): if move_linear_objective: config.logger.info("Moving objective to constraint set.") else: - config.logger.info("Objective is nonlinear. Moving it to constraint set.") + config.logger.info( + "Objective is nonlinear. 
Moving it to constraint set.") util_blk.objective_value = Var(domain=Reals, initialize=0) - if mcpp_available(): + if mcpp_available() and use_mcpp: mc_obj = McCormick(main_obj.expr) util_blk.objective_value.setub(mc_obj.upper()) util_blk.objective_value.setlb(mc_obj.lower()) @@ -206,8 +207,8 @@ def copy_var_list_values(from_list, to_list, config, # Check to see if this is just a tolerance issue if ignore_integrality \ and ('is not in domain Binary' in err_msg - or 'is not in domain Integers' in err_msg): - v_to.value = value(v_from, exception=False) + or 'is not in domain Integers' in err_msg): + v_to.value = value(v_from, exception=False) elif 'is not in domain Binary' in err_msg and ( fabs(var_val - 1) <= config.integer_tolerance or fabs(var_val) <= config.integer_tolerance): diff --git a/pyomo/contrib/interior_point/__init__.py b/pyomo/contrib/interior_point/__init__.py new file mode 100644 index 00000000000..1bc67ee9611 --- /dev/null +++ b/pyomo/contrib/interior_point/__init__.py @@ -0,0 +1,8 @@ +from pyomo.common.dependencies import numpy_available, scipy_available +if not numpy_available or not scipy_available: + import pyutilib.th as unittest + raise unittest.SkipTest('numpy and scipy required for interior point') +from .interface import BaseInteriorPointInterface, InteriorPointInterface +from .interior_point import InteriorPointSolver, InteriorPointStatus +from pyomo.contrib.interior_point import linalg +from .inverse_reduced_hessian import inv_reduced_hessian_barrier diff --git a/pyomo/contrib/interior_point/examples/ex1.py b/pyomo/contrib/interior_point/examples/ex1.py new file mode 100644 index 00000000000..f71c5f27890 --- /dev/null +++ b/pyomo/contrib/interior_point/examples/ex1.py @@ -0,0 +1,28 @@ +import pyomo.environ as pe +from pyomo.contrib.interior_point.interior_point import InteriorPointSolver +from pyomo.contrib.interior_point.interface import InteriorPointInterface +from pyomo.contrib.interior_point.linalg.mumps_interface import 
MumpsInterface +import logging + + +logging.basicConfig(level=logging.INFO) +# Supposedly this sets the root logger's level to INFO. +# But when linear_solver.logger logs with debug, +# it gets propagated to a mysterious root logger with +# level NOTSET... + +m = pe.ConcreteModel() +m.x = pe.Var() +m.y = pe.Var() +m.obj = pe.Objective(expr=m.x**2 + m.y**2) +m.c1 = pe.Constraint(expr=m.y == pe.exp(m.x)) +m.c2 = pe.Constraint(expr=m.y >= (m.x - 1)**2) +interface = InteriorPointInterface(m) +linear_solver = MumpsInterface( +# log_filename='lin_sol.log', + icntl_options={11: 1}, # Set error level to 1 (most detailed) + ) + +ip_solver = InteriorPointSolver(linear_solver) +x, duals_eq, duals_ineq = ip_solver.solve(interface) +print(x, duals_eq, duals_ineq) diff --git a/pyomo/contrib/interior_point/interface.py b/pyomo/contrib/interior_point/interface.py new file mode 100644 index 00000000000..13c5072554a --- /dev/null +++ b/pyomo/contrib/interior_point/interface.py @@ -0,0 +1,624 @@ +from abc import ABCMeta, abstractmethod +import six +from pyomo.contrib.pynumero.interfaces import pyomo_nlp, ampl_nlp +from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector +import numpy as np +import scipy.sparse +from pyutilib.misc.timing import HierarchicalTimer + + +class BaseInteriorPointInterface(six.with_metaclass(ABCMeta, object)): + @abstractmethod + def n_primals(self): + pass + + @abstractmethod + def nnz_hessian_lag(self): + pass + + @abstractmethod + def primals_lb(self): + pass + + @abstractmethod + def primals_ub(self): + pass + + @abstractmethod + def init_primals(self): + pass + + @abstractmethod + def set_primals(self, primals): + pass + + @abstractmethod + def get_primals(self): + pass + + @abstractmethod + def get_obj_factor(self): + pass + + @abstractmethod + def set_obj_factor(self, obj_factor): + pass + + @abstractmethod + def evaluate_objective(self): + pass + + @abstractmethod + def evaluate_grad_objective(self): + pass + + @abstractmethod + def 
n_eq_constraints(self): + pass + + @abstractmethod + def n_ineq_constraints(self): + pass + + @abstractmethod + def nnz_jacobian_eq(self): + pass + + @abstractmethod + def nnz_jacobian_ineq(self): + pass + + @abstractmethod + def ineq_lb(self): + pass + + @abstractmethod + def ineq_ub(self): + pass + + @abstractmethod + def init_duals_eq(self): + pass + + @abstractmethod + def init_duals_ineq(self): + pass + + @abstractmethod + def set_duals_eq(self, duals_eq): + pass + + @abstractmethod + def set_duals_ineq(self, duals_ineq): + pass + + @abstractmethod + def get_duals_eq(self): + pass + + @abstractmethod + def get_duals_ineq(self): + pass + + @abstractmethod + def evaluate_eq_constraints(self): + pass + + @abstractmethod + def evaluate_ineq_constraints(self): + pass + + @abstractmethod + def evaluate_jacobian_eq(self): + pass + + @abstractmethod + def evaluate_jacobian_ineq(self): + pass + + @abstractmethod + def init_slacks(self): + pass + + @abstractmethod + def init_duals_primals_lb(self): + pass + + @abstractmethod + def init_duals_primals_ub(self): + pass + + @abstractmethod + def init_duals_slacks_lb(self): + pass + + @abstractmethod + def init_duals_slacks_ub(self): + pass + + @abstractmethod + def set_slacks(self, slacks): + pass + + @abstractmethod + def set_duals_primals_lb(self, duals): + pass + + @abstractmethod + def set_duals_primals_ub(self, duals): + pass + + @abstractmethod + def set_duals_slacks_lb(self, duals): + pass + + @abstractmethod + def set_duals_slacks_ub(self, duals): + pass + + @abstractmethod + def get_slacks(self): + pass + + @abstractmethod + def get_duals_primals_lb(self): + pass + + @abstractmethod + def get_duals_primals_ub(self): + pass + + @abstractmethod + def get_duals_slacks_lb(self): + pass + + @abstractmethod + def get_duals_slacks_ub(self): + pass + + @abstractmethod + def set_barrier_parameter(self, barrier): + pass + + @abstractmethod + def evaluate_primal_dual_kkt_matrix(self, timer=None): + pass + + @abstractmethod + 
def evaluate_primal_dual_kkt_rhs(self, timer=None): + pass + + @abstractmethod + def set_primal_dual_kkt_solution(self, sol): + pass + + @abstractmethod + def get_delta_primals(self): + pass + + @abstractmethod + def get_delta_slacks(self): + pass + + @abstractmethod + def get_delta_duals_eq(self): + pass + + @abstractmethod + def get_delta_duals_ineq(self): + pass + + @abstractmethod + def get_delta_duals_primals_lb(self): + pass + + @abstractmethod + def get_delta_duals_primals_ub(self): + pass + + @abstractmethod + def get_delta_duals_slacks_lb(self): + pass + + @abstractmethod + def get_delta_duals_slacks_ub(self): + pass + + def regularize_equality_gradient(self, kkt, coef, copy_kkt=True): + raise RuntimeError( + 'Equality gradient regularization is necessary but no ' + 'function has been implemented for doing so.') + + def regularize_hessian(self, kkt, coef, copy_kkt=True): + raise RuntimeError( + 'Hessian of Lagrangian regularization is necessary but no ' + 'function has been implemented for doing so.') + + +class InteriorPointInterface(BaseInteriorPointInterface): + def __init__(self, pyomo_model): + if type(pyomo_model) is str: + # Assume argument is the name of an nl file + self._nlp = ampl_nlp.AmplNLP(pyomo_model) + else: + self._nlp = pyomo_nlp.PyomoNLP(pyomo_model) + self._slacks = self.init_slacks() + + # set the init_duals_primals_lb/ub from ipopt_zL_out, ipopt_zU_out if available + # need to compress them as well and initialize the duals_primals_lb/ub + self._init_duals_primals_lb, self._init_duals_primals_ub =\ + self._get_full_duals_primals_bounds() + self._init_duals_primals_lb[np.isneginf(self._nlp.primals_lb())] = 0 + self._init_duals_primals_ub[np.isinf(self._nlp.primals_ub())] = 0 + self._duals_primals_lb = self._init_duals_primals_lb.copy() + self._duals_primals_ub = self._init_duals_primals_ub.copy() + + # set the init_duals_slacks_lb/ub from the init_duals_ineq + # need to be compressed and set according to their sign + # (-) value 
indicates it the upper is active, while (+) indicates + # that lower is active + self._init_duals_slacks_lb = self._nlp.init_duals_ineq().copy() + self._init_duals_slacks_lb[self._init_duals_slacks_lb < 0] = 0 + self._init_duals_slacks_ub = self._nlp.init_duals_ineq().copy() + self._init_duals_slacks_ub[self._init_duals_slacks_ub > 0] = 0 + self._init_duals_slacks_ub *= -1.0 + + self._duals_slacks_lb = self._init_duals_slacks_lb.copy() + self._duals_slacks_ub = self._init_duals_slacks_ub.copy() + + self._delta_primals = None + self._delta_slacks = None + self._delta_duals_eq = None + self._delta_duals_ineq = None + self._barrier = None + + def n_primals(self): + return self._nlp.n_primals() + + def nnz_hessian_lag(self): + return self._nlp.nnz_hessian_lag() + + def set_obj_factor(self, obj_factor): + self._nlp.set_obj_factor(obj_factor) + + def get_obj_factor(self): + return self._nlp.get_obj_factor() + + def n_eq_constraints(self): + return self._nlp.n_eq_constraints() + + def n_ineq_constraints(self): + return self._nlp.n_ineq_constraints() + + def nnz_jacobian_eq(self): + return self._nlp.nnz_jacobian_eq() + + def nnz_jacobian_ineq(self): + return self._nlp.nnz_jacobian_ineq() + + def init_primals(self): + primals = self._nlp.init_primals() + return primals + + def init_slacks(self): + slacks = self._nlp.evaluate_ineq_constraints() + return slacks + + def init_duals_eq(self): + return self._nlp.init_duals_eq() + + def init_duals_ineq(self): + return self._nlp.init_duals_ineq() + + def init_duals_primals_lb(self): + return self._init_duals_primals_lb + + def init_duals_primals_ub(self): + return self._init_duals_primals_ub + + def init_duals_slacks_lb(self): + return self._init_duals_slacks_lb + + def init_duals_slacks_ub(self): + return self._init_duals_slacks_ub + + def set_primals(self, primals): + self._nlp.set_primals(primals) + + def set_slacks(self, slacks): + self._slacks = slacks + + def set_duals_eq(self, duals): + self._nlp.set_duals_eq(duals) + + def 
set_duals_ineq(self, duals): + self._nlp.set_duals_ineq(duals) + + def set_duals_primals_lb(self, duals): + self._duals_primals_lb = duals + + def set_duals_primals_ub(self, duals): + self._duals_primals_ub = duals + + def set_duals_slacks_lb(self, duals): + self._duals_slacks_lb = duals + + def set_duals_slacks_ub(self, duals): + self._duals_slacks_ub = duals + + def get_primals(self): + return self._nlp.get_primals() + + def get_slacks(self): + return self._slacks + + def get_duals_eq(self): + return self._nlp.get_duals_eq() + + def get_duals_ineq(self): + return self._nlp.get_duals_ineq() + + def get_duals_primals_lb(self): + return self._duals_primals_lb + + def get_duals_primals_ub(self): + return self._duals_primals_ub + + def get_duals_slacks_lb(self): + return self._duals_slacks_lb + + def get_duals_slacks_ub(self): + return self._duals_slacks_ub + + def primals_lb(self): + return self._nlp.primals_lb() + + def primals_ub(self): + return self._nlp.primals_ub() + + def ineq_lb(self): + return self._nlp.ineq_lb() + + def ineq_ub(self): + return self._nlp.ineq_ub() + + def set_barrier_parameter(self, barrier): + self._barrier = barrier + + def pyomo_nlp(self): + return self._nlp + + def evaluate_primal_dual_kkt_matrix(self, timer=None): + if timer is None: + timer = HierarchicalTimer() + timer.start('eval hess') + hessian = self._nlp.evaluate_hessian_lag() + timer.stop('eval hess') + timer.start('eval jac') + jac_eq = self._nlp.evaluate_jacobian_eq() + jac_ineq = self._nlp.evaluate_jacobian_ineq() + timer.stop('eval jac') + + duals_primals_lb = self._duals_primals_lb + duals_primals_ub = self._duals_primals_ub + duals_slacks_lb = self._duals_slacks_lb + duals_slacks_ub = self._duals_slacks_ub + primals = self._nlp.get_primals() + + timer.start('hess block') + data = (duals_primals_lb/(primals - self._nlp.primals_lb()) + + duals_primals_ub/(self._nlp.primals_ub() - primals)) + n = self._nlp.n_primals() + indices = np.arange(n) + hess_block = 
scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n))
        # Primal-primal block: Hessian of the Lagrangian plus the diagonal
        # barrier contribution assembled above from the bound multipliers.
        hess_block += hessian
        timer.stop('hess block')

        timer.start('slack block')
        # Diagonal barrier block for the inequality slacks:
        # Sigma_s = Z_sl / (s - s_lb) + Z_su / (s_ub - s).
        data = (duals_slacks_lb/(self._slacks - self._nlp.ineq_lb()) +
                duals_slacks_ub/(self._nlp.ineq_ub() - self._slacks))
        n = self._nlp.n_ineq_constraints()
        indices = np.arange(n)
        slack_block = scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n))
        timer.stop('slack block')

        timer.start('set block')
        # Assemble the symmetric primal-dual KKT system in 4x4 block form.
        # Row/column ordering: 0 = primals, 1 = slacks,
        # 2 = equality duals, 3 = inequality duals.
        kkt = BlockMatrix(4, 4)
        kkt.set_block(0, 0, hess_block)
        kkt.set_block(1, 1, slack_block)
        kkt.set_block(2, 0, jac_eq)
        kkt.set_block(0, 2, jac_eq.transpose())
        kkt.set_block(3, 0, jac_ineq)
        kkt.set_block(0, 3, jac_ineq.transpose())
        # The -I blocks couple slacks to the inequality duals; the
        # inequality residual used elsewhere is g(x) - s.
        kkt.set_block(3, 1, -scipy.sparse.identity(
            self._nlp.n_ineq_constraints(),
            format='coo'))
        kkt.set_block(1, 3, -scipy.sparse.identity(
            self._nlp.n_ineq_constraints(),
            format='coo'))
        timer.stop('set block')
        return kkt

    def evaluate_primal_dual_kkt_rhs(self, timer=None):
        """Build the right-hand side of the primal-dual KKT system.

        Returns a 4-block vector ordered (primals, slacks, eq duals,
        ineq duals) holding the negated KKT residuals of the barrier
        problem at the current iterate and barrier parameter.
        """
        if timer is None:
            timer = HierarchicalTimer()
        timer.start('eval grad obj')
        grad_obj = self.get_obj_factor() * self.evaluate_grad_objective()
        timer.stop('eval grad obj')
        timer.start('eval jac')
        jac_eq = self._nlp.evaluate_jacobian_eq()
        jac_ineq = self._nlp.evaluate_jacobian_ineq()
        timer.stop('eval jac')
        timer.start('eval cons')
        eq_resid = self._nlp.evaluate_eq_constraints()
        # Inequality residual is measured against the slack variables.
        ineq_resid = self._nlp.evaluate_ineq_constraints() - self._slacks
        timer.stop('eval cons')

        timer.start('grad_lag_primals')
        # Gradient of the barrier Lagrangian w.r.t. the primals; the
        # mu/(x - lb) and mu/(ub - x) terms are the log-barrier gradients.
        grad_lag_primals = (grad_obj +
                            jac_eq.transpose() * self._nlp.get_duals_eq() +
                            jac_ineq.transpose() * self._nlp.get_duals_ineq() -
                            self._barrier / (self._nlp.get_primals() - self._nlp.primals_lb()) +
                            self._barrier / (self._nlp.primals_ub() - self._nlp.get_primals()))
        timer.stop('grad_lag_primals')

        timer.start('grad_lag_slacks')
        grad_lag_slacks = (-self._nlp.get_duals_ineq() -
                           self._barrier / (self._slacks - self._nlp.ineq_lb()) +
                           self._barrier
/ (self._nlp.ineq_ub() - self._slacks)) + timer.stop('grad_lag_slacks') + + rhs = BlockVector(4) + rhs.set_block(0, grad_lag_primals) + rhs.set_block(1, grad_lag_slacks) + rhs.set_block(2, eq_resid) + rhs.set_block(3, ineq_resid) + rhs = -rhs + return rhs + + def set_primal_dual_kkt_solution(self, sol): + self._delta_primals = sol.get_block(0) + self._delta_slacks = sol.get_block(1) + self._delta_duals_eq = sol.get_block(2) + self._delta_duals_ineq = sol.get_block(3) + + def get_delta_primals(self): + return self._delta_primals + + def get_delta_slacks(self): + return self._delta_slacks + + def get_delta_duals_eq(self): + return self._delta_duals_eq + + def get_delta_duals_ineq(self): + return self._delta_duals_ineq + + def get_delta_duals_primals_lb(self): + res = (((self._barrier - self._duals_primals_lb * self._delta_primals) / + (self._nlp.get_primals() - self._nlp.primals_lb())) - + self._duals_primals_lb) + return res + + def get_delta_duals_primals_ub(self): + res = (((self._barrier + self._duals_primals_ub * self._delta_primals) / + (self._nlp.primals_ub() - self._nlp.get_primals())) - + self._duals_primals_ub) + return res + + def get_delta_duals_slacks_lb(self): + res = (((self._barrier - self._duals_slacks_lb * self._delta_slacks) / + (self._slacks - self._nlp.ineq_lb())) - + self._duals_slacks_lb) + return res + + def get_delta_duals_slacks_ub(self): + res = (((self._barrier + self._duals_slacks_ub * self._delta_slacks) / + (self._nlp.ineq_ub() - self._slacks)) - + self._duals_slacks_ub) + return res + + def evaluate_objective(self): + return self._nlp.evaluate_objective() + + def evaluate_eq_constraints(self): + return self._nlp.evaluate_eq_constraints() + + def evaluate_ineq_constraints(self): + return self._nlp.evaluate_ineq_constraints() + + def evaluate_grad_objective(self): + return self._nlp.evaluate_grad_objective() + + def evaluate_jacobian_eq(self): + return self._nlp.evaluate_jacobian_eq() + + def evaluate_jacobian_ineq(self): + return 
self._nlp.evaluate_jacobian_ineq() + + def regularize_equality_gradient(self, kkt, coef, copy_kkt=True): + # Not technically regularizing the equality gradient ... + # Replace this with a regularize_diagonal_block function? + # Then call with kkt matrix and the value of the perturbation? + + # Use a constant perturbation to regularize the equality constraint + # gradient + if copy_kkt: + kkt = kkt.copy() + reg_coef = coef + ptb = (reg_coef * + scipy.sparse.identity(self._nlp.n_eq_constraints(), + format='coo')) + + kkt.set_block(2, 2, ptb) + return kkt + + def regularize_hessian(self, kkt, coef, copy_kkt=True): + if copy_kkt: + kkt = kkt.copy() + + hess = kkt.get_block(0, 0) + ptb = coef * scipy.sparse.identity(self._nlp.n_primals(), format='coo') + hess += ptb + kkt.set_block(0, 0, hess) + return kkt + + def _get_full_duals_primals_bounds(self): + full_duals_primals_lb = None + full_duals_primals_ub = None + # Check in case _nlp was constructed as an AmplNLP (from an nl file) + if (hasattr(self._nlp, 'pyomo_model') and + hasattr(self._nlp, 'get_pyomo_variables')): + pyomo_model = self._nlp.pyomo_model() + pyomo_variables = self._nlp.get_pyomo_variables() + if hasattr(pyomo_model,'ipopt_zL_out'): + zL_suffix = pyomo_model.ipopt_zL_out + full_duals_primals_lb = np.empty(self._nlp.n_primals()) + for i,v in enumerate(pyomo_variables): + if v in zL_suffix: + full_duals_primals_lb[i] = zL_suffix[v] + + if hasattr(pyomo_model,'ipopt_zU_out'): + zU_suffix = pyomo_model.ipopt_zU_out + full_duals_primals_ub = np.empty(self._nlp.n_primals()) + for i,v in enumerate(pyomo_variables): + if v in zU_suffix: + full_duals_primals_ub[i] = zU_suffix[v] + + if full_duals_primals_lb is None: + full_duals_primals_lb = np.ones(self._nlp.n_primals()) + + if full_duals_primals_ub is None: + full_duals_primals_ub = np.ones(self._nlp.n_primals()) + + return full_duals_primals_lb, full_duals_primals_ub + + def load_primals_into_pyomo_model(self): + if not isinstance(self._nlp, 
pyomo_nlp.PyomoNLP): + raise RuntimeError('Can only load primals into a pyomo model if a pyomo model was used in the constructor.') + + pyomo_variables = self._nlp.get_pyomo_variables() + primals = self._nlp.get_primals() + for i, v in enumerate(pyomo_variables): + v.value = primals[i] + + def pyomo_model(self): + return self._nlp.pyomo_model() + + def get_pyomo_variables(self): + return self._nlp.get_pyomo_variables() + + def get_pyomo_constraints(self): + return self._nlp.get_pyomo_constraints() + + def variable_names(self): + return self._nlp.variable_names() + + def constraint_names(self): + return self._nlp.constraint_names() + + def get_primal_indices(self, pyomo_variables): + return self._nlp.get_primal_indices(pyomo_variables) + + def get_constraint_indices(self, pyomo_constraints): + return self._nlp.get_constraint_indices(pyomo_constraints) diff --git a/pyomo/contrib/interior_point/interior_point.py b/pyomo/contrib/interior_point/interior_point.py new file mode 100644 index 00000000000..b2cda7399c0 --- /dev/null +++ b/pyomo/contrib/interior_point/interior_point.py @@ -0,0 +1,677 @@ +from pyomo.contrib.pynumero.interfaces.utils import build_bounds_mask, build_compression_matrix +import numpy as np +import logging +import time +from .linalg.results import LinearSolverStatus +from pyutilib.misc.timing import HierarchicalTimer +import enum + + +""" +Interface Requirements +---------------------- +1) duals_primals_lb[i] must always be 0 if primals_lb[i] is -inf +2) duals_primals_ub[i] must always be 0 if primals_ub[i] is inf +3) duals_slacks_lb[i] must always be 0 if ineq_lb[i] is -inf +4) duals_slacks_ub[i] must always be 0 if ineq_ub[i] is inf +""" + + +ip_logger = logging.getLogger('interior_point') + + +class InteriorPointStatus(enum.Enum): + optimal = 0 + error = 1 + + +class LinearSolveContext(object): + def __init__(self, + interior_point_logger, + linear_solver_logger, + filename=None, + level=logging.INFO): + + self.interior_point_logger = 
interior_point_logger
        self.linear_solver_logger = linear_solver_logger
        self.filename = filename

        # The file handler is created once here; it is attached to and
        # detached from both loggers on context entry/exit below.
        if filename:
            self.handler = logging.FileHandler(filename)
            self.handler.setLevel(level)

    def __enter__(self):
        # Stop both loggers from propagating to ancestor handlers while a
        # linear solve is in progress; optionally tee them to the log file.
        # NOTE: no return value, so `with ... as x` binds x to None.
        self.linear_solver_logger.propagate = False
        self.interior_point_logger.propagate = False
        if self.filename:
            self.linear_solver_logger.addHandler(self.handler)
            self.interior_point_logger.addHandler(self.handler)


    def __exit__(self, et, ev, tb):
        # Restore propagation and detach the file handler.
        self.linear_solver_logger.propagate = True
        self.interior_point_logger.propagate = True
        if self.filename:
            self.linear_solver_logger.removeHandler(self.handler)
            self.interior_point_logger.removeHandler(self.handler)


# How should the RegContext work?
# TODO: in this class, use the linear_solver_context to ...
# Use linear_solver_logger to write iter_no and reg_coef
#
# Define a method for logging IP_reg_info to the linear solver log
# Method can be called within linear_solve_context
class FactorizationContext(object):
    """Context manager bracketing KKT factorization/regularization with
    debug logging; ``log_info`` reports one regularization iteration."""

    def __init__(self, logger):
        # Any reason to pass in a logging level here?
        # ^ So the "regularization log" can have its own outlvl
        self.logger = logger

    def __enter__(self):
        self.logger.debug('Factorizing KKT')
        self.log_header()
        return self

    def __exit__(self, et, ev, tb):
        self.logger.debug('Finished factorizing KKT')
        # Will this swallow exceptions in this context?
        # No: __exit__ returns None (falsy), so by the context manager
        # protocol any exception raised in the with-block is propagated.
+ + def log_header(self): + self.logger.debug('{_iter:<10}' + '{reg_iter:<10}' + '{num_realloc:<10}' + '{reg_coef:<10}' + '{neg_eig:<10}' + '{status:<10}'.format( + _iter='Iter', + reg_iter='reg_iter', + num_realloc='# realloc', + reg_coef='reg_coef', + neg_eig='neg_eig', + status='status')) + + def log_info(self, _iter, reg_iter, num_realloc, coef, neg_eig, status): + self.logger.debug('{_iter:<10}' + '{reg_iter:<10}' + '{num_realloc:<10}' + '{reg_coef:<10.2e}' + '{neg_eig:<10}' + '{status:<10}'.format( + _iter=_iter, + reg_iter=reg_iter, + num_realloc=num_realloc, + reg_coef=coef, + neg_eig=str(neg_eig), + status=status.name)) + + +class InteriorPointSolver(object): + """ + Class for creating interior point solvers with different options + """ + def __init__(self, + linear_solver, + max_iter=100, + tol=1e-8, + linear_solver_log_filename=None, + max_reallocation_iterations=5, + reallocation_factor=2): + self.linear_solver = linear_solver + self.max_iter = max_iter + self.tol = tol + self.linear_solver_log_filename = linear_solver_log_filename + self.max_reallocation_iterations = max_reallocation_iterations + self.reallocation_factor = reallocation_factor + self.base_eq_reg_coef = -1e-8 + self._barrier_parameter = 0.1 + self._minimum_barrier_parameter = 1e-9 + self.hess_reg_coef = 1e-4 + self.max_reg_iter = 6 + self.reg_factor_increase = 100 + + self.logger = logging.getLogger('interior_point') + self._iter = 0 + self.factorization_context = FactorizationContext(self.logger) + + if linear_solver_log_filename: + with open(linear_solver_log_filename, 'w'): + pass + + self.linear_solver_logger = self.linear_solver.getLogger() + self.linear_solve_context = LinearSolveContext(self.logger, + self.linear_solver_logger, + self.linear_solver_log_filename) + + def update_barrier_parameter(self): + self._barrier_parameter = max(self._minimum_barrier_parameter, min(0.5 * self._barrier_parameter, self._barrier_parameter ** 1.5)) + + def set_linear_solver(self, linear_solver): + 
"""This method exists to hopefully make it easy to try the same IP + algorithm with different linear solvers. + Subclasses may have linear-solver specific methods, in which case + this should not be called. + + Hopefully the linear solver interface can be standardized such that + this is not a problem. (Need a generalized method for set_options) + """ + self.linear_solver = linear_solver + + def set_interface(self, interface): + self.interface = interface + + def solve(self, interface, timer=None, report_timing=False): + """ + Parameters + ---------- + interface: pyomo.contrib.interior_point.interface.BaseInteriorPointInterface + The interior point interface. This object handles the function evaluation, + building the KKT matrix, and building the KKT right hand side. + timer: HierarchicalTimer + report_timing: bool + """ + linear_solver = self.linear_solver + max_iter = self.max_iter + tol = self.tol + if timer is None: + timer = HierarchicalTimer() + + timer.start('IP solve') + timer.start('init') + + self._barrier_parameter = 0.1 + + self.set_interface(interface) + + t0 = time.time() + primals = interface.init_primals().copy() + slacks = interface.init_slacks().copy() + duals_eq = interface.init_duals_eq().copy() + duals_ineq = interface.init_duals_ineq().copy() + duals_primals_lb = interface.init_duals_primals_lb().copy() + duals_primals_ub = interface.init_duals_primals_ub().copy() + duals_slacks_lb = interface.init_duals_slacks_lb().copy() + duals_slacks_ub = interface.init_duals_slacks_ub().copy() + + self.process_init(primals, interface.primals_lb(), interface.primals_ub()) + self.process_init(slacks, interface.ineq_lb(), interface.ineq_ub()) + self.process_init_duals_lb(duals_primals_lb, self.interface.primals_lb()) + self.process_init_duals_ub(duals_primals_ub, self.interface.primals_ub()) + self.process_init_duals_lb(duals_slacks_lb, self.interface.ineq_lb()) + self.process_init_duals_ub(duals_slacks_ub, self.interface.ineq_ub()) + + 
interface.set_barrier_parameter(self._barrier_parameter) + + alpha_primal_max = 1 + alpha_dual_max = 1 + + self.logger.info('{_iter:<6}' + '{objective:<11}' + '{primal_inf:<11}' + '{dual_inf:<11}' + '{compl_inf:<11}' + '{barrier:<11}' + '{alpha_p:<11}' + '{alpha_d:<11}' + '{reg:<11}' + '{time:<7}'.format(_iter='Iter', + objective='Objective', + primal_inf='Prim Inf', + dual_inf='Dual Inf', + compl_inf='Comp Inf', + barrier='Barrier', + alpha_p='Prim Step', + alpha_d='Dual Step', + reg='Reg', + time='Time')) + + reg_coef = 0 + + timer.stop('init') + status = InteriorPointStatus.error + + for _iter in range(max_iter): + self._iter = _iter + + interface.set_primals(primals) + interface.set_slacks(slacks) + interface.set_duals_eq(duals_eq) + interface.set_duals_ineq(duals_ineq) + interface.set_duals_primals_lb(duals_primals_lb) + interface.set_duals_primals_ub(duals_primals_ub) + interface.set_duals_slacks_lb(duals_slacks_lb) + interface.set_duals_slacks_ub(duals_slacks_ub) + + timer.start('convergence check') + primal_inf, dual_inf, complimentarity_inf = \ + self.check_convergence(barrier=0, timer=timer) + timer.stop('convergence check') + objective = interface.evaluate_objective() + self.logger.info('{_iter:<6}' + '{objective:<11.2e}' + '{primal_inf:<11.2e}' + '{dual_inf:<11.2e}' + '{compl_inf:<11.2e}' + '{barrier:<11.2e}' + '{alpha_p:<11.2e}' + '{alpha_d:<11.2e}' + '{reg:<11.2e}' + '{time:<7.3f}'.format(_iter=_iter, + objective=objective, + primal_inf=primal_inf, + dual_inf=dual_inf, + compl_inf=complimentarity_inf, + barrier=self._barrier_parameter, + alpha_p=alpha_primal_max, + alpha_d=alpha_dual_max, + reg=reg_coef, + time=time.time() - t0)) + + if max(primal_inf, dual_inf, complimentarity_inf) <= tol: + status = InteriorPointStatus.optimal + break + timer.start('convergence check') + primal_inf, dual_inf, complimentarity_inf = \ + self.check_convergence(barrier=self._barrier_parameter, timer=timer) + timer.stop('convergence check') + if max(primal_inf, dual_inf, 
complimentarity_inf) \ + <= 0.1 * self._barrier_parameter: + # This comparison is made with barrier problem infeasibility. + # Sometimes have trouble getting dual infeasibility low enough + self.update_barrier_parameter() + + interface.set_barrier_parameter(self._barrier_parameter) + timer.start('eval') + timer.start('eval kkt') + kkt = interface.evaluate_primal_dual_kkt_matrix(timer=timer) + timer.stop('eval kkt') + timer.start('eval rhs') + rhs = interface.evaluate_primal_dual_kkt_rhs(timer=timer) + timer.stop('eval rhs') + timer.stop('eval') + + # Factorize linear system + timer.start('factorize') + reg_coef = self.factorize(kkt=kkt, timer=timer) + timer.stop('factorize') + + timer.start('back solve') + with self.linear_solve_context: + self.logger.info('Iter: %s' % self._iter) + delta = linear_solver.do_back_solve(rhs) + timer.stop('back solve') + + interface.set_primal_dual_kkt_solution(delta) + timer.start('frac boundary') + alpha_primal_max, alpha_dual_max = \ + self.fraction_to_the_boundary() + timer.stop('frac boundary') + delta_primals = interface.get_delta_primals() + delta_slacks = interface.get_delta_slacks() + delta_duals_eq = interface.get_delta_duals_eq() + delta_duals_ineq = interface.get_delta_duals_ineq() + delta_duals_primals_lb = interface.get_delta_duals_primals_lb() + delta_duals_primals_ub = interface.get_delta_duals_primals_ub() + delta_duals_slacks_lb = interface.get_delta_duals_slacks_lb() + delta_duals_slacks_ub = interface.get_delta_duals_slacks_ub() + + primals += alpha_primal_max * delta_primals + slacks += alpha_primal_max * delta_slacks + duals_eq += alpha_dual_max * delta_duals_eq + duals_ineq += alpha_dual_max * delta_duals_ineq + duals_primals_lb += alpha_dual_max * delta_duals_primals_lb + duals_primals_ub += alpha_dual_max * delta_duals_primals_ub + duals_slacks_lb += alpha_dual_max * delta_duals_slacks_lb + duals_slacks_ub += alpha_dual_max * delta_duals_slacks_ub + + timer.stop('IP solve') + if report_timing: + print(timer) + 
return status + + def factorize(self, kkt, timer=None): + desired_n_neg_evals = (self.interface.n_eq_constraints() + + self.interface.n_ineq_constraints()) + reg_iter = 0 + with self.factorization_context as fact_con: + status, num_realloc = try_factorization_and_reallocation(kkt=kkt, + linear_solver=self.linear_solver, + reallocation_factor=self.reallocation_factor, + max_iter=self.max_reallocation_iterations, + timer=timer) + if status not in {LinearSolverStatus.successful, LinearSolverStatus.singular}: + raise RuntimeError('Could not factorize KKT system; linear solver status: ' + str(status)) + + if status == LinearSolverStatus.successful: + neg_eig = self.linear_solver.get_inertia()[1] + else: + neg_eig = None + fact_con.log_info(_iter=self._iter, reg_iter=reg_iter, num_realloc=num_realloc, + coef=0, neg_eig=neg_eig, status=status) + reg_iter += 1 + + if status == LinearSolverStatus.singular: + kkt = self.interface.regularize_equality_gradient(kkt=kkt, + coef=self.base_eq_reg_coef * self._barrier_parameter**0.25, + copy_kkt=False) + + total_hess_reg_coef = self.hess_reg_coef + last_hess_reg_coef = 0 + + while neg_eig != desired_n_neg_evals or status == LinearSolverStatus.singular: + kkt = self.interface.regularize_hessian(kkt=kkt, + coef=total_hess_reg_coef - last_hess_reg_coef, + copy_kkt=False) + status, num_realloc = try_factorization_and_reallocation(kkt=kkt, + linear_solver=self.linear_solver, + reallocation_factor=self.reallocation_factor, + max_iter=self.max_reallocation_iterations, + timer=timer) + if status != LinearSolverStatus.successful: + raise RuntimeError('Could not factorize KKT system; linear solver status: ' + str(status)) + neg_eig = self.linear_solver.get_inertia()[1] + fact_con.log_info(_iter=self._iter, reg_iter=reg_iter, num_realloc=num_realloc, + coef=total_hess_reg_coef, neg_eig=neg_eig, status=status) + reg_iter += 1 + if reg_iter > self.max_reg_iter: + raise RuntimeError('Exceeded maximum number of regularization iterations.') + 
last_hess_reg_coef = total_hess_reg_coef + total_hess_reg_coef *= self.reg_factor_increase + + return last_hess_reg_coef + + def process_init(self, x, lb, ub): + process_init(x, lb, ub) + + def process_init_duals_lb(self, x, lb): + process_init_duals_lb(x, lb) + + def process_init_duals_ub(self, x, ub): + process_init_duals_ub(x, ub) + + def check_convergence(self, barrier, timer=None): + """ + Parameters + ---------- + barrier: float + timer: HierarchicalTimer + + Returns + ------- + primal_inf: float + dual_inf: float + complimentarity_inf: float + """ + if timer is None: + timer = HierarchicalTimer() + + interface = self.interface + slacks = interface.get_slacks() + timer.start('grad obj') + grad_obj = interface.get_obj_factor() * interface.evaluate_grad_objective() + timer.stop('grad obj') + timer.start('jac eq') + jac_eq = interface.evaluate_jacobian_eq() + timer.stop('jac eq') + timer.start('jac ineq') + jac_ineq = interface.evaluate_jacobian_ineq() + timer.stop('jac ineq') + timer.start('eq cons') + eq_resid = interface.evaluate_eq_constraints() + timer.stop('eq cons') + timer.start('ineq cons') + ineq_resid = interface.evaluate_ineq_constraints() - slacks + timer.stop('ineq cons') + primals = interface.get_primals() + duals_eq = interface.get_duals_eq() + duals_ineq = interface.get_duals_ineq() + duals_primals_lb = interface.get_duals_primals_lb() + duals_primals_ub = interface.get_duals_primals_ub() + duals_slacks_lb = interface.get_duals_slacks_lb() + duals_slacks_ub = interface.get_duals_slacks_ub() + + primals_lb = interface.primals_lb() + primals_ub = interface.primals_ub() + primals_lb_mod = primals_lb.copy() + primals_ub_mod = primals_ub.copy() + primals_lb_mod[np.isneginf(primals_lb)] = 0 # these entries get multiplied by 0 + primals_ub_mod[np.isinf(primals_ub)] = 0 # these entries get multiplied by 0 + + ineq_lb = interface.ineq_lb() + ineq_ub = interface.ineq_ub() + ineq_lb_mod = ineq_lb.copy() + ineq_ub_mod = ineq_ub.copy() + 
ineq_lb_mod[np.isneginf(ineq_lb)] = 0 # these entries get multiplied by 0 + ineq_ub_mod[np.isinf(ineq_ub)] = 0 # these entries get multiplied by 0 + + timer.start('grad_lag_primals') + grad_lag_primals = grad_obj + jac_eq.transpose() * duals_eq + grad_lag_primals += jac_ineq.transpose() * duals_ineq + grad_lag_primals -= duals_primals_lb + grad_lag_primals += duals_primals_ub + timer.stop('grad_lag_primals') + timer.start('grad_lag_slacks') + grad_lag_slacks = (-duals_ineq - + duals_slacks_lb + + duals_slacks_ub) + timer.stop('grad_lag_slacks') + timer.start('bound resids') + primals_lb_resid = (primals - primals_lb_mod) * duals_primals_lb - barrier + primals_ub_resid = (primals_ub_mod - primals) * duals_primals_ub - barrier + primals_lb_resid[np.isneginf(primals_lb)] = 0 + primals_ub_resid[np.isinf(primals_ub)] = 0 + slacks_lb_resid = (slacks - ineq_lb_mod) * duals_slacks_lb - barrier + slacks_ub_resid = (ineq_ub_mod - slacks) * duals_slacks_ub - barrier + slacks_lb_resid[np.isneginf(ineq_lb)] = 0 + slacks_ub_resid[np.isinf(ineq_ub)] = 0 + timer.stop('bound resids') + + if eq_resid.size == 0: + max_eq_resid = 0 + else: + max_eq_resid = np.max(np.abs(eq_resid)) + if ineq_resid.size == 0: + max_ineq_resid = 0 + else: + max_ineq_resid = np.max(np.abs(ineq_resid)) + primal_inf = max(max_eq_resid, max_ineq_resid) + + max_grad_lag_primals = np.max(np.abs(grad_lag_primals)) + if grad_lag_slacks.size == 0: + max_grad_lag_slacks = 0 + else: + max_grad_lag_slacks = np.max(np.abs(grad_lag_slacks)) + dual_inf = max(max_grad_lag_primals, max_grad_lag_slacks) + + if primals_lb_resid.size == 0: + max_primals_lb_resid = 0 + else: + max_primals_lb_resid = np.max(np.abs(primals_lb_resid)) + if primals_ub_resid.size == 0: + max_primals_ub_resid = 0 + else: + max_primals_ub_resid = np.max(np.abs(primals_ub_resid)) + if slacks_lb_resid.size == 0: + max_slacks_lb_resid = 0 + else: + max_slacks_lb_resid = np.max(np.abs(slacks_lb_resid)) + if slacks_ub_resid.size == 0: + 
            max_slacks_ub_resid = 0
        else:
            max_slacks_ub_resid = np.max(np.abs(slacks_ub_resid))
        # Worst violation of the (mu-perturbed) complementarity conditions
        # over all four sets of bound multipliers.
        complimentarity_inf = max(max_primals_lb_resid, max_primals_ub_resid,
                                  max_slacks_lb_resid, max_slacks_ub_resid)

        return primal_inf, dual_inf, complimentarity_inf

    def fraction_to_the_boundary(self):
        # Delegates to the module-level helper with tau = 1 - mu.
        return fraction_to_the_boundary(self.interface, 1 - self._barrier_parameter)


def try_factorization_and_reallocation(kkt, linear_solver, reallocation_factor, max_iter, timer=None):
    """Factorize ``kkt``, retrying with a larger memory allocation as needed.

    While the linear solver reports ``not_enough_memory``, its allocation is
    grown by ``reallocation_factor`` and the factorization is retried, up to
    ``max_iter`` attempts.

    Returns
    -------
    status: LinearSolverStatus
        Status of the last factorization attempt.
    count: int
        Zero-based index of the last attempt.
    """
    if timer is None:
        timer = HierarchicalTimer()

    assert max_iter >= 1
    for count in range(max_iter):
        timer.start('symbolic')
        """
        Performance could be improved significantly by only performing symbolic factorization once.
        However, we first have to make sure the nonzero structure (and ordering of row and column arrays)
        of the KKT matrix never changes. We have not had time to test this thoroughly, yet.
        """
        res = linear_solver.do_symbolic_factorization(matrix=kkt, raise_on_error=False)
        timer.stop('symbolic')
        if res.status == LinearSolverStatus.successful:
            timer.start('numeric')
            res = linear_solver.do_numeric_factorization(matrix=kkt, raise_on_error=False)
            timer.stop('numeric')
        status = res.status
        if status == LinearSolverStatus.not_enough_memory:
            linear_solver.increase_memory_allocation(reallocation_factor)
        else:
            break
    return status, count


def _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl):
    """Largest alpha in (0, 1] such that x + alpha*delta_x keeps at least a
    fraction (1 - tau) of the current distance to the lower bound xl.

    Entries with delta_x >= 0 move away from (or parallel to) the lower
    bound and can never bind, so they are masked with +inf.
    """
    delta_x_mod = delta_x.copy()
    # Replace zeros before dividing; these entries are masked to inf below.
    delta_x_mod[delta_x_mod == 0] = 1
    alpha = -tau * (x - xl) / delta_x_mod
    alpha[delta_x >= 0] = np.inf
    if alpha.size == 0:
        return 1
    else:
        return min(alpha.min(), 1)


def _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu):
    """Largest alpha in (0, 1] such that x + alpha*delta_x keeps at least a
    fraction (1 - tau) of the current distance to the upper bound xu.

    Entries with delta_x <= 0 move away from (or parallel to) the upper
    bound and can never bind, so they are masked with +inf.
    """
    delta_x_mod = delta_x.copy()
    # Replace zeros before dividing; these entries are masked to inf below.
    delta_x_mod[delta_x_mod == 0] = 1
    alpha = tau * (xu - x) / delta_x_mod
    alpha[delta_x <= 0] = np.inf
    if alpha.size == 0:
        return 1
    else:
        return min(alpha.min(), 1)


def fraction_to_the_boundary(interface, tau):
    """
    Parameters
---------- + interface: pyomo.contrib.interior_point.interface.BaseInteriorPointInterface + tau: float + + Returns + ------- + alpha_primal_max: float + alpha_dual_max: float + """ + primals = interface.get_primals() + slacks = interface.get_slacks() + duals_primals_lb = interface.get_duals_primals_lb() + duals_primals_ub = interface.get_duals_primals_ub() + duals_slacks_lb = interface.get_duals_slacks_lb() + duals_slacks_ub = interface.get_duals_slacks_ub() + + delta_primals = interface.get_delta_primals() + delta_slacks = interface.get_delta_slacks() + delta_duals_primals_lb = interface.get_delta_duals_primals_lb() + delta_duals_primals_ub = interface.get_delta_duals_primals_ub() + delta_duals_slacks_lb = interface.get_delta_duals_slacks_lb() + delta_duals_slacks_ub = interface.get_delta_duals_slacks_ub() + + primals_lb = interface.primals_lb() + primals_ub = interface.primals_ub() + ineq_lb = interface.ineq_lb() + ineq_ub = interface.ineq_ub() + + alpha_primal_max_a = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=primals, + delta_x=delta_primals, + xl=primals_lb) + alpha_primal_max_b = _fraction_to_the_boundary_helper_ub( + tau=tau, + x=primals, + delta_x=delta_primals, + xu=primals_ub) + alpha_primal_max_c = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=slacks, + delta_x=delta_slacks, + xl=ineq_lb) + alpha_primal_max_d = _fraction_to_the_boundary_helper_ub( + tau=tau, + x=slacks, + delta_x=delta_slacks, + xu=ineq_ub) + alpha_primal_max = min(alpha_primal_max_a, alpha_primal_max_b, + alpha_primal_max_c, alpha_primal_max_d) + + alpha_dual_max_a = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=duals_primals_lb, + delta_x=delta_duals_primals_lb, + xl=np.zeros(duals_primals_lb.size)) + alpha_dual_max_b = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=duals_primals_ub, + delta_x=delta_duals_primals_ub, + xl=np.zeros(duals_primals_ub.size)) + alpha_dual_max_c = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=duals_slacks_lb, + 
delta_x=delta_duals_slacks_lb, + xl=np.zeros(duals_slacks_lb.size)) + alpha_dual_max_d = _fraction_to_the_boundary_helper_lb( + tau=tau, + x=duals_slacks_ub, + delta_x=delta_duals_slacks_ub, + xl=np.zeros(duals_slacks_ub.size)) + alpha_dual_max = min(alpha_dual_max_a, alpha_dual_max_b, + alpha_dual_max_c, alpha_dual_max_d) + + return alpha_primal_max, alpha_dual_max + + +def process_init(x, lb, ub): + if np.any((ub - lb) < 0): + raise ValueError( + 'Lower bounds for variables/inequalities should not be larger than upper bounds.') + if np.any((ub - lb) == 0): + raise ValueError( + 'Variables and inequalities should not have equal lower and upper bounds.') + + lb_mask = build_bounds_mask(lb) + ub_mask = build_bounds_mask(ub) + + lb_only = np.logical_and(lb_mask, np.logical_not(ub_mask)) + ub_only = np.logical_and(ub_mask, np.logical_not(lb_mask)) + lb_and_ub = np.logical_and(lb_mask, ub_mask) + out_of_bounds = ((x >= ub) + (x <= lb)) + out_of_bounds_lb_only = np.logical_and(out_of_bounds, lb_only) + out_of_bounds_ub_only = np.logical_and(out_of_bounds, ub_only) + out_of_bounds_lb_and_ub = np.logical_and(out_of_bounds, lb_and_ub) + + cm = build_compression_matrix(out_of_bounds_lb_only) + x[out_of_bounds_lb_only] = cm * (lb + 1) + + cm = build_compression_matrix(out_of_bounds_ub_only) + x[out_of_bounds_ub_only] = cm * (ub - 1) + + del cm + cm1 = build_compression_matrix(lb_and_ub) + cm2 = build_compression_matrix(out_of_bounds_lb_and_ub) + x[out_of_bounds_lb_and_ub] = cm2 * (0.5 * cm1.transpose() * (cm1 * lb + cm1 * ub)) + + +def process_init_duals_lb(x, lb): + x[x <= 0] = 1 + x[np.isneginf(lb)] = 0 + + +def process_init_duals_ub(x, ub): + x[x <= 0] = 1 + x[np.isinf(ub)] = 0 diff --git a/pyomo/contrib/interior_point/inverse_reduced_hessian.py b/pyomo/contrib/interior_point/inverse_reduced_hessian.py new file mode 100644 index 00000000000..e677254a2ca --- /dev/null +++ b/pyomo/contrib/interior_point/inverse_reduced_hessian.py @@ -0,0 +1,124 @@ +import pyomo.environ as 
pyo +from pyomo.opt import check_optimal_termination +from pyomo.common.dependencies import attempt_import +from .interface import InteriorPointInterface +from .linalg.scipy_interface import ScipyInterface + +np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', minimum_version='1.13.0') + + +# Todo: This function currently used IPOPT for the initial solve - should accept solver +def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e-6, tee=False): + """ + This function computes the inverse of the reduced Hessian of a problem at the + solution. This function first solves the problem with Ipopt and then generates + the KKT system for the barrier subproblem to compute the inverse reduced hessian. + + For more information on the reduced Hessian, see "Numerical Optimization", 2nd Edition + Nocedal and Wright, 2006. + + The approach used in this method can be found in, "Computational Strategies for + the Optimal Operation of Large-Scale Chemical Processes", Dissertation, V. Zavala + 2008. See section 3.2.1. + + Parameters + ---------- + model : Pyomo model + The Pyomo model that we want to solve and analyze + independent_variables : list of Pyomo variables + This is the list of independent variables for computing the reduced hessian. + These variables must not be at their bounds at the solution of the + optimization problem. + bound_tolerance : float + The tolerance to use when checking if the variables are too close to their bound. + If they are too close, then the routine will exit without a reduced hessian. + tee : bool + This flag is sent to the tee option of the solver. If true, then the solver + log is output to the console. 
+ """ + m = model + + # make sure the necessary suffixes are added + # so the reduced hessian kkt system is setup correctly from + # the ipopt solution + if not hasattr(m, 'ipopt_zL_out'): + m.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT) + if not hasattr(m, 'ipopt_zU_out'): + m.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT) + if not hasattr(m, 'ipopt_zL_in'): + m.ipopt_zL_in = pyo.Suffix(direction=pyo.Suffix.EXPORT) + if not hasattr(m, 'ipopt_zU_in'): + m.ipopt_zU_in = pyo.Suffix(direction=pyo.Suffix.EXPORT) + if not hasattr(m, 'dual'): + m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT) + + # create the ipopt solver + solver = pyo.SolverFactory('ipopt') + # set options to prevent bounds relaxation (and 0 slacks) + solver.options['bound_relax_factor']=0 + solver.options['honor_original_bounds']='no' + # solve the problem + status = solver.solve(m, tee=tee) + if not check_optimal_termination(status): + return status, None + + # compute the barrier parameter + # ToDo: this needs to eventually come from the solver itself + estimated_mu = list() + for v in m.ipopt_zL_out: + if v.has_lb(): + estimated_mu.append((pyo.value(v) - v.lb)*m.ipopt_zL_out[v]) + for v in m.ipopt_zU_out: + if v.has_ub(): + estimated_mu.append((v.ub - pyo.value(v))*m.ipopt_zU_out[v]) + if len(estimated_mu) == 0: + mu = 10**-8.6 + else: + mu = sum(estimated_mu)/len(estimated_mu) + # check to make sure these estimates were all reasonable + if any([abs(mu-estmu) > 1e-7 for estmu in estimated_mu]): + print('Warning: estimated values of mu do not seem consistent - using mu=10^(-8.6)') + mu = 10**-8.6 + + # collect the list of var data objects for the independent variables + ind_vardatas = list() + for v in independent_variables: + if v.is_indexed(): + for k in v: + ind_vardatas.append(v[k]) + else: + ind_vardatas.append(v) + + # check that none of the independent variables are at their bounds + for v in ind_vardatas: + if (v.has_lb() and pyo.value(v) - v.lb <= bound_tolerance) 
or \ + (v.has_ub() and v.ub - pyo.value(v) <= bound_tolerance): + raise ValueError("Independent variable: {} has a solution value that is near" + " its bound (according to tolerance). The reduced hessian" + " computation does not support this at this time. All" + " independent variables should be in their interior.".format(v)) + + # find the list of indices that we need to make up the reduced hessian + kkt_builder = InteriorPointInterface(m) + pyomo_nlp = kkt_builder.pyomo_nlp() + ind_var_indices = pyomo_nlp.get_primal_indices(ind_vardatas) + + # setup the computation of the reduced hessian + kkt_builder.set_barrier_parameter(mu) + kkt = kkt_builder.evaluate_primal_dual_kkt_matrix() + linear_solver = ScipyInterface(compute_inertia=False) + linear_solver.do_symbolic_factorization(kkt) + linear_solver.do_numeric_factorization(kkt) + + n_rh = len(ind_var_indices) + rhs = np.zeros(kkt.shape[0]) + inv_red_hess = np.zeros((n_rh, n_rh)) + + for rhi, vari in enumerate(ind_var_indices): + rhs[vari] = 1 + v = linear_solver.do_back_solve(rhs) + rhs[vari] = 0 + for rhj, varj in enumerate(ind_var_indices): + inv_red_hess[rhi,rhj] = v[varj] + + return status, inv_red_hess diff --git a/pyomo/contrib/interior_point/linalg/__init__.py b/pyomo/contrib/interior_point/linalg/__init__.py new file mode 100644 index 00000000000..7889ad25a78 --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/__init__.py @@ -0,0 +1,4 @@ +from .results import LinearSolverStatus +from .scipy_interface import ScipyInterface +from .mumps_interface import MumpsInterface +from .ma27_interface import InteriorPointMA27Interface diff --git a/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py b/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py new file mode 100644 index 00000000000..b776d93a98d --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py @@ -0,0 +1,33 @@ +from abc import ABCMeta, abstractmethod +import six +import logging + + +class 
LinearSolverInterface(six.with_metaclass(ABCMeta, object)): + @classmethod + def getLoggerName(cls): + return 'linear_solver' + + @classmethod + def getLogger(cls): + name = 'interior_point.' + cls.getLoggerName() + return logging.getLogger(name) + + @abstractmethod + def do_symbolic_factorization(self, matrix, raise_on_error=True): + pass + + @abstractmethod + def do_numeric_factorization(self, matrix, raise_on_error=True): + pass + + def increase_memory_allocation(self, factor): + raise NotImplementedError('Should be implemented by base class.') + + @abstractmethod + def do_back_solve(self, rhs): + pass + + @abstractmethod + def get_inertia(self): + pass diff --git a/pyomo/contrib/interior_point/linalg/ma27_interface.py b/pyomo/contrib/interior_point/linalg/ma27_interface.py new file mode 100644 index 00000000000..78da74312f6 --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/ma27_interface.py @@ -0,0 +1,125 @@ +from .base_linear_solver_interface import LinearSolverInterface +from .results import LinearSolverStatus, LinearSolverResults +from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface +from scipy.sparse import isspmatrix_coo, tril +from pyomo.contrib.pynumero.sparse import BlockVector + + +class InteriorPointMA27Interface(LinearSolverInterface): + @classmethod + def getLoggerName(cls): + return 'ma27' + + def __init__(self, cntl_options=None, icntl_options=None, iw_factor=1.2, a_factor=2): + self._ma27 = MA27Interface(iw_factor=iw_factor, a_factor=a_factor) + + if cntl_options is None: + cntl_options = dict() + if icntl_options is None: + icntl_options = dict() + + for k, v in cntl_options.items(): + self.set_cntl(k, v) + for k, v in icntl_options.items(): + self.set_icntl(k, v) + + self._dim = None + self._num_status = None + + def do_symbolic_factorization(self, matrix, raise_on_error=True): + self._num_status = None + if not isspmatrix_coo(matrix): + matrix = matrix.tocoo() + matrix = tril(matrix) + nrows, ncols = matrix.shape + if nrows != 
ncols: + raise ValueError('Matrix must be square') + self._dim = nrows + + stat = self._ma27.do_symbolic_factorization(dim=self._dim, irn=matrix.row, icn=matrix.col) + res = LinearSolverResults() + if stat == 0: + res.status = LinearSolverStatus.successful + else: + if raise_on_error: + raise RuntimeError('Symbolic factorization was not successful; return code: ' + str(stat)) + if stat in {-3, -4}: + res.status = LinearSolverStatus.not_enough_memory + elif stat in {-5, 3}: + res.status = LinearSolverStatus.singular + else: + res.status = LinearSolverStatus.error + return res + + def do_numeric_factorization(self, matrix, raise_on_error=True): + if not isspmatrix_coo(matrix): + matrix = matrix.tocoo() + matrix = tril(matrix) + nrows, ncols = matrix.shape + if nrows != ncols: + raise ValueError('Matrix must be square') + if nrows != self._dim: + raise ValueError('Matrix dimensions do not match the dimensions of ' + 'the matrix used for symbolic factorization') + + stat = self._ma27.do_numeric_factorization(irn=matrix.row, icn=matrix.col, dim=self._dim, entries=matrix.data) + res = LinearSolverResults() + if stat == 0: + res.status = LinearSolverStatus.successful + else: + if raise_on_error: + raise RuntimeError('Numeric factorization was not successful; return code: ' + str(stat)) + if stat in {-3, -4}: + res.status = LinearSolverStatus.not_enough_memory + elif stat in {-5, 3}: + res.status = LinearSolverStatus.singular + else: + res.status = LinearSolverStatus.error + + self._num_status = res.status + + return res + + def increase_memory_allocation(self, factor): + self._ma27.iw_factor *= factor + self._ma27.a_factor *= factor + + def do_back_solve(self, rhs): + if isinstance(rhs, BlockVector): + _rhs = rhs.flatten() + result = _rhs + else: + result = rhs.copy() + + result = self._ma27.do_backsolve(result) + + if isinstance(rhs, BlockVector): + _result = rhs.copy_structure() + _result.copyfrom(result) + result = _result + + return result + + def get_inertia(self): + 
if self._num_status is None: + raise RuntimeError('Must call do_numeric_factorization before inertia can be computed') + if self._num_status != LinearSolverStatus.successful: + raise RuntimeError('Can only compute inertia if the numeric factorization was successful.') + num_negative_eigenvalues = self.get_info(15) + num_positive_eigenvalues = self._dim - num_negative_eigenvalues + return (num_positive_eigenvalues, num_negative_eigenvalues, 0) + + def set_icntl(self, key, value): + self._ma27.set_icntl(key, value) + + def set_cntl(self, key, value): + self._ma27.set_cntl(key, value) + + def get_icntl(self, key): + return self._ma27.get_icntl(key) + + def get_cntl(self, key): + return self._ma27.get_cntl(key) + + def get_info(self, key): + return self._ma27.get_info(key) diff --git a/pyomo/contrib/interior_point/linalg/mumps_interface.py b/pyomo/contrib/interior_point/linalg/mumps_interface.py new file mode 100644 index 00000000000..4e977673c4c --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/mumps_interface.py @@ -0,0 +1,219 @@ +from .base_linear_solver_interface import LinearSolverInterface +from .results import LinearSolverStatus, LinearSolverResults +from pyomo.common.dependencies import attempt_import +from scipy.sparse import isspmatrix_coo, tril +from collections import OrderedDict +import logging +mumps, mumps_available = attempt_import(name='pyomo.contrib.pynumero.linalg.mumps_interface', + error_message='pymumps is required to use the MumpsInterface') + + +class MumpsInterface(LinearSolverInterface): + + @classmethod + def getLoggerName(cls): + return 'mumps' + + def __init__(self, par=1, comm=None, cntl_options=None, icntl_options=None): + self._mumps = mumps.MumpsCentralizedAssembledLinearSolver(sym=2, + par=par, + comm=comm) + + if cntl_options is None: + cntl_options = dict() + if icntl_options is None: + icntl_options = dict() + + # These options are set in order to get the correct inertia. 
+ if 13 not in icntl_options: + icntl_options[13] = 1 + if 24 not in icntl_options: + icntl_options[24] = 0 + + for k, v in cntl_options.items(): + self.set_cntl(k, v) + for k, v in icntl_options.items(): + self.set_icntl(k, v) + + self.error_level = self.get_icntl(11) + self.log_error = bool(self.error_level) + self._dim = None + self.logger = self.getLogger() + self.log_header(include_error=self.log_error) + self._prev_allocation = None + + def do_symbolic_factorization(self, matrix, raise_on_error=True): + if not isspmatrix_coo(matrix): + matrix = matrix.tocoo() + matrix = tril(matrix) + nrows, ncols = matrix.shape + self._dim = nrows + + try: + self._mumps.do_symbolic_factorization(matrix) + self._prev_allocation = self.get_infog(16) + except RuntimeError as err: + if raise_on_error: + raise err + + stat = self.get_infog(1) + res = LinearSolverResults() + if stat == 0: + res.status = LinearSolverStatus.successful + elif stat in {-6, -10}: + res.status = LinearSolverStatus.singular + elif stat < 0: + res.status = LinearSolverStatus.error + else: + res.status = LinearSolverStatus.warning + return res + + def do_numeric_factorization(self, matrix, raise_on_error=True): + if not isspmatrix_coo(matrix): + matrix = matrix.tocoo() + matrix = tril(matrix) + try: + self._mumps.do_numeric_factorization(matrix) + except RuntimeError as err: + if raise_on_error: + raise err + + stat = self.get_infog(1) + res = LinearSolverResults() + if stat == 0: + res.status = LinearSolverStatus.successful + elif stat in {-6, -10}: + res.status = LinearSolverStatus.singular + elif stat in {-8, -9}: + res.status = LinearSolverStatus.not_enough_memory + elif stat < 0: + res.status = LinearSolverStatus.error + else: + res.status = LinearSolverStatus.warning + return res + + def increase_memory_allocation(self, factor): + # info(16) is rounded to the nearest MB, so it could be zero + if self._prev_allocation == 0: + new_allocation = 1 + else: + new_allocation = factor*self._prev_allocation + 
# Here I set the memory allocation directly instead of increasing + # the "percent-increase-from-predicted" parameter ICNTL(14) + self.set_icntl(23, new_allocation) + self._prev_allocation = new_allocation + return new_allocation + + def do_back_solve(self, rhs): + res = self._mumps.do_back_solve(rhs) + self.log_info() + return res + + def get_inertia(self): + num_negative_eigenvalues = self.get_infog(12) + num_zero_eigenvalues = self.get_infog(28) + num_positive_eigenvalues = self._dim - num_negative_eigenvalues - num_zero_eigenvalues + return num_positive_eigenvalues, num_negative_eigenvalues, num_zero_eigenvalues + + def get_error_info(self): + # Access error level contained in ICNTL(11) (Fortran indexing). + # Assuming this value has not changed since the solve was performed. + error_level = self.get_icntl(11) + info = OrderedDict() + if error_level == 0: + return info + elif error_level == 1: + info['||A||'] = self.get_rinfog(4) + info['||x||'] = self.get_rinfog(5) + info['Max resid'] = self.get_rinfog(6) + info['Max error'] = self.get_rinfog(9) + return info + elif error_level == 2: + info['||A||'] = self.get_rinfog(4) + info['||x||'] = self.get_rinfog(5) + info['Max resid'] = self.get_rinfog(6) + return info + + def set_icntl(self, key, value): + if key == 13: + if value <= 0: + raise ValueError( + 'ICNTL(13) must be positive for the MumpsInterface.') + elif key == 24: + if value != 0: + raise ValueError( + 'ICNTL(24) must be 0 for the MumpsInterface.') + self._mumps.set_icntl(key, value) + + def set_cntl(self, key, value): + self._mumps.set_cntl(key, value) + + def get_icntl(self, key): + return self._mumps.get_icntl(key) + + def get_cntl(self, key): + return self._mumps.get_cntl(key) + + def get_info(self, key): + return self._mumps.get_info(key) + + def get_infog(self, key): + return self._mumps.get_infog(key) + + def get_rinfo(self, key): + return self._mumps.get_rinfo(key) + + def get_rinfog(self, key): + return self._mumps.get_rinfog(key) + + def 
log_header(self, include_error=True, extra_fields=None): + if extra_fields is None: + extra_fields = list() + header_fields = [] + header_fields.append('Status') + header_fields.append('n_null') + header_fields.append('n_neg') + + if include_error: + header_fields.extend(self.get_error_info().keys()) + + header_fields.extend(extra_fields) + + # Allocate 10 spaces for integer values + header_string = '{0:<10}' + header_string += '{1:<10}' + header_string += '{2:<10}' + + # Allocate 15 spaces for the rest, which I assume are floats + for i in range(3, len(header_fields)): + header_string += '{' + str(i) + ':<15}' + + self.logger.info(header_string.format(*header_fields)) + + def log_info(self): + # Which fields to log should be specified at the instance level + # Any logging that should be done on an iteration-specific case + # should be handled by the IP solver + fields=[] + fields.append(self.get_infog(1)) # Status, 0 for success + fields.append(self.get_infog(28)) # Number of null pivots + fields.append(self.get_infog(12)) # Number of negative pivots + + include_error = self.log_error + if include_error: + fields.extend(self.get_error_info().values()) + + extra_fields = [] + fields.extend(extra_fields) + + # Allocate 10 spaces for integer values + log_string = '{0:<10}' + log_string += '{1:<10}' + log_string += '{2:<10}' + + # Allocate 15 spaces for the rest, which I assume are floats + for i in range(3, len(fields)): + log_string += '{' + str(i) + ':<15.3e}' + + self.logger.info(log_string.format(*fields)) + diff --git a/pyomo/contrib/interior_point/linalg/results.py b/pyomo/contrib/interior_point/linalg/results.py new file mode 100644 index 00000000000..6cf67f1b945 --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/results.py @@ -0,0 +1,14 @@ +import enum + + +class LinearSolverStatus(enum.Enum): + successful = 0 + not_enough_memory = 1 + singular = 2 + error = 3 + warning = 4 + + +class LinearSolverResults(object): + def __init__(self): + self.status = 
None diff --git a/pyomo/contrib/interior_point/linalg/scipy_interface.py b/pyomo/contrib/interior_point/linalg/scipy_interface.py new file mode 100644 index 00000000000..442452f037b --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/scipy_interface.py @@ -0,0 +1,67 @@ +from .base_linear_solver_interface import LinearSolverInterface +from .results import LinearSolverStatus, LinearSolverResults +from scipy.sparse.linalg import splu +from scipy.linalg import eigvals +from scipy.sparse import isspmatrix_csc +from pyomo.contrib.pynumero.sparse.block_vector import BlockVector +import logging +import numpy as np + + +class ScipyInterface(LinearSolverInterface): + def __init__(self, compute_inertia=False): + self._lu = None + self._inertia = None + self.compute_inertia = compute_inertia + + self.logger = logging.getLogger('scipy') + self.logger.propagate = False + + def do_symbolic_factorization(self, matrix, raise_on_error=True): + res = LinearSolverResults() + res.status = LinearSolverStatus.successful + return res + + def do_numeric_factorization(self, matrix, raise_on_error=True): + if not isspmatrix_csc(matrix): + matrix = matrix.tocsc() + res = LinearSolverResults() + try: + self._lu = splu(matrix) + res.status = LinearSolverStatus.successful + except RuntimeError as err: + if raise_on_error: + raise err + if 'Factor is exactly singular' in str(err): + res.status = LinearSolverStatus.singular + else: + res.status = LinearSolverStatus.error + + if self.compute_inertia: + eig = eigvals(matrix.toarray()) + pos_eig = np.count_nonzero((eig > 0)) + neg_eigh = np.count_nonzero((eig < 0)) + zero_eig = np.count_nonzero(eig == 0) + self._inertia = (pos_eig, neg_eigh, zero_eig) + + return res + + def do_back_solve(self, rhs): + if isinstance(rhs, BlockVector): + _rhs = rhs.flatten() + else: + _rhs = rhs + + result = self._lu.solve(_rhs) + + if isinstance(rhs, BlockVector): + _result = rhs.copy_structure() + _result.copyfrom(result) + result = _result + + return result + + 
def get_inertia(self): + if self._inertia is None: + raise RuntimeError('The intertia was not computed during do_numeric_factorization. Set compute_inertia to True.') + return self._inertia diff --git a/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py new file mode 100644 index 00000000000..94a11cec1a3 --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py @@ -0,0 +1,120 @@ +import pyutilib.th as unittest +from pyomo.common.dependencies import attempt_import +np, np_available = attempt_import('numpy', minimum_version='1.13.0') +scipy, scipy_available = attempt_import('scipy.sparse') +mumps, mumps_available = attempt_import('mumps') +if not np_available or not scipy_available: + raise unittest.SkipTest('numpy and scipy are needed for interior point tests') +import numpy as np +from scipy.sparse import coo_matrix, tril +from pyomo.contrib import interior_point as ip +from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface +ma27_available = MA27Interface.available() + + +def get_base_matrix(use_tril): + if use_tril: + row = [0, 1, 1, 2, 2] + col = [0, 0, 1, 0, 2] + data = [1, 7, 4, 3, 6] + else: + row = [0, 0, 0, 1, 1, 2, 2] + col = [0, 1, 2, 0, 1, 0, 2] + data = [1, 7, 3, 7, 4, 3, 6] + mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double) + return mat + + +def get_base_matrix_wrong_order(use_tril): + if use_tril: + row = [1, 0, 1, 2, 2] + col = [0, 0, 1, 0, 2] + data = [7, 1, 4, 3, 6] + else: + row = [1, 0, 0, 0, 1, 2, 2] + col = [0, 1, 2, 0, 1, 0, 2] + data = [7, 7, 3, 1, 4, 3, 6] + mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double) + return mat + + +class TestTrilBehavior(unittest.TestCase): + """ + Some of the other tests in this file depend on + the behavior of tril that is tested in this + test, namely the tests in TestWrongNonzeroOrdering. 
+ """ + def test_tril_behavior(self): + mat = get_base_matrix(use_tril=True) + mat2 = tril(mat) + self.assertTrue(np.all(mat.row == mat2.row)) + self.assertTrue(np.all(mat.col == mat2.col)) + self.assertTrue(np.allclose(mat.data, mat2.data)) + + mat = get_base_matrix_wrong_order(use_tril=True) + self.assertFalse(np.all(mat.row == mat2.row)) + self.assertFalse(np.allclose(mat.data, mat2.data)) + mat2 = tril(mat) + self.assertTrue(np.all(mat.row == mat2.row)) + self.assertTrue(np.all(mat.col == mat2.col)) + self.assertTrue(np.allclose(mat.data, mat2.data)) + + +class TestLinearSolvers(unittest.TestCase): + def _test_linear_solvers(self, solver): + mat = get_base_matrix(use_tril=False) + zero_mat = mat.copy() + zero_mat.data.fill(0) + stat = solver.do_symbolic_factorization(zero_mat) + self.assertEqual(stat.status, ip.linalg.LinearSolverStatus.successful) + stat = solver.do_numeric_factorization(mat) + self.assertEqual(stat.status, ip.linalg.LinearSolverStatus.successful) + x_true = np.array([1, 2, 3], dtype=np.double) + rhs = mat * x_true + x = solver.do_back_solve(rhs) + self.assertTrue(np.allclose(x, x_true)) + x_true = np.array([4, 2, 3], dtype=np.double) + rhs = mat * x_true + x = solver.do_back_solve(rhs) + self.assertTrue(np.allclose(x, x_true)) + + def test_scipy(self): + solver = ip.linalg.ScipyInterface() + self._test_linear_solvers(solver) + + @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests') + def test_mumps(self): + solver = ip.linalg.MumpsInterface() + self._test_linear_solvers(solver) + + @unittest.skipIf(not ma27_available, 'MA27 is needed for interior point MA27 tests') + def test_ma27(self): + solver = ip.linalg.InteriorPointMA27Interface() + self._test_linear_solvers(solver) + + +@unittest.skip('This does not work yet') +class TestWrongNonzeroOrdering(unittest.TestCase): + def _test_solvers(self, solver, use_tril): + mat = get_base_matrix(use_tril=use_tril) + wrong_order_mat = 
get_base_matrix_wrong_order(use_tril=use_tril) + stat = solver.do_symbolic_factorization(mat) + stat = solver.do_numeric_factorization(wrong_order_mat) + x_true = np.array([1, 2, 3], dtype=np.double) + rhs = mat * x_true + x = solver.do_back_solve(rhs) + self.assertTrue(np.allclose(x, x_true)) + + def test_scipy(self): + solver = ip.linalg.ScipyInterface() + self._test_solvers(solver, use_tril=False) + + @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests') + def test_mumps(self): + solver = ip.linalg.MumpsInterface() + self._test_solvers(solver, use_tril=True) + + @unittest.skipIf(not ma27_available, 'MA27 is needed for interior point MA27 tests') + def test_ma27(self): + solver = ip.linalg.InteriorPointMA27Interface() + self._test_solvers(solver, use_tril=True) diff --git a/pyomo/contrib/interior_point/linalg/tests/test_realloc.py b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py new file mode 100644 index 00000000000..6203985dc12 --- /dev/null +++ b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py @@ -0,0 +1,67 @@ +import pyutilib.th as unittest +from pyomo.common.dependencies import attempt_import +np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', + minimum_version='1.13.0') +scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') +mumps, mumps_available = attempt_import('mumps') +if not (numpy_available and scipy_available): + raise unittest.SkipTest('Interior point tests require numpy and scipy') +from scipy.sparse import coo_matrix +import pyomo.contrib.interior_point as ip + + +class TestReallocation(unittest.TestCase): + @unittest.skipIf(not mumps_available, 'mumps is not available') + def test_reallocate_memory_mumps(self): + + # Create a tri-diagonal matrix with small entries on the diagonal + n = 10000 + small_val = 1e-7 + big_val = 1e2 + irn = [] + jcn = [] + ent = [] + for i in range(n-1): + irn.extend([i+1, i, i]) + jcn.extend([i, i, i+1]) + 
ent.extend([big_val,small_val,big_val]) + irn.append(n-1) + jcn.append(n-1) + ent.append(small_val) + irn = np.array(irn) + jcn = np.array(jcn) + ent = np.array(ent) + + matrix = coo_matrix((ent, (irn, jcn)), shape=(n,n)) + + linear_solver = ip.linalg.MumpsInterface() + linear_solver.do_symbolic_factorization(matrix) + + predicted = linear_solver.get_infog(16) + + res = linear_solver.do_numeric_factorization(matrix, raise_on_error=False) + self.assertEqual(res.status, ip.linalg.LinearSolverStatus.not_enough_memory) + + linear_solver.do_symbolic_factorization(matrix) + + factor = 2 + linear_solver.increase_memory_allocation(factor) + + res = linear_solver.do_numeric_factorization(matrix) + self.assertEqual(res.status, ip.linalg.LinearSolverStatus.successful) + + # Expected memory allocation (MB) + self.assertEqual(linear_solver._prev_allocation, 6) + + actual = linear_solver.get_infog(18) + + # Sanity checks: + # Make sure actual memory usage is greater than initial guess + self.assertTrue(predicted < actual) + # Make sure memory allocation is at least as much as was used + self.assertTrue(actual <= linear_solver._prev_allocation) + + +if __name__ == '__main__': + test_realloc = TestReallocation() + test_realloc.test_reallocate_memory_mumps() diff --git a/pyomo/contrib/interior_point/tests/__init__.py b/pyomo/contrib/interior_point/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/interior_point/tests/test_interior_point.py b/pyomo/contrib/interior_point/tests/test_interior_point.py new file mode 100644 index 00000000000..b3328d1529b --- /dev/null +++ b/pyomo/contrib/interior_point/tests/test_interior_point.py @@ -0,0 +1,197 @@ +import pyutilib.th as unittest +import pyomo.environ as pe +from pyomo.common.dependencies import attempt_import + +np, numpy_availalbe = attempt_import('numpy', 'Interior point requires numpy', minimum_version='1.13.0') +scipy, scipy_available = attempt_import('scipy', 'Interior point requires 
scipy') +mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps') +if not (numpy_availalbe and scipy_available): + raise unittest.SkipTest('Interior point tests require numpy and scipy') + +import numpy as np + +from pyomo.contrib.pynumero.asl import AmplInterface +asl_available = AmplInterface.available() +import pyomo.contrib.interior_point as ip +from pyomo.contrib.interior_point.interior_point import (process_init, + process_init_duals_lb, + process_init_duals_ub, + _fraction_to_the_boundary_helper_lb, + _fraction_to_the_boundary_helper_ub) +from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface +ma27_available = MA27Interface.available() + + +@unittest.skipIf(not asl_available, 'asl is not available') +class TestSolveInteriorPoint(unittest.TestCase): + def _test_solve_interior_point_1(self, linear_solver): + m = pe.ConcreteModel() + m.x = pe.Var() + m.y = pe.Var() + m.obj = pe.Objective(expr=m.x**2 + m.y**2) + m.c1 = pe.Constraint(expr=m.y == pe.exp(m.x)) + m.c2 = pe.Constraint(expr=m.y >= (m.x - 1)**2) + interface = ip.InteriorPointInterface(m) + ip_solver = ip.InteriorPointSolver(linear_solver) + status = ip_solver.solve(interface) + self.assertEqual(status, ip.InteriorPointStatus.optimal) + x = interface.get_primals() + duals_eq = interface.get_duals_eq() + duals_ineq = interface.get_duals_ineq() + self.assertAlmostEqual(x[0], 0) + self.assertAlmostEqual(x[1], 1) + self.assertAlmostEqual(duals_eq[0], -1-1.0/3.0) + self.assertAlmostEqual(duals_ineq[0], 2.0/3.0) + interface.load_primals_into_pyomo_model() + self.assertAlmostEqual(m.x.value, 0) + self.assertAlmostEqual(m.y.value, 1) + + def _test_solve_interior_point_2(self, linear_solver): + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(1, 4)) + m.obj = pe.Objective(expr=m.x**2) + interface = ip.InteriorPointInterface(m) + ip_solver = ip.InteriorPointSolver(linear_solver) + status = ip_solver.solve(interface) + self.assertEqual(status, ip.InteriorPointStatus.optimal) + 
interface.load_primals_into_pyomo_model() + self.assertAlmostEqual(m.x.value, 1) + + def test_ip1_scipy(self): + solver = ip.linalg.ScipyInterface() + solver.compute_inertia = True + self._test_solve_interior_point_1(solver) + + def test_ip2_scipy(self): + solver = ip.linalg.ScipyInterface() + solver.compute_inertia = True + self._test_solve_interior_point_2(solver) + + @unittest.skipIf(not mumps_available, 'Mumps is not available') + def test_ip1_mumps(self): + solver = ip.linalg.MumpsInterface() + self._test_solve_interior_point_1(solver) + + @unittest.skipIf(not mumps_available, 'Mumps is not available') + def test_ip2_mumps(self): + solver = ip.linalg.MumpsInterface() + self._test_solve_interior_point_2(solver) + + @unittest.skipIf(not ma27_available, 'MA27 is not available') + def test_ip1_ma27(self): + solver = ip.linalg.InteriorPointMA27Interface() + self._test_solve_interior_point_1(solver) + + @unittest.skipIf(not ma27_available, 'MA27 is not available') + def test_ip2_ma27(self): + solver = ip.linalg.InteriorPointMA27Interface() + self._test_solve_interior_point_2(solver) + + +class TestProcessInit(unittest.TestCase): + def testprocess_init(self): + lb = np.array([-np.inf, -np.inf, -2, -2], dtype=np.double) + ub = np.array([ np.inf, 2, np.inf, 2], dtype=np.double) + + x = np.array([ 0, 0, 0, 0], dtype=np.double) + process_init(x, lb, ub) + self.assertTrue(np.allclose(x, np.array([0, 0, 0, 0], dtype=np.double))) + + x = np.array([ -2, -2, -2, -2], dtype=np.double) + process_init(x, lb, ub) + self.assertTrue(np.allclose(x, np.array([-2, -2, -1, 0], dtype=np.double))) + + x = np.array([ -3, -3, -3, -3], dtype=np.double) + process_init(x, lb, ub) + self.assertTrue(np.allclose(x, np.array([-3, -3, -1, 0], dtype=np.double))) + + x = np.array([ 2, 2, 2, 2], dtype=np.double) + process_init(x, lb, ub) + self.assertTrue(np.allclose(x, np.array([2, 1, 2, 0], dtype=np.double))) + + x = np.array([ 3, 3, 3, 3], dtype=np.double) + process_init(x, lb, ub) + 
self.assertTrue(np.allclose(x, np.array([3, 1, 3, 0], dtype=np.double))) + + def testprocess_init_duals(self): + x = np.array([0, 0, 0, 0], dtype=np.double) + lb = np.array([-5, 0, -np.inf, 2], dtype=np.double) + process_init_duals_lb(x, lb) + self.assertTrue(np.allclose(x, np.array([1, 1, 0, 1], dtype=np.double))) + + x = np.array([-1, -1, -1, -1], dtype=np.double) + process_init_duals_lb(x, lb) + self.assertTrue(np.allclose(x, np.array([1, 1, 0, 1], dtype=np.double))) + + x = np.array([2, 2, 2, 2], dtype=np.double) + ub = np.array([-5, 0, np.inf, 2], dtype=np.double) + process_init_duals_ub(x, ub) + self.assertTrue(np.allclose(x, np.array([2, 2, 0, 2], dtype=np.double))) + + +class TestFractionToTheBoundary(unittest.TestCase): + def test_fraction_to_the_boundary_helper_lb(self): + tau = 0.9 + x = np.array([0, 0, 0, 0], dtype=np.double) + xl = np.array([-np.inf, -1, -np.inf, -1], dtype=np.double) + + delta_x = np.array([-0.1, -0.1, -0.1, -0.1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([-1, -1, -1, -1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 0.9) + + delta_x = np.array([-10, -10, -10, -10], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 0.09) + + delta_x = np.array([1, 1, 1, 1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([-10, 1, -10, 1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([-10, -1, -10, -1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl) + self.assertAlmostEqual(alpha, 0.9) + + delta_x = np.array([1, -10, 1, -1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_lb(tau, x, 
delta_x, xl) + self.assertAlmostEqual(alpha, 0.09) + + def test_fraction_to_the_boundary_helper_ub(self): + tau = 0.9 + x = np.array([0, 0, 0, 0], dtype=np.double) + xu = np.array([np.inf, 1, np.inf, 1], dtype=np.double) + + delta_x = np.array([0.1, 0.1, 0.1, 0.1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([1, 1, 1, 1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 0.9) + + delta_x = np.array([10, 10, 10, 10], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 0.09) + + delta_x = np.array([-1, -1, -1, -1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([10, -1, 10, -1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 1) + + delta_x = np.array([10, 1, 10, 1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 0.9) + + delta_x = np.array([-1, 10, -1, 1], dtype=np.double) + alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu) + self.assertAlmostEqual(alpha, 0.09) diff --git a/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py new file mode 100644 index 00000000000..5a894aa8bd3 --- /dev/null +++ b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py @@ -0,0 +1,136 @@ +import pyutilib.th as unittest +import pyomo.environ as pe +from pyomo.opt import check_optimal_termination +from pyomo.common.dependencies import attempt_import +from pyomo.contrib.interior_point.inverse_reduced_hessian import inv_reduced_hessian_barrier + +np, numpy_available = attempt_import('numpy', 'inverse_reduced_hessian numpy', + 
minimum_version='1.13.0') +scipy, scipy_available = attempt_import('scipy', 'inverse_reduced_hessian requires scipy') +from pyomo.contrib.pynumero.asl import AmplInterface +asl_available = AmplInterface.available() +if not (numpy_available and scipy_available and asl_available): + raise unittest.SkipTest('inverse_reduced_hessian tests require numpy, scipy, and asl') +from pyomo.common.dependencies import(pandas as pd, pandas_available) +import pyomo.environ as pe +ipopt_solver = pe.SolverFactory('ipopt') +if not ipopt_solver.available(exception_flag=False): + raise unittest.SkipTest('ipopt is not available') + +numdiff_available = True +try: + import numdifftools as nd +except: + numdiff_available = False + + +class TestInverseReducedHessian(unittest.TestCase): + # the original test + def test_invrh_zavala_thesis(self): + m = pe.ConcreteModel() + m.x = pe.Var([1,2,3]) + m.obj = pe.Objective(expr=(m.x[1]-1)**2 + (m.x[2]-2)**2 + (m.x[3]-3)**2) + m.c1 = pe.Constraint(expr=m.x[1] + 2*m.x[2] + 3*m.x[3]==0) + + status, invrh = inv_reduced_hessian_barrier(m, [m.x[2], m.x[3]]) + expected_invrh = np.asarray([[ 0.35714286, -0.21428571], + [-0.21428571, 0.17857143]]) + np.testing.assert_array_almost_equal(invrh, expected_invrh) + + # test by DLW, April 2020 + def _simple_model(self, add_constraint=False): + # Hardwired to have two x columns and one y + # if add_constraint is true, there is a binding constraint on b0 + data = pd.DataFrame([[1, 1.1, 0.365759306], + [2, 1.2, 4], + [3, 1.3, 4.8876684], + [4, 1.4, 5.173455561], + [5, 1.5, 2.093799081], + [6, 1.6, 9], + [7, 1.7, 6.475045106], + [8, 1.8, 8.127111268], + [9, 1.9, 6], + [10, 1.21, 10.20642714], + [11, 1.22, 13.08211636], + [12, 1.23, 10], + [13, 1.24, 15.38766047], + [14, 1.25, 14.6587746], + [15, 1.26, 13.68608604], + [16, 1.27, 14.70707893], + [17, 1.28, 18.46192779], + [18, 1.29, 15.60649164]], + columns=['tofu','chard', 'y']) + + model = pe.ConcreteModel() + + model.b0 = pe.Var(initialize = 0) + model.bindexes = 
pe.Set(initialize=['tofu', 'chard']) + model.b = pe.Var(model.bindexes, initialize = 1) + + # try to make trouble + if add_constraint: + model.binding_constraint = pe.Constraint(expr=model.b0>=10) + + # The columns need to have unique values (or you get warnings) + def response_rule(m, t, c): + expr = m.b0 + m.b['tofu']*t + m.b['chard']*c + return expr + model.response_function = pe.Expression(data.tofu, data.chard, rule = response_rule) + + def SSE_rule(m): + return sum((data.y[i] - m.response_function[data.tofu[i], data.chard[i]])**2\ + for i in data.index) + model.SSE = pe.Objective(rule = SSE_rule, sense=pe.minimize) + + return model + + @unittest.skipIf(not numdiff_available, "numdiff missing") + @unittest.skipIf(not pandas_available, "pandas missing") + def test_3x3_using_linear_regression(self): + """ simple linear regression with two x columns, so 3x3 Hessian""" + + model = self._simple_model() + solver = pe.SolverFactory("ipopt") + status = solver.solve(model) + self.assertTrue(check_optimal_termination(status)) + tstar = [pe.value(model.b0), + pe.value(model.b['tofu']), pe.value(model.b['chard'])] + + def _ndwrap(x): + # wrapper for numdiff call + model.b0.fix(x[0]) + model.b["tofu"].fix(x[1]) + model.b["chard"].fix(x[2]) + rval = pe.value(model.SSE) + return rval + + H = nd.Hessian(_ndwrap)(tstar) + HInv = np.linalg.inv(H) + + model.b0.fixed = False + model.b["tofu"].fixed = False + model.b["chard"].fixed = False + status, H_inv_red_hess = inv_reduced_hessian_barrier(model, + [model.b0, + model.b["tofu"], + model.b["chard"]]) + # this passes at decimal=6, BTW + np.testing.assert_array_almost_equal(HInv, H_inv_red_hess, decimal=3) + + + @unittest.skipIf(not numdiff_available, "numdiff missing") + @unittest.skipIf(not pandas_available, "pandas missing") + def test_with_binding_constraint(self): + """ there is a binding constraint""" + + model = self._simple_model(add_constraint=True) + + status, H_inv_red_hess = inv_reduced_hessian_barrier(model, + 
[model.b0, + model.b["tofu"], + model.b["chard"]]) + print("test_with_binding_constraint should see an error raised.") + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/interior_point/tests/test_reg.py b/pyomo/contrib/interior_point/tests/test_reg.py new file mode 100644 index 00000000000..fdf8c7145e5 --- /dev/null +++ b/pyomo/contrib/interior_point/tests/test_reg.py @@ -0,0 +1,120 @@ +import pyutilib.th as unittest +import pyomo.environ as pe +from pyomo.core.base import ConcreteModel, Var, Constraint, Objective +from pyomo.common.dependencies import attempt_import + +np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', + minimum_version='1.13.0') +scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') +mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps') +if not (numpy_available and scipy_available): + raise unittest.SkipTest('Interior point tests require numpy and scipy') + +from pyomo.contrib.pynumero.asl import AmplInterface +asl_available = AmplInterface.available() +if not asl_available: + raise unittest.SkipTest('Regularization tests require ASL') +import pyomo.contrib.interior_point as ip +from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface +ma27_available = MA27Interface.available() + + +def make_model(): + m = ConcreteModel() + m.x = Var([1,2,3], initialize=0) + m.f = Var([1,2,3], initialize=0) + m.F = Var(initialize=0) + m.f[1].fix(1) + m.f[2].fix(2) + + m.sum_con = Constraint(expr= + (1 == m.x[1] + m.x[2] + m.x[3])) + def bilin_rule(m, i): + return m.F*m.x[i] == m.f[i] + m.bilin_con = Constraint([1,2,3], rule=bilin_rule) + + m.obj = Objective(expr=m.F**2) + + return m + + +def make_model_2(): + m = ConcreteModel() + m.x = Var(initialize=0.1, bounds=(0, 1)) + m.y = Var(initialize=0.1, bounds=(0, 1)) + m.obj = Objective(expr=-m.x**2 - m.y**2) + m.c = Constraint(expr=m.y <= pe.exp(-m.x)) + return m + + +class 
TestRegularization(unittest.TestCase): + def _test_regularization(self, linear_solver): + m = make_model() + interface = ip.InteriorPointInterface(m) + ip_solver = ip.InteriorPointSolver(linear_solver) + ip_solver.set_interface(interface) + + interface.set_barrier_parameter(1e-1) + + # Evaluate KKT matrix before any iterations + kkt = interface.evaluate_primal_dual_kkt_matrix() + reg_coef = ip_solver.factorize(kkt) + + # Expected regularization coefficient: + self.assertAlmostEqual(reg_coef, 1e-4) + + desired_n_neg_evals = (ip_solver.interface.n_eq_constraints() + + ip_solver.interface.n_ineq_constraints()) + + # Expected inertia: + n_pos_evals, n_neg_evals, n_null_evals = linear_solver.get_inertia() + self.assertEqual(n_null_evals, 0) + self.assertEqual(n_neg_evals, desired_n_neg_evals) + + @unittest.skipIf(not mumps_available, 'Mumps is not available') + def test_mumps(self): + solver = ip.linalg.MumpsInterface() + self._test_regularization(solver) + + def test_scipy(self): + solver = ip.linalg.ScipyInterface(compute_inertia=True) + self._test_regularization(solver) + + @unittest.skipIf(not ma27_available, 'MA27 is not available') + def test_ma27(self): + solver = ip.linalg.InteriorPointMA27Interface(icntl_options={1: 0, 2: 0}) + self._test_regularization(solver) + + def _test_regularization_2(self, linear_solver): + m = make_model_2() + interface = ip.InteriorPointInterface(m) + ip_solver = ip.InteriorPointSolver(linear_solver) + + status = ip_solver.solve(interface) + self.assertEqual(status, ip.InteriorPointStatus.optimal) + interface.load_primals_into_pyomo_model() + self.assertAlmostEqual(m.x.value, 1) + self.assertAlmostEqual(m.y.value, pe.exp(-1)) + + @unittest.skipIf(not mumps_available, 'Mumps is not available') + def test_mumps_2(self): + solver = ip.linalg.MumpsInterface() + self._test_regularization_2(solver) + + def test_scipy_2(self): + solver = ip.linalg.ScipyInterface(compute_inertia=True) + self._test_regularization_2(solver) + + 
@unittest.skipIf(not ma27_available, 'MA27 is not available') + def test_ma27_2(self): + solver = ip.linalg.InteriorPointMA27Interface(icntl_options={1: 0, 2: 0}) + self._test_regularization_2(solver) + + +if __name__ == '__main__': + # + unittest.main() + # test_reg = TestRegularization() + # test_reg.test_regularize_mumps() + # test_reg.test_regularize_scipy() + diff --git a/pyomo/contrib/mcpp/pyomo_mcpp.py b/pyomo/contrib/mcpp/pyomo_mcpp.py index 983ae988c47..7cb8ab6fcfb 100644 --- a/pyomo/contrib/mcpp/pyomo_mcpp.py +++ b/pyomo/contrib/mcpp/pyomo_mcpp.py @@ -310,7 +310,7 @@ def exitNode(self, node, data): return ans - def beforeChild(self, node, child): + def beforeChild(self, node, child, child_idx): if type(child) in nonpyomo_leaf_types: # This means the child is POD # i.e., int, float, string @@ -322,7 +322,7 @@ def beforeChild(self, node, child): # this is an expression node return True, None - def acceptChildResult(self, node, data, child_result): + def acceptChildResult(self, node, data, child_result, child_idx): self.refs.add(child_result) data.append(child_result) return data diff --git a/pyomo/contrib/mindtpy/MindtPy.py b/pyomo/contrib/mindtpy/MindtPy.py index 1f490a08ce5..0cff242922a 100644 --- a/pyomo/contrib/mindtpy/MindtPy.py +++ b/pyomo/contrib/mindtpy/MindtPy.py @@ -93,12 +93,6 @@ class MindtPySolver(object): "covering problem (max_binary), and fix the initial value for " "the integer variables (initial_binary)" )) - CONFIG.declare("integer_cuts", ConfigValue( - default=True, - domain=bool, - description="Integer cuts", - doc="Add integer cuts after finding a feasible solution to the MINLP" - )) CONFIG.declare("max_slack", ConfigValue( default=1000.0, domain=PositiveFloat, @@ -124,7 +118,7 @@ class MindtPySolver(object): )) CONFIG.declare("nlp_solver", ConfigValue( default="ipopt", - domain=In(["ipopt"]), + domain=In(["ipopt", "gams"]), description="NLP subsolver name", doc="Which NLP subsolver is going to be used for solving the nonlinear" 
"subproblems" @@ -137,7 +131,8 @@ class MindtPySolver(object): )) CONFIG.declare("mip_solver", ConfigValue( default="gurobi", - domain=In(["gurobi", "cplex", "cbc", "glpk", "gams"]), + domain=In(["gurobi", "cplex", "cbc", "glpk", "gams", + "gurobi_persistent", "cplex_persistent"]), description="MIP subsolver name", doc="Which MIP subsolver is going to be used for solving the mixed-" "integer master problems" @@ -196,7 +191,7 @@ class MindtPySolver(object): description="Tolerance on variable bounds." )) CONFIG.declare("zero_tolerance", ConfigValue( - default=1E-15, + default=1E-8, description="Tolerance on variable equal to zero." )) CONFIG.declare("initial_feas", ConfigValue( @@ -220,6 +215,37 @@ class MindtPySolver(object): "Note that 'integer_to_binary' flag needs to be used to apply it to actual integers and not just binaries.", domain=bool )) + CONFIG.declare("single_tree", ConfigValue( + default=False, + description="Use single tree implementation in solving the MILP master problem.", + domain=bool + )) + CONFIG.declare("solution_pool", ConfigValue( + default=False, + description="Use solution pool in solving the MILP master problem.", + domain=bool + )) + CONFIG.declare("add_slack", ConfigValue( + default=False, + description="whether add slack variable here." 
+ "slack variables here are used to deal with nonconvex MINLP", + domain=bool + )) + CONFIG.declare("continuous_var_bound", ConfigValue( + default=1e10, + description="default bound added to unbounded continuous variables in nonlinear constraint if single tree is activated.", + domain=PositiveFloat + )) + CONFIG.declare("integer_var_bound", ConfigValue( + default=1e9, + description="default bound added to unbounded integral variables in nonlinear constraint if single tree is activated.", + domain=PositiveFloat + )) + CONFIG.declare("cycling_check", ConfigValue( + default=True, + description="check if OA algorithm is stalled in a cycle and terminate.", + domain=bool + )) def available(self, exception_flag=True): """Check if solver is available. @@ -246,9 +272,24 @@ def solve(self, model, **kwds): """ config = self.CONFIG(kwds.pop('options', {})) config.set_value(kwds) + + # configuration confirmation + if config.single_tree: + config.iteration_limit = 1 + config.add_slack = False + config.add_integer_cuts = False + config.mip_solver = 'cplex_persistent' + config.logger.info( + "Single tree implementation is activated. The default MIP solver is 'cplex_persistent'") + # if the slacks fix to zero, just don't add them + if config.max_slack == 0.0: + config.add_slack = False + solve_data = MindtPySolveData() solve_data.results = SolverResults() solve_data.timing = Container() + solve_data.curr_int_sol = [] + solve_data.prev_int_sol = [] solve_data.original_model = model solve_data.working_model = model.clone() @@ -256,16 +297,15 @@ def solve(self, model, **kwds): TransformationFactory('contrib.integer_to_binary'). 
\ apply_to(solve_data.working_model) - new_logging_level = logging.INFO if config.tee else None with time_code(solve_data.timing, 'total', is_main_timer=True), \ - lower_logger_level_to(config.logger, new_logging_level), \ - create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data): + lower_logger_level_to(config.logger, new_logging_level), \ + create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data): config.logger.info("---Starting MindtPy---") MindtPy = solve_data.working_model.MindtPy_utils setup_results_object(solve_data, config) - process_objective(solve_data, config) + process_objective(solve_data, config, use_mcpp=False) # Save model initial values. solve_data.initial_var_values = list( @@ -345,7 +385,9 @@ def solve(self, model, **kwds): # MindtPy.feas_inverse_map[n] = c # Create slack variables for OA cuts - lin.slack_vars = VarList(bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals) + if config.add_slack: + lin.slack_vars = VarList( + bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals) # Create slack variables for feasibility problem feas.slack_var = Var(feas.constraint_set, domain=NonNegativeReals, initialize=1) @@ -391,6 +433,10 @@ def solve(self, model, **kwds): solve_data.results.solver.iterations = solve_data.mip_iter + if config.single_tree: + solve_data.results.solver.num_nodes = solve_data.nlp_iter - \ + (1 if config.init_strategy == 'rNLP' else 0) + return solve_data.results # diff --git a/pyomo/contrib/mindtpy/cut_generation.py b/pyomo/contrib/mindtpy/cut_generation.py index 349deddce77..78f85677c0c 100644 --- a/pyomo/contrib/mindtpy/cut_generation.py +++ b/pyomo/contrib/mindtpy/cut_generation.py @@ -28,18 +28,17 @@ def add_objective_linearization(solve_data, config): expr=sign_adjust * sum( value(MindtPy.jacs[obj][id(var)]) * (var - value(var)) for var in list(EXPR.identify_variables(obj.body))) + - value(obj.body) <= 0) + value(obj.body) <= 0) MindtPy.ECP_constr_map[obj, 
solve_data.mip_iter] = c def add_oa_cuts(target_model, dual_values, solve_data, config, linearize_active=True, linearize_violated=True, - linearize_inactive=False, - use_slack_var=False): + linearize_inactive=False): """Linearizes nonlinear constraints. - For nonconvex problems, turn on 'use_slack_var'. Slack variables will + For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for nonlinear equality constraints. """ for (constr, dual_value) in zip(target_model.MindtPy_utils.constraint_list, @@ -56,98 +55,99 @@ def add_oa_cuts(target_model, dual_values, solve_data, config, rhs = ((0 if constr.upper is None else constr.upper) + (0 if constr.lower is None else constr.lower)) rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs - slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add() + if config.add_slack: + slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add() target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add( expr=copysign(1, sign_adjust * dual_value) - * (sum(value(jacs[constr][var]) * (var - value(var)) - for var in list(EXPR.identify_variables(constr.body))) - + value(constr.body) - rhs) - - slack_var <= 0) + * (sum(value(jacs[constr][var]) * (var - value(var)) + for var in list(EXPR.identify_variables(constr.body))) + + value(constr.body) - rhs) + - (slack_var if config.add_slack else 0) <= 0) else: # Inequality constraint (possibly two-sided) if constr.has_ub() \ - and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \ + and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \ or (linearize_violated and constr.uslack() < 0) \ or (linearize_inactive and constr.uslack() > 0): - if use_slack_var: + if config.add_slack: slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add() target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add( expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) 
- - (slack_var if use_slack_var else 0) + for var in constr_vars) + value(constr.body) + - (slack_var if config.add_slack else 0) <= constr.upper) ) if constr.has_lb() \ - and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \ + and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \ or (linearize_violated and constr.lslack() < 0) \ or (linearize_inactive and constr.lslack() > 0): - if use_slack_var: + if config.add_slack: slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add() target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add( expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) - + (slack_var if use_slack_var else 0) + for var in constr_vars) + value(constr.body) + + (slack_var if config.add_slack else 0) >= constr.lower) ) -def add_oa_equality_relaxation(var_values, duals, solve_data, config, ignore_integrality=False): - """More general case for outer approximation - - This method covers nonlinear inequalities g(x)<=b and g(x)>=b as well as - equalities g(x)=b all in the same linearization call. It combines the dual - with the objective sense to figure out how to generate the cut. - Note that the dual sign is defined as follows (according to IPOPT): - sgn | min | max - -------|-----|----- - g(x)<=b| +1 | -1 - g(x)>=b| -1 | +1 - - Note additionally that the dual value is not strictly neccesary for inequality - constraints, but definitely neccesary for equality constraints. For equality - constraints the cut will always be generated so that the side with the worse objective - function is the 'interior'. - - ignore_integrality: Accepts float values for discrete variables. 
- Useful for cut in initial relaxation - """ - - m = solve_data.mip - MindtPy = m.MindtPy_utils - MindtPy.MindtPy_linear_cuts.nlp_iters.add(solve_data.nlp_iter) - sign_adjust = -1 if solve_data.objective_sense == minimize else 1 - - copy_var_list_values(from_list=var_values, - to_list=MindtPy.variable_list, - config=config, - ignore_integrality=ignore_integrality) - - # generate new constraints - # TODO some kind of special handling if the dual is phenomenally small? - # TODO-romeo conditional for 'global' option, i.e. slack or no slack - jacs = solve_data.jacobians - for constr, dual_value in zip(MindtPy.constraint_list, duals): - if constr.body.polynomial_degree() in (1, 0): - continue - rhs = ((0 if constr.upper is None else constr.upper) - + (0 if constr.lower is None else constr.lower)) - # Properly handle equality constraints and ranged inequalities - # TODO special handling for ranged inequalities? a <= x <= b - rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs - slack_var = MindtPy.MindtPy_linear_cuts.slack_vars.add() - MindtPy.MindtPy_linear_cuts.oa_cuts.add( - expr=copysign(1, sign_adjust * dual_value) - * (sum(value(jacs[constr][var]) * (var - value(var)) - for var in list(EXPR.identify_variables(constr.body))) - + value(constr.body) - rhs) - - slack_var <= 0) +# def add_oa_equality_relaxation(var_values, duals, solve_data, config, ignore_integrality=False): +# """More general case for outer approximation + +# This method covers nonlinear inequalities g(x)<=b and g(x)>=b as well as +# equalities g(x)=b all in the same linearization call. It combines the dual +# with the objective sense to figure out how to generate the cut. +# Note that the dual sign is defined as follows (according to IPOPT): +# sgn | min | max +# -------|-----|----- +# g(x)<=b| +1 | -1 +# g(x)>=b| -1 | +1 + +# Note additionally that the dual value is not strictly neccesary for inequality +# constraints, but definitely neccesary for equality constraints. 
For equality +# constraints the cut will always be generated so that the side with the worse objective +# function is the 'interior'. + +# ignore_integrality: Accepts float values for discrete variables. +# Useful for cut in initial relaxation +# """ + +# m = solve_data.mip +# MindtPy = m.MindtPy_utils +# MindtPy.MindtPy_linear_cuts.nlp_iters.add(solve_data.nlp_iter) +# sign_adjust = -1 if solve_data.objective_sense == minimize else 1 + +# copy_var_list_values(from_list=var_values, +# to_list=MindtPy.variable_list, +# config=config, +# ignore_integrality=ignore_integrality) + +# # generate new constraints +# # TODO some kind of special handling if the dual is phenomenally small? +# # TODO-romeo conditional for 'global' option, i.e. slack or no slack +# jacs = solve_data.jacobians +# for constr, dual_value in zip(MindtPy.constraint_list, duals): +# if constr.body.polynomial_degree() in (1, 0): +# continue +# rhs = ((0 if constr.upper is None else constr.upper) +# + (0 if constr.lower is None else constr.lower)) +# # Properly handle equality constraints and ranged inequalities +# # TODO special handling for ranged inequalities? 
a <= x <= b +# rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs +# slack_var = MindtPy.MindtPy_linear_cuts.slack_vars.add() +# MindtPy.MindtPy_linear_cuts.oa_cuts.add( +# expr=copysign(1, sign_adjust * dual_value) +# * (sum(value(jacs[constr][var]) * (var - value(var)) +# for var in list(EXPR.identify_variables(constr.body))) +# + value(constr.body) - rhs) +# - slack_var <= 0) def add_int_cut(var_values, solve_data, config, feasible=False): - if not config.integer_cuts: + if not config.add_integer_cuts: return config.logger.info("Adding integer cuts") diff --git a/pyomo/contrib/mindtpy/initialization.py b/pyomo/contrib/mindtpy/initialization.py index 8d5d2fdabfb..3c02bf3b465 100644 --- a/pyomo/contrib/mindtpy/initialization.py +++ b/pyomo/contrib/mindtpy/initialization.py @@ -11,6 +11,11 @@ TransformationFactory, maximize, minimize, value, Var) from pyomo.opt import TerminationCondition as tc from pyomo.opt import SolverFactory +from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver +from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem, + handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible, + handle_NLP_subproblem_other_termination) +from pyomo.contrib.mindtpy.util import var_bound_add def MindtPy_initialize_master(solve_data, config): @@ -18,10 +23,13 @@ def MindtPy_initialize_master(solve_data, config): This includes generating the initial cuts require to build the master problem. """ + # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded master problem. 
+ if config.single_tree: + var_bound_add(solve_data, config) + m = solve_data.mip = solve_data.working_model.clone() MindtPy = m.MindtPy_utils - - m.dual.activate() + m.dual.deactivate() if config.strategy == 'OA': calc_jacobians(solve_data, config) # preload jacobians @@ -53,7 +61,15 @@ def MindtPy_initialize_master(solve_data, config): # if config.strategy == 'ECP': # add_ecp_cut(solve_data, config) # else: - solve_NLP_subproblem(solve_data, config) + + fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(solve_data, config) + if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal: + handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config) + elif fixed_nlp_result.solver.termination_condition is tc.infeasible: + handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config) + else: + handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition, + solve_data, config) def init_rNLP(solve_data, config): @@ -63,12 +79,12 @@ def init_rNLP(solve_data, config): config.logger.info( "NLP %s: Solve relaxed integrality" % (solve_data.nlp_iter,)) MindtPy = m.MindtPy_utils - TransformationFactory('core.relax_integrality').apply_to(m) + TransformationFactory('core.relax_integer_vars').apply_to(m) with SuppressInfeasibleWarning(): results = SolverFactory(config.nlp_solver).solve( m, **config.nlp_solver_args) subprob_terminate_cond = results.solver.termination_condition - if subprob_terminate_cond is tc.optimal: + if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal: main_objective = next(m.component_data_objects(Objective, active=True)) nlp_solution_values = list(v.value for v in MindtPy.variable_list) dual_values = list(m.dual[c] for c in MindtPy.constraint_list) @@ -82,10 +98,14 @@ def init_rNLP(solve_data, config): % (solve_data.nlp_iter, value(main_objective.expr), solve_data.LB, solve_data.UB)) if config.strategy == 'OA': - 
copy_var_list_values(m.MindtPy_utils.variable_list, + copy_var_list_values(m.MindtPy_utils.variable_list, solve_data.mip.MindtPy_utils.variable_list, config, ignore_integrality=True) add_oa_cuts(solve_data.mip, dual_values, solve_data, config) + # TODO check if value of the binary or integer varibles is 0/1 or integer value. + for var in solve_data.mip.component_data_objects(ctype=Var): + if var.is_integer(): + var.value = int(round(var.value)) elif subprob_terminate_cond is tc.infeasible: # TODO fail? try something else? config.logger.info( @@ -106,6 +126,7 @@ def init_max_binaries(solve_data, config): """ m = solve_data.working_model.clone() + m.dual.deactivate() MindtPy = m.MindtPy_utils solve_data.mip_subiter += 1 config.logger.info( @@ -125,7 +146,14 @@ def init_max_binaries(solve_data, config): getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate() getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate() - results = SolverFactory(config.mip_solver).solve(m, options=config.mip_solver_args) + opt = SolverFactory(config.mip_solver) + if isinstance(opt, PersistentSolver): + opt.set_instance(m) + mip_args = dict(config.mip_solver_args) + if config.mip_solver == 'gams': + mip_args['add_options'] = mip_args.get('add_options', []) + mip_args['add_options'].append('option optcr=0.0;') + results = opt.solve(m, **mip_args) solve_terminate_cond = results.solver.termination_condition if solve_terminate_cond is tc.optimal: @@ -133,6 +161,7 @@ def init_max_binaries(solve_data, config): MindtPy.variable_list, solve_data.working_model.MindtPy_utils.variable_list, config) + pass # good elif solve_terminate_cond is tc.infeasible: raise ValueError( diff --git a/pyomo/contrib/mindtpy/iterate.py b/pyomo/contrib/mindtpy/iterate.py index cfbe38950bb..415bc813690 100644 --- a/pyomo/contrib/mindtpy/iterate.py +++ b/pyomo/contrib/mindtpy/iterate.py @@ -2,30 +2,33 @@ from __future__ import division from pyomo.contrib.mindtpy.mip_solve import (solve_OA_master, - handle_master_mip_optimal, 
handle_master_mip_other_conditions) + handle_master_mip_optimal, handle_master_mip_other_conditions) from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem, - handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible, - handle_NLP_subproblem_other_termination) -from pyomo.core import minimize, Objective + handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible, + handle_NLP_subproblem_other_termination) +from pyomo.core import minimize, Objective, Var from pyomo.opt import TerminationCondition as tc from pyomo.contrib.gdpopt.util import get_main_elapsed_time def MindtPy_iteration_loop(solve_data, config): working_model = solve_data.working_model - main_objective = next(working_model.component_data_objects(Objective, active=True)) + main_objective = next( + working_model.component_data_objects(Objective, active=True)) while solve_data.mip_iter < config.iteration_limit: + config.logger.info( '---MindtPy Master Iteration %s---' % solve_data.mip_iter) - if algorithm_should_terminate(solve_data, config): + if algorithm_should_terminate(solve_data, config, check_cycling=False): break solve_data.mip_subiter = 0 # solve MILP master problem if config.strategy == 'OA': - master_mip, master_mip_results = solve_OA_master(solve_data, config) + master_mip, master_mip_results = solve_OA_master( + solve_data, config) if master_mip_results.solver.termination_condition is tc.optimal: handle_master_mip_optimal(master_mip, solve_data, config) else: @@ -36,59 +39,61 @@ def MindtPy_iteration_loop(solve_data, config): else: raise NotImplementedError() - if algorithm_should_terminate(solve_data, config): + if algorithm_should_terminate(solve_data, config, check_cycling=True): break - # Solve NLP subproblem - # The constraint linearization happens in the handlers - fix_nlp, fix_nlp_result = solve_NLP_subproblem(solve_data, config) - if fix_nlp_result.solver.termination_condition is tc.optimal: - handle_NLP_subproblem_optimal(fix_nlp, solve_data, config) - elif 
fix_nlp_result.solver.termination_condition is tc.infeasible: - handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config) - else: - handle_NLP_subproblem_other_termination(fix_nlp, fix_nlp_result.solver.termination_condition, - solve_data, config) - # Call the NLP post-solve callback - config.call_after_subproblem_solve(fix_nlp, solve_data) - - if config.strategy == 'PSC': - # If the hybrid algorithm is not making progress, switch to OA. - progress_required = 1E-6 - if main_objective.sense == minimize: - log = solve_data.LB_progress - sign_adjust = 1 + if config.single_tree is False: # if we don't use lazy callback, i.e. LP_NLP + # Solve NLP subproblem + # The constraint linearization happens in the handlers + fixed_nlp, fixed_nlp_result = solve_NLP_subproblem( + solve_data, config) + if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal: + handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config) + elif fixed_nlp_result.solver.termination_condition is tc.infeasible: + handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config) else: - log = solve_data.UB_progress - sign_adjust = -1 - # Maximum number of iterations in which the lower (optimistic) - # bound does not improve before switching to OA - max_nonimprove_iter = 5 - making_progress = True - # TODO-romeo Unneccesary for OA and LOA, right? - for i in range(1, max_nonimprove_iter + 1): - try: - if (sign_adjust * log[-i] - <= (log[-i - 1] + progress_required) - * sign_adjust): - making_progress = False - else: - making_progress = True - break - except IndexError: - # Not enough history yet, keep going. - making_progress = True - break - if not making_progress and ( - config.strategy == 'hPSC' or - config.strategy == 'PSC'): - config.logger.info( - 'Not making enough progress for {} iterations. 
' - 'Switching to OA.'.format(max_nonimprove_iter)) - config.strategy = 'OA' - - -def algorithm_should_terminate(solve_data, config): + handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition, + solve_data, config) + # Call the NLP post-solve callback + config.call_after_subproblem_solve(fixed_nlp, solve_data) + + # if config.strategy == 'PSC': + # # If the hybrid algorithm is not making progress, switch to OA. + # progress_required = 1E-6 + # if main_objective.sense == minimize: + # log = solve_data.LB_progress + # sign_adjust = 1 + # else: + # log = solve_data.UB_progress + # sign_adjust = -1 + # # Maximum number of iterations in which the lower (optimistic) + # # bound does not improve before switching to OA + # max_nonimprove_iter = 5 + # making_progress = True + # # TODO-romeo Unneccesary for OA and LOA, right? + # for i in range(1, max_nonimprove_iter + 1): + # try: + # if (sign_adjust * log[-i] + # <= (log[-i - 1] + progress_required) + # * sign_adjust): + # making_progress = False + # else: + # making_progress = True + # break + # except IndexError: + # # Not enough history yet, keep going. + # making_progress = True + # break + # if not making_progress and ( + # config.strategy == 'hPSC' or + # config.strategy == 'PSC'): + # config.logger.info( + # 'Not making enough progress for {} iterations. ' + # 'Switching to OA.'.format(max_nonimprove_iter)) + # config.strategy = 'OA' + + +def algorithm_should_terminate(solve_data, config, check_cycling): """Check if the algorithm should terminate. Termination conditions based on solver options and progress. 
@@ -128,6 +133,30 @@ def algorithm_should_terminate(solve_data, config): format(solve_data.LB, solve_data.UB)) solve_data.results.solver.termination_condition = tc.maxTimeLimit return True + + # Cycling check + if config.cycling_check == True and solve_data.mip_iter >= 1 and check_cycling: + temp = [] + for var in solve_data.mip.component_data_objects(ctype=Var): + if var.is_integer(): + temp.append(int(round(var.value))) + solve_data.curr_int_sol = temp + + if solve_data.curr_int_sol == solve_data.prev_int_sol: + config.logger.info( + 'Cycling happens after {} master iterations. ' + 'This issue happens when the NLP subproblem violates constraint qualification. ' + 'Convergence to optimal solution is not guaranteed.' + .format(solve_data.mip_iter)) + config.logger.info( + 'Final bound values: LB: {} UB: {}'. + format(solve_data.LB, solve_data.UB)) + # TODO determine solve_data.LB, solve_data.UB is inf or -inf. + solve_data.results.solver.termination_condition = tc.feasible + return True + + solve_data.prev_int_sol = solve_data.curr_int_sol + # if not algorithm_is_making_progress(solve_data, config): # config.logger.debug( # 'Algorithm is not making enough progress. 
' diff --git a/pyomo/contrib/mindtpy/mip_solve.py b/pyomo/contrib/mindtpy/mip_solve.py index 7c8cd671794..7bd04930478 100644 --- a/pyomo/contrib/mindtpy/mip_solve.py +++ b/pyomo/contrib/mindtpy/mip_solve.py @@ -2,17 +2,36 @@ from __future__ import division from pyomo.contrib.gdpopt.util import copy_var_list_values -from pyomo.core import Constraint, Expression, Objective, minimize, value +from pyomo.core import Constraint, Expression, Objective, minimize, value, Var from pyomo.opt import TerminationCondition as tc from pyomo.opt import SolutionStatus, SolverFactory from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, _DoNothing from pyomo.contrib.gdpopt.mip_solve import distinguish_mip_infeasible_or_unbounded +from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver + +from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem, + handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible, + handle_NLP_subproblem_other_termination, solve_NLP_feas) +from pyomo.contrib.mindtpy.cut_generation import (add_oa_cuts, + add_int_cut) +from pyomo.contrib.gdpopt.util import copy_var_list_values, identify_variables +from math import copysign +from pyomo.environ import * +from pyomo.core import Constraint, minimize, value +from pyomo.core.expr import current as EXPR +from math import fabs + +from pyomo.repn import generate_standard_repn + +from pyomo.common.dependencies import attempt_import + +single_tree, single_tree_available = attempt_import( + 'pyomo.contrib.mindtpy.single_tree') def solve_OA_master(solve_data, config): solve_data.mip_iter += 1 - master_mip = solve_data.mip.clone() - MindtPy = master_mip.MindtPy_utils + MindtPy = solve_data.mip.MindtPy_utils config.logger.info( 'MIP %s: Solve master problem.' 
% (solve_data.mip_iter,)) @@ -22,40 +41,88 @@ def solve_OA_master(solve_data, config): c.deactivate() MindtPy.MindtPy_linear_cuts.activate() - main_objective = next(master_mip.component_data_objects(Objective, active=True)) + main_objective = next( + solve_data.mip.component_data_objects(Objective, active=True)) main_objective.deactivate() - sign_adjust = 1 if main_objective.sense == minimize else -1 - MindtPy.MindtPy_penalty_expr = Expression( - expr=sign_adjust * config.OA_penalty_factor * sum( - v for v in MindtPy.MindtPy_linear_cuts.slack_vars[...])) + sign_adjust = 1 if main_objective.sense == minimize else - 1 + MindtPy.del_component('MindtPy_oa_obj') - MindtPy.MindtPy_oa_obj = Objective( - expr=main_objective.expr + MindtPy.MindtPy_penalty_expr, - sense=main_objective.sense) + if config.add_slack: + MindtPy.del_component('MindtPy_penalty_expr') - # Deactivate extraneous IMPORT/EXPORT suffixes - getattr(master_mip, 'ipopt_zL_out', _DoNothing()).deactivate() - getattr(master_mip, 'ipopt_zU_out', _DoNothing()).deactivate() + MindtPy.MindtPy_penalty_expr = Expression( + expr=sign_adjust * config.OA_penalty_factor * sum( + v for v in MindtPy.MindtPy_linear_cuts.slack_vars[...])) - # master_mip.pprint() #print oa master problem for debugging - with SuppressInfeasibleWarning(): - master_mip_results = SolverFactory(config.mip_solver).solve( - master_mip, **config.mip_solver_args) - if master_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + MindtPy.MindtPy_oa_obj = Objective( + expr=main_objective.expr + MindtPy.MindtPy_penalty_expr, + sense=main_objective.sense) + else: + MindtPy.MindtPy_oa_obj = Objective( + expr=main_objective.expr, + sense=main_objective.sense) + # Deactivate extraneous IMPORT/EXPORT suffixes + getattr(solve_data.mip, 'ipopt_zL_out', _DoNothing()).deactivate() + getattr(solve_data.mip, 'ipopt_zU_out', _DoNothing()).deactivate() + + masteropt = SolverFactory(config.mip_solver) + # determine if persistent solver is called. 
+ if isinstance(masteropt, PersistentSolver): + masteropt.set_instance(solve_data.mip, symbolic_solver_labels=True) + if config.single_tree: + # Configuration of lazy callback + lazyoa = masteropt._solver_model.register_callback( + single_tree.LazyOACallback_cplex) + # pass necessary data and parameters to lazyoa + lazyoa.master_mip = solve_data.mip + lazyoa.solve_data = solve_data + lazyoa.config = config + lazyoa.opt = masteropt + masteropt._solver_model.set_warning_stream(None) + masteropt._solver_model.set_log_stream(None) + masteropt._solver_model.set_error_stream(None) + masteropt.options['timelimit'] = config.time_limit + mip_args = dict(config.mip_solver_args) + if config.mip_solver == 'gams': + mip_args['add_options'] = mip_args.get('add_options', []) + mip_args['add_options'].append('option optcr=0.0;') + master_mip_results = masteropt.solve( + solve_data.mip, **mip_args) # , tee=True) + + if master_mip_results.solver.termination_condition is tc.optimal: + if config.single_tree: + if main_objective.sense == minimize: + solve_data.LB = max( + master_mip_results.problem.lower_bound, solve_data.LB) + solve_data.LB_progress.append(solve_data.LB) + else: + solve_data.UB = min( + master_mip_results.problem.upper_bound, solve_data.UB) + solve_data.UB_progress.append(solve_data.UB) + + elif master_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: # Linear solvers will sometimes tell me that it's infeasible or # unbounded during presolve, but fails to distinguish. We need to # resolve with a solver option flag on. 
- master_mip_results, _ = distinguish_mip_infeasible_or_unbounded(master_mip, config) + master_mip_results, _ = distinguish_mip_infeasible_or_unbounded( + solve_data.mip, config) - return master_mip, master_mip_results + return solve_data.mip, master_mip_results -def handle_master_mip_optimal(master_mip, solve_data, config): +def handle_master_mip_optimal(master_mip, solve_data, config, copy=True): """Copy the result to working model and update upper or lower bound""" # proceed. Just need integer values MindtPy = master_mip.MindtPy_utils - main_objective = next(master_mip.component_data_objects(Objective, active=True)) + main_objective = next( + master_mip.component_data_objects(Objective, active=True)) + # check if the value of binary variable is valid + for var in MindtPy.variable_list: + if var.value == None and var.is_integer(): + config.logger.warning( + "Integer variable {} not initialized. It is set to it's lower bound when using the initial_binary initialization method".format(var.name)) + var.value = var.lb # nlp_var.bounds[0] copy_var_list_values( master_mip.MindtPy_utils.variable_list, solve_data.working_model.MindtPy_utils.variable_list, @@ -114,22 +181,21 @@ def handle_master_mip_other_conditions(master_mip, master_mip_results, solve_dat def handle_master_mip_infeasible(master_mip, solve_data, config): - config.logger.info( - 'MILP master problem is infeasible. ' - 'Problem may have no more feasible ' - 'binary configurations.') - if solve_data.mip_iter == 1: - config.logger.warn( - 'MindtPy initialization may have generated poor ' - 'quality cuts.') - # set optimistic bound to infinity - main_objective = next(master_mip.component_data_objects(Objective, active=True)) - if main_objective.sense == minimize: - solve_data.LB = float('inf') - solve_data.LB_progress.append(solve_data.UB) - else: - solve_data.UB = float('-inf') - solve_data.UB_progress.append(solve_data.UB) + config.logger.info( + 'MILP master problem is infeasible. 
' + 'Problem may have no more feasible ' + 'binary configurations.') + if solve_data.mip_iter == 1: + config.logger.warning( + 'MindtPy initialization may have generated poor ' + 'quality cuts.') + # set optimistic bound to infinity + main_objective = next( + master_mip.component_data_objects(Objective, active=True)) + if main_objective.sense == minimize: + solve_data.LB_progress.append(solve_data.LB) + else: + solve_data.UB_progress.append(solve_data.UB) def handle_master_mip_max_timelimit(master_mip, solve_data, config): @@ -166,8 +232,13 @@ def handle_master_mip_unbounded(master_mip, solve_data, config): 'Master MILP was unbounded. ' 'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. ' 'You can change this bound with the option obj_bound.'.format(config.obj_bound)) - main_objective = next(master_mip.component_data_objects(Objective, active=True)) - MindtPy.objective_bound = Constraint(expr=(-config.obj_bound, main_objective.expr, config.obj_bound)) + main_objective = next( + master_mip.component_data_objects(Objective, active=True)) + MindtPy.objective_bound = Constraint( + expr=(-config.obj_bound, main_objective.expr, config.obj_bound)) with SuppressInfeasibleWarning(): - master_mip_results = SolverFactory(config.mip_solver).solve( + opt = SolverFactory(config.mip_solver) + if isinstance(opt, PersistentSolver): + opt.set_instance(master_mip) + master_mip_results = opt.solve( master_mip, **config.mip_solver_args) diff --git a/pyomo/contrib/mindtpy/nlp_solve.py b/pyomo/contrib/mindtpy/nlp_solve.py index b5153f5ee44..a1c97f85e3e 100644 --- a/pyomo/contrib/mindtpy/nlp_solve.py +++ b/pyomo/contrib/mindtpy/nlp_solve.py @@ -2,11 +2,11 @@ from __future__ import division from pyomo.contrib.mindtpy.cut_generation import (add_oa_cuts, - add_int_cut) + add_int_cut) from pyomo.contrib.mindtpy.util import add_feas_slacks from pyomo.contrib.gdpopt.util import copy_var_list_values from pyomo.core import (Constraint, Objective, 
TransformationFactory, Var, - minimize, value) + minimize, value) from pyomo.core.kernel.component_map import ComponentMap from pyomo.opt import TerminationCondition as tc from pyomo.opt import SolverFactory @@ -16,7 +16,7 @@ def solve_NLP_subproblem(solve_data, config): """ Solves fixed NLP with fixed working model binaries - Sets up local working model `fix_nlp` + Sets up local working model `fixed_nlp` Fixes binaries Sets continuous variables to initial var values Precomputes dual values @@ -26,55 +26,76 @@ def solve_NLP_subproblem(solve_data, config): Returns the fixed-NLP model and the solver results """ - fix_nlp = solve_data.working_model.clone() - MindtPy = fix_nlp.MindtPy_utils - main_objective = next(fix_nlp.component_data_objects(Objective, active=True)) + fixed_nlp = solve_data.working_model.clone() + MindtPy = fixed_nlp.MindtPy_utils solve_data.nlp_iter += 1 config.logger.info('NLP %s: Solve subproblem for fixed binaries.' % (solve_data.nlp_iter,)) # Set up NLP - TransformationFactory('core.fix_discrete').apply_to(fix_nlp) - - # restore original variable values - for nlp_var, orig_val in zip( - MindtPy.variable_list, - solve_data.initial_var_values): - if not nlp_var.fixed and not nlp_var.is_binary(): - nlp_var.value = orig_val + TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp) MindtPy.MindtPy_linear_cuts.deactivate() - fix_nlp.tmp_duals = ComponentMap() - for c in fix_nlp.component_data_objects(ctype=Constraint, active=True, - descend_into=True): - rhs = ((0 if c.upper is None else c.upper) - + (0 if c.lower is None else c.lower)) - sign_adjust = 1 if value(c.upper) is None else -1 - fix_nlp.tmp_duals[c] = sign_adjust * max(0, - sign_adjust * (rhs - value(c.body))) - # TODO check sign_adjust + fixed_nlp.tmp_duals = ComponentMap() + # tmp_duals are the value of the dual variables stored before using deactivate trivial contraints + # The values of the duals are computed as follows: (Complementary Slackness) + # + # | constraint | c_geq 
| status at x1 | tmp_dual (violation) | + # |------------|-------|--------------|----------------------| + # | g(x) <= b | -1 | g(x1) <= b | 0 | + # | g(x) <= b | -1 | g(x1) > b | g(x1) - b | + # | g(x) >= b | +1 | g(x1) >= b | 0 | + # | g(x) >= b | +1 | g(x1) < b | b - g(x1) | + evaluation_error = False + for c in fixed_nlp.component_data_objects(ctype=Constraint, active=True, + descend_into=True): + # We prefer to include the upper bound as the right hand side since we are + # considering c by default a (hopefully) convex function, which would make + # c >= lb a nonconvex inequality which we wouldn't like to add linearizations + # if we don't have to + rhs = c.upper if c.has_ub() else c.lower + c_geq = -1 if c.has_ub() else 1 + # c_leq = 1 if c.has_ub else -1 + try: + fixed_nlp.tmp_duals[c] = c_geq * max( + 0, c_geq*(rhs - value(c.body))) + except (ValueError, OverflowError) as error: + fixed_nlp.tmp_duals[c] = None + evaluation_error = True + if evaluation_error: + for nlp_var, orig_val in zip( + MindtPy.variable_list, + solve_data.initial_var_values): + if not nlp_var.fixed and not nlp_var.is_binary(): + nlp_var.value = orig_val + # fixed_nlp.tmp_duals[c] = c_leq * max( + # 0, c_leq*(value(c.body) - rhs)) + # TODO: change logic to c_leq based on benchmarking + TransformationFactory('contrib.deactivate_trivial_constraints')\ - .apply_to(fix_nlp, tmp=True, ignore_infeasible=True) + .apply_to(fixed_nlp, tmp=True, ignore_infeasible=True) # Solve the NLP with SuppressInfeasibleWarning(): results = SolverFactory(config.nlp_solver).solve( - fix_nlp, **config.nlp_solver_args) - return fix_nlp, results + fixed_nlp, **config.nlp_solver_args) + return fixed_nlp, results -def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config): +def handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config): """Copies result to working model, updates bound, adds OA and integer cut, stores best solution if new one is best""" copy_var_list_values( - 
fix_nlp.MindtPy_utils.variable_list, + fixed_nlp.MindtPy_utils.variable_list, solve_data.working_model.MindtPy_utils.variable_list, config) - for c in fix_nlp.tmp_duals: - if fix_nlp.dual.get(c, None) is None: - fix_nlp.dual[c] = fix_nlp.tmp_duals[c] - dual_values = list(fix_nlp.dual[c] for c in fix_nlp.MindtPy_utils.constraint_list) - - main_objective = next(fix_nlp.component_data_objects(Objective, active=True)) + for c in fixed_nlp.tmp_duals: + if fixed_nlp.dual.get(c, None) is None: + fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] + dual_values = list(fixed_nlp.dual[c] + for c in fixed_nlp.MindtPy_utils.constraint_list) + + main_objective = next( + fixed_nlp.component_data_objects(Objective, active=True)) if main_objective.sense == minimize: solve_data.UB = min(value(main_objective.expr), solve_data.UB) solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1] @@ -91,11 +112,11 @@ def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config): solve_data.LB, solve_data.UB)) if solve_data.solution_improved: - solve_data.best_solution_found = fix_nlp.clone() + solve_data.best_solution_found = fixed_nlp.clone() # Add the linear cut if config.strategy == 'OA': - copy_var_list_values(fix_nlp.MindtPy_utils.variable_list, + copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list, solve_data.mip.MindtPy_utils.variable_list, config) add_oa_cuts(solve_data.mip, dual_values, solve_data, config) @@ -108,14 +129,14 @@ def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config): # ConstraintList, which is not activated by default. However, it # may be activated as needed in certain situations or for certain # values of option flags. 
- var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) if config.add_integer_cuts: add_int_cut(var_values, solve_data, config, feasible=True) - config.call_after_subproblem_feasible(fix_nlp, solve_data) + config.call_after_subproblem_feasible(fixed_nlp, solve_data) -def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config): +def handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config): """Solve feasibility problem, add cut according to strategy. The solution of the feasibility problem is copied to the working model. @@ -123,27 +144,27 @@ def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config): # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem was locally infeasible.') - for c in fix_nlp.component_data_objects(ctype=Constraint): - rhs = ((0 if c.upper is None else c.upper) - + (0 if c.lower is None else c.lower)) - sign_adjust = 1 if value(c.upper) is None else -1 - fix_nlp.dual[c] = (sign_adjust - * max(0, sign_adjust * (rhs - value(c.body)))) - dual_values = list(fix_nlp.dual[c] for c in fix_nlp.MindtPy_utils.constraint_list) - - if config.strategy == 'PSC' or config.strategy == 'GBD': - for var in fix_nlp.component_data_objects(ctype=Var, descend_into=True): - fix_nlp.ipopt_zL_out[var] = 0 - fix_nlp.ipopt_zU_out[var] = 0 - if var.ub is not None and abs(var.ub - value(var)) < config.bound_tolerance: - fix_nlp.ipopt_zL_out[var] = 1 - elif var.lb is not None and abs(value(var) - var.lb) < config.bound_tolerance: - fix_nlp.ipopt_zU_out[var] = -1 - - elif config.strategy == 'OA': + for c in fixed_nlp.component_data_objects(ctype=Constraint): + rhs = c.upper if c. 
has_ub() else c.lower + c_geq = -1 if c.has_ub() else 1 + fixed_nlp.dual[c] = (c_geq + * max(0, c_geq * (rhs - value(c.body)))) + dual_values = list(fixed_nlp.dual[c] + for c in fixed_nlp.MindtPy_utils.constraint_list) + + # if config.strategy == 'PSC' or config.strategy == 'GBD': + # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True): + # fixed_nlp.ipopt_zL_out[var] = 0 + # fixed_nlp.ipopt_zU_out[var] = 0 + # if var.has_ub() and abs(var.ub - value(var)) < config.bound_tolerance: + # fixed_nlp.ipopt_zL_out[var] = 1 + # elif var.has_lb() and abs(value(var) - var.lb) < config.bound_tolerance: + # fixed_nlp.ipopt_zU_out[var] = -1 + + if config.strategy == 'OA': config.logger.info('Solving feasibility problem') if config.initial_feas: - # add_feas_slacks(fix_nlp, solve_data) + # add_feas_slacks(fixed_nlp, solve_data) # config.initial_feas = False feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config) copy_var_list_values(feas_NLP.MindtPy_utils.variable_list, @@ -151,21 +172,24 @@ def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config): config) add_oa_cuts(solve_data.mip, dual_values, solve_data, config) # Add an integer cut to exclude this discrete option - var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) if config.add_integer_cuts: - add_int_cut(var_values, solve_data, config) # excludes current discrete option + # excludes current discrete option + add_int_cut(var_values, solve_data, config) -def handle_NLP_subproblem_other_termination(fix_nlp, termination_condition, +def handle_NLP_subproblem_other_termination(fixed_nlp, termination_condition, solve_data, config): """Case that fix-NLP is neither optimal nor infeasible (i.e. max_iterations)""" if termination_condition is tc.maxIterations: # TODO try something else? Reinitialize with different initial value? 
config.logger.info( 'NLP subproblem failed to converge within iteration limit.') - var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list) + var_values = list( + v.value for v in fixed_nlp.MindtPy_utils.variable_list) if config.add_integer_cuts: - add_int_cut(var_values, solve_data, config) # excludes current discrete option + # excludes current discrete option + add_int_cut(var_values, solve_data, config) else: raise ValueError( 'MindtPy unable to handle NLP subproblem termination ' @@ -177,26 +201,26 @@ def solve_NLP_feas(solve_data, config): Returns: Result values and dual values """ - fix_nlp = solve_data.working_model.clone() - add_feas_slacks(fix_nlp) - MindtPy = fix_nlp.MindtPy_utils - next(fix_nlp.component_data_objects(Objective, active=True)).deactivate() - for constr in fix_nlp.component_data_objects( + fixed_nlp = solve_data.working_model.clone() + add_feas_slacks(fixed_nlp) + MindtPy = fixed_nlp.MindtPy_utils + next(fixed_nlp.component_data_objects(Objective, active=True)).deactivate() + for constr in fixed_nlp.component_data_objects( ctype=Constraint, active=True, descend_into=True): - if constr.body.polynomial_degree() not in [0,1]: + if constr.body.polynomial_degree() not in [0, 1]: constr.deactivate() MindtPy.MindtPy_feas.activate() MindtPy.MindtPy_feas_obj = Objective( expr=sum(s for s in MindtPy.MindtPy_feas.slack_var[...]), sense=minimize) - TransformationFactory('core.fix_discrete').apply_to(fix_nlp) + TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp) with SuppressInfeasibleWarning(): feas_soln = SolverFactory(config.nlp_solver).solve( - fix_nlp, **config.nlp_solver_args) + fixed_nlp, **config.nlp_solver_args) subprob_terminate_cond = feas_soln.solver.termination_condition - if subprob_terminate_cond is tc.optimal: + if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal: copy_var_list_values( MindtPy.variable_list, solve_data.working_model.MindtPy_utils.variable_list, @@ -212,17 
+236,14 @@ def solve_NLP_feas(solve_data, config): var_values = [v.value for v in MindtPy.variable_list] duals = [0 for _ in MindtPy.constraint_list] - for i, constr in enumerate(MindtPy.constraint_list): - # TODO rhs only works if constr.upper and constr.lower do not both have values. - # Sometimes you might have 1 <= expr <= 1. This would give an incorrect rhs of 2. - rhs = ((0 if constr.upper is None else constr.upper) - + (0 if constr.lower is None else constr.lower)) - sign_adjust = 1 if value(constr.upper) is None else -1 - duals[i] = sign_adjust * max( - 0, sign_adjust * (rhs - value(constr.body))) + for i, c in enumerate(MindtPy.constraint_list): + rhs = c.upper if c. has_ub() else c.lower + c_geq = -1 if c.has_ub() else 1 + duals[i] = c_geq * max( + 0, c_geq * (rhs - value(c.body))) if value(MindtPy.MindtPy_feas_obj.expr) == 0: raise ValueError( 'Problem is not feasible, check NLP solver') - return fix_nlp, feas_soln + return fixed_nlp, feas_soln diff --git a/pyomo/contrib/mindtpy/single_tree.py b/pyomo/contrib/mindtpy/single_tree.py new file mode 100644 index 00000000000..6dd0508bd6b --- /dev/null +++ b/pyomo/contrib/mindtpy/single_tree.py @@ -0,0 +1,240 @@ +from __future__ import division + + +from pyomo.core import Constraint, Expression, Objective, minimize, value, Var +from pyomo.opt import TerminationCondition as tc +from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem, + handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible, + handle_NLP_subproblem_other_termination, solve_NLP_feas) +from pyomo.contrib.gdpopt.util import copy_var_list_values, identify_variables +from math import copysign +from pyomo.environ import * +from pyomo.core.expr import current as EXPR +from math import fabs +from pyomo.repn import generate_standard_repn +import logging +from pyomo.common.dependencies import attempt_import +import cplex +from cplex.callbacks import LazyConstraintCallback + + +class LazyOACallback_cplex(LazyConstraintCallback): + 
"""Inherent class in Cplex to call Lazy callback.""" + + def copy_lazy_var_list_values(self, opt, from_list, to_list, config, + skip_stale=False, skip_fixed=True, + ignore_integrality=False): + """Copy variable values from one list to another. + + Rounds to Binary/Integer if neccessary + Sets to zero for NonNegativeReals if neccessary + """ + for v_from, v_to in zip(from_list, to_list): + if skip_stale and v_from.stale: + continue # Skip stale variable values. + if skip_fixed and v_to.is_fixed(): + continue # Skip fixed variables. + try: + v_val = self.get_values( + opt._pyomo_var_to_solver_var_map[v_from]) + v_to.set_value(v_val) + if skip_stale: + v_to.stale = False + except ValueError: + # Snap the value to the bounds + if v_to.has_lb() and v_val < v_to.lb and v_to.lb - v_val <= config.zero_tolerance: + v_to.set_value(v_to.lb) + elif v_to.has_ub() and v_val > v_to.ub and v_val - v_to.ub <= config.zero_tolerance: + v_to.set_value(v_to.ub) + # ... or the nearest integer + elif v_to.is_integer(): + rounded_val = int(round(v_val)) + if (ignore_integrality or fabs(v_val - rounded_val) <= config.integer_tolerance) \ + and rounded_val in v_to.domain: + v_to.set_value(rounded_val) + else: + raise + + def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt, + linearize_active=True, + linearize_violated=True, + linearize_inactive=False): + """Add oa_cuts through Cplex inherent function self.add()""" + + for (constr, dual_value) in zip(target_model.MindtPy_utils.constraint_list, + dual_values): + if constr.body.polynomial_degree() in (0, 1): + continue + + constr_vars = list(identify_variables(constr.body)) + jacs = solve_data.jacobians + + # Equality constraint (makes the problem nonconvex) + if constr.has_ub() and constr.has_lb() and constr.upper == constr.lower: + sign_adjust = -1 if solve_data.objective_sense == minimize else 1 + rhs = ((0 if constr.upper is None else constr.upper) + + (0 if constr.lower is None else constr.lower)) + rhs = 
constr.lower if constr.has_lb() and constr.has_ub() else rhs + + # since the cplex requires the lazy cuts in cplex type, we need to transform the pyomo expression into cplex expression + pyomo_expr = copysign(1, sign_adjust * dual_value) * (sum(value(jacs[constr][var]) * ( + var - value(var)) for var in list(EXPR.identify_variables(constr.body))) + value(constr.body) - rhs) + cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) + cplex_rhs = -generate_standard_repn(pyomo_expr).constant + self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), + sense="L", + rhs=cplex_rhs) + else: # Inequality constraint (possibly two-sided) + if constr.has_ub() \ + and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \ + or (linearize_violated and constr.uslack() < 0) \ + or (linearize_inactive and constr.uslack() > 0): + + pyomo_expr = sum( + value(jacs[constr][var])*(var - var.value) for var in constr_vars) + value(constr.body) + cplex_rhs = -generate_standard_repn(pyomo_expr).constant + cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) + self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), + sense="L", + rhs=constr.upper.value+cplex_rhs) + if constr.has_lb() \ + and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \ + or (linearize_violated and constr.lslack() < 0) \ + or (linearize_inactive and constr.lslack() > 0): + pyomo_expr = sum(value(jacs[constr][var]) * (var - self.get_values( + opt._pyomo_var_to_solver_var_map[var])) for var in constr_vars) + value(constr.body) + cplex_rhs = -generate_standard_repn(pyomo_expr).constant + cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) + self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), + sense="G", + rhs=constr.lower.value + cplex_rhs) + + def handle_lazy_master_mip_feasible_sol(self, master_mip, solve_data, config, opt): + """ This function is called during 
the branch and bound of master mip, more exactly when a feasible solution is found and LazyCallback is activated. + Copy the result to working model and update upper or lower bound + In LP-NLP, upper or lower bound are updated during solving the master problem + """ + # proceed. Just need integer values + MindtPy = master_mip.MindtPy_utils + main_objective = next( + master_mip.component_data_objects(Objective, active=True)) + + # this value copy is useful since we need to fix subproblem based on the solution of the master problem + self.copy_lazy_var_list_values(opt, + master_mip.MindtPy_utils.variable_list, + solve_data.working_model.MindtPy_utils.variable_list, + config) + config.logger.info( + 'MIP %s: OBJ: %s LB: %s UB: %s' + % (solve_data.mip_iter, value(MindtPy.MindtPy_oa_obj.expr), + solve_data.LB, solve_data.UB)) + + def handle_lazy_NLP_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): + """Copies result to mip(explaination see below), updates bound, adds OA and integer cut, + stores best solution if new one is best""" + for c in fixed_nlp.tmp_duals: + if fixed_nlp.dual.get(c, None) is None: + fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] + dual_values = list(fixed_nlp.dual[c] + for c in fixed_nlp.MindtPy_utils.constraint_list) + + main_objective = next( + fixed_nlp.component_data_objects(Objective, active=True)) + if main_objective.sense == minimize: + solve_data.UB = min(value(main_objective.expr), solve_data.UB) + solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1] + solve_data.UB_progress.append(solve_data.UB) + else: + solve_data.LB = max(value(main_objective.expr), solve_data.LB) + solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1] + solve_data.LB_progress.append(solve_data.LB) + + config.logger.info( + 'NLP {}: OBJ: {} LB: {} UB: {}' + .format(solve_data.nlp_iter, + value(main_objective.expr), + solve_data.LB, solve_data.UB)) + + if solve_data.solution_improved: + solve_data.best_solution_found = 
fixed_nlp.clone() + + if config.strategy == 'OA': + # In OA algorithm, OA cuts are generated based on the solution of the subproblem + # We need to first copy the value of variables from the subproblem and then add cuts + # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts() + copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list, + solve_data.mip.MindtPy_utils.variable_list, + config) + self.add_lazy_oa_cuts( + solve_data.mip, dual_values, solve_data, config, opt) + + def handle_lazy_NLP_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): + """Solve feasibility problem, add cut according to strategy. + + The solution of the feasibility problem is copied to the working model. + """ + # TODO try something else? Reinitialize with different initial + # value? + config.logger.info('NLP subproblem was locally infeasible.') + for c in fixed_nlp.component_data_objects(ctype=Constraint): + rhs = ((0 if c.upper is None else c.upper) + + (0 if c.lower is None else c.lower)) + sign_adjust = 1 if value(c.upper) is None else -1 + fixed_nlp.dual[c] = (sign_adjust + * max(0, sign_adjust * (rhs - value(c.body)))) + dual_values = list(fixed_nlp.dual[c] + for c in fixed_nlp.MindtPy_utils.constraint_list) + + if config.strategy == 'OA': + config.logger.info('Solving feasibility problem') + if config.initial_feas: + # config.initial_feas = False + feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config) + # In OA algorithm, OA cuts are generated based on the solution of the subproblem + # We need to first copy the value of variables from the subproblem and then add cuts + copy_var_list_values(feas_NLP.MindtPy_utils.variable_list, + solve_data.mip.MindtPy_utils.variable_list, + config) + self.add_lazy_oa_cuts( + solve_data.mip, dual_values, solve_data, config, opt) + + def handle_lazy_NLP_subproblem_other_termination(self, fixed_nlp, termination_condition, + solve_data, config): + """Case that fix-NLP is neither 
optimal nor infeasible (i.e. max_iterations)""" + if termination_condition is tc.maxIterations: + # TODO try something else? Reinitialize with different initial value? + config.logger.info( + 'NLP subproblem failed to converge within iteration limit.') + var_values = list( + v.value for v in fixed_nlp.MindtPy_utils.variable_list) + else: + raise ValueError( + 'MindtPy unable to handle NLP subproblem termination ' + 'condition of {}'.format(termination_condition)) + + def __call__(self): + solve_data = self.solve_data + config = self.config + opt = self.opt + master_mip = self.master_mip + cpx = opt._solver_model # Cplex model + + self.handle_lazy_master_mip_feasible_sol( + master_mip, solve_data, config, opt) + + # solve subproblem + # Solve NLP subproblem + # The constraint linearization happens in the handlers + fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(solve_data, config) + + # add oa cuts + if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal: + self.handle_lazy_NLP_subproblem_optimal( + fixed_nlp, solve_data, config, opt) + elif fixed_nlp_result.solver.termination_condition is tc.infeasible: + self.handle_lazy_NLP_subproblem_infeasible( + fixed_nlp, solve_data, config, opt) + else: + self.handle_lazy_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition, + solve_data, config) + diff --git a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py index 454a035c051..b91a1a264ce 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py @@ -54,7 +54,7 @@ def __init__(self, *args, **kwargs): # DISCRETE VARIABLES Y = m.Y = Var(J, domain=Binary, initialize=initY) # CONTINUOUS VARIABLES - X = m.X = Var(I, domain=NonNegativeReals, initialize=initX) + X = m.X = Var(I, domain=NonNegativeReals, initialize=initX, bounds=(0, 2)) """Constraint definitions""" # 
CONSTRAINTS diff --git a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py index f335ca7614d..5d0151e2926 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py @@ -47,7 +47,7 @@ def __init__(self, *args, **kwargs): # DISCRETE VARIABLES Y = m.Y = Var(J, domain=Binary, initialize=initY) # CONTINUOUS VARIABLES - X = m.X = Var(I, domain=Reals, initialize=initX) + X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-1, 50)) """Constraint definitions""" # CONSTRAINTS diff --git a/pyomo/contrib/mindtpy/tests/alan.py b/pyomo/contrib/mindtpy/tests/alan.py deleted file mode 100644 index 7348e535362..00000000000 --- a/pyomo/contrib/mindtpy/tests/alan.py +++ /dev/null @@ -1,51 +0,0 @@ -# MINLP written by GAMS Convert from alan.gms instance in MINLPLib (http://www.minlplib.org/alan.html) -# Original problem appearing in Manne, Alan S, GAMS/MINOS: Three examples, Tech. Rep., -# Department of Operations Research, Stanford University, 1986. 
-# -# Equation counts -# Total E G L N X C B -# 8 3 0 5 0 0 0 0 -# -# Variable counts -# x b i s1s s2s sc si -# Total cont binary integer sos1 sos2 scont sint -# 9 5 4 0 0 0 0 0 -# FX 0 0 0 0 0 0 0 0 -# -# Nonzero counts -# Total const NL DLL -# 24 21 3 0 -# -# Reformulation has removed 1 variable and 1 equation - - -from pyomo.environ import * - -model = m = ConcreteModel() - -m.x1 = Var(within=Reals, bounds=(0, None), initialize=0.302884615384618) -m.x2 = Var(within=Reals, bounds=(0, None), initialize=0.0865384615384593) -m.x3 = Var(within=Reals, bounds=(0, None), initialize=0.504807692307693) -m.x4 = Var(within=Reals, bounds=(0, None), initialize=0.10576923076923) -m.b6 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b7 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b8 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b9 = Var(within=Binary, bounds=(0, 1), initialize=0) - -m.obj = Objective( - expr=m.x1 * (4 * m.x1 + 3 * m.x2 - m.x3) + m.x2 * (3 * m.x1 + 6 * m.x2 + m.x3) + m.x3 * (m.x2 - m.x1 + 10 * m.x3) - , sense=minimize) - -m.c1 = Constraint(expr=m.x1 + m.x2 + m.x3 + m.x4 == 1) - -m.c2 = Constraint(expr=8 * m.x1 + 9 * m.x2 + 12 * m.x3 + 7 * m.x4 == 10) - -m.c4 = Constraint(expr=m.x1 - m.b6 <= 0) - -m.c5 = Constraint(expr=m.x2 - m.b7 <= 0) - -m.c6 = Constraint(expr=m.x3 - m.b8 <= 0) - -m.c7 = Constraint(expr=m.x4 - m.b9 <= 0) - -m.c8 = Constraint(expr=m.b6 + m.b7 + m.b8 + m.b9 <= 3) diff --git a/pyomo/contrib/mindtpy/tests/batchdes.py b/pyomo/contrib/mindtpy/tests/batchdes.py deleted file mode 100644 index 49b270cf15c..00000000000 --- a/pyomo/contrib/mindtpy/tests/batchdes.py +++ /dev/null @@ -1,85 +0,0 @@ -# MINLP written by GAMS Convert from batchdes.gms instance in MINLPLib (http://www.minlplib.org/batchdes.html) -# Original problem appearing in Kocis, Gary R and Grossmann, I E, Global Optimization of Nonconvex MINLP -# Problems in Process Synthesis, Industrial and Engineering Chemistry Research, 27:8, 1988, 1407-1421. 
-# -# Equation counts -# Total E G L N X C B -# 20 7 12 1 0 0 0 0 -# -# Variable counts -# x b i s1s s2s sc si -# Total cont binary integer sos1 sos2 scont sint -# 20 11 9 0 0 0 0 0 -# FX 0 0 0 0 0 0 0 0 -# -# Nonzero counts -# Total const NL DLL -# 53 43 10 0 -# -# Reformulation has removed 1 variable and 1 equation - - -from pyomo.environ import * - -model = m = ConcreteModel() - -m.b1 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b2 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b3 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b4 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b5 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b6 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b7 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b8 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b9 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.x10 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=6.70502272492805) -m.x11 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=7.11048783303622) -m.x12 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=7.30700912709102) -m.x13 = Var(within=Reals, bounds=(5.40367788220586, 6.4377516497364), initialize=5.92071476597113) -m.x14 = Var(within=Reals, bounds=(4.60517018598809, 6.03228654162824), initialize=5.31872836380816) -m.x15 = Var(within=Reals, bounds=(1.89711998488588, 2.99573227355399), initialize=1.89711998488588) -m.x16 = Var(within=Reals, bounds=(1.38629436111989, 2.484906649788), initialize=1.38629436111989) -m.x17 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0) -m.x18 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0) -m.x19 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0) - -m.obj = Objective(expr=250 * exp(0.6 * m.x10 + m.x17) + 500 * exp(0.6 * m.x11 + m.x18) + 340 * exp(0.6 * m.x12 + m.x19) - , sense=minimize) - -m.c1 = Constraint(expr=m.x10 - m.x13 >= 
0.693147180559945) - -m.c2 = Constraint(expr=m.x11 - m.x13 >= 1.09861228866811) - -m.c3 = Constraint(expr=m.x12 - m.x13 >= 1.38629436111989) - -m.c4 = Constraint(expr=m.x10 - m.x14 >= 1.38629436111989) - -m.c5 = Constraint(expr=m.x11 - m.x14 >= 1.79175946922805) - -m.c6 = Constraint(expr=m.x12 - m.x14 >= 1.09861228866811) - -m.c7 = Constraint(expr=m.x15 + m.x17 >= 2.07944154167984) - -m.c8 = Constraint(expr=m.x15 + m.x18 >= 2.99573227355399) - -m.c9 = Constraint(expr=m.x15 + m.x19 >= 1.38629436111989) - -m.c10 = Constraint(expr=m.x16 + m.x17 >= 2.30258509299405) - -m.c11 = Constraint(expr=m.x16 + m.x18 >= 2.484906649788) - -m.c12 = Constraint(expr=m.x16 + m.x19 >= 1.09861228866811) - -m.c13 = Constraint(expr=200000 * exp(m.x15 - m.x13) + 150000 * exp(m.x16 - m.x14) <= 6000) - -m.c14 = Constraint(expr=- 0.693147180559945 * m.b4 - 1.09861228866811 * m.b7 + m.x17 == 0) - -m.c15 = Constraint(expr=- 0.693147180559945 * m.b5 - 1.09861228866811 * m.b8 + m.x18 == 0) - -m.c16 = Constraint(expr=- 0.693147180559945 * m.b6 - 1.09861228866811 * m.b9 + m.x19 == 0) - -m.c17 = Constraint(expr=m.b1 + m.b4 + m.b7 == 1) - -m.c18 = Constraint(expr=m.b2 + m.b5 + m.b8 == 1) - -m.c19 = Constraint(expr=m.b3 + m.b6 + m.b9 == 1) diff --git a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py new file mode 100644 index 00000000000..3b14090b6ce --- /dev/null +++ b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py @@ -0,0 +1,30 @@ +""" Example of constraint qualification. + +The expected optimal solution value is 3. 
+ + Problem type: convex MINLP + size: 1 binary variable + 1 continuous variables + 2 constraints + +""" +from __future__ import division + +from six import iteritems + +from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals, + Objective, Param, RangeSet, Var, exp, minimize, log) + + +class ConstraintQualificationExample(ConcreteModel): + + def __init__(self, *args, **kwargs): + """Create the problem.""" + kwargs.setdefault('name', 'ConstraintQualificationExample') + super(ConstraintQualificationExample, self).__init__(*args, **kwargs) + model = self + model.x = Var(bounds=(1.0, 10.0), initialize=5.0) + model.y = Var(within=Binary) + model.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y)) + model.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y)) + model.objective = Objective(expr=model.x, sense=minimize) diff --git a/pyomo/contrib/mindtpy/tests/example_PSE.py b/pyomo/contrib/mindtpy/tests/example_PSE.py deleted file mode 100644 index c5ca498e0e1..00000000000 --- a/pyomo/contrib/mindtpy/tests/example_PSE.py +++ /dev/null @@ -1,13 +0,0 @@ -from pyomo.environ import SolverFactory -import time -from pyomo.contrib.mindtpy.tests.flay03m import * -# from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet -# model = EightProcessFlowsheet() -# with SolverFactory('mindtpy') as opt: -with SolverFactory('mindtpy') as opt: - print('\n Solving problem with Outer Approximation') - start = time.time() - # opt.solve(model, strategy='OA', init_strategy = 'rNLP') - opt.solve(model) -# model.pprint() - print(time.time()-start) \ No newline at end of file diff --git a/pyomo/contrib/mindtpy/tests/flay03m.py b/pyomo/contrib/mindtpy/tests/flay03m.py deleted file mode 100644 index 5a4e201f7a0..00000000000 --- a/pyomo/contrib/mindtpy/tests/flay03m.py +++ /dev/null @@ -1,101 +0,0 @@ -# MINLP written by GAMS Convert from flay03m.gms instance in MINLPLib (http://www.minlplib.org/flay03m.html) -# Original problem appearing in 
Sawaya, Nicolas W, Reformulations, relaxations and cutting planes -# for generalized disjunctive programming, PhD thesis, Carnegie Mellon University, 2006. -# -# Equation counts -# Total E G L N X C B -# 25 4 6 15 0 0 0 0 -# -# Variable counts -# x b i s1s s2s sc si -# Total cont binary integer sos1 sos2 scont sint -# 27 15 12 0 0 0 0 0 -# FX 0 0 0 0 0 0 0 0 -# -# Nonzero counts -# Total const NL DLL -# 87 84 3 0 -# -# Reformulation has removed 1 variable and 1 equation - - -from pyomo.environ import * - -model = m = ConcreteModel() - -m.x1 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x2 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x3 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x4 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x5 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x6 = Var(within=Reals, bounds=(0, 29), initialize=0) -m.x7 = Var(within=Reals, bounds=(1, 40), initialize=1) -m.x8 = Var(within=Reals, bounds=(1, 50), initialize=1) -m.x9 = Var(within=Reals, bounds=(1, 60), initialize=1) -m.x10 = Var(within=Reals, bounds=(1, 40), initialize=1) -m.x11 = Var(within=Reals, bounds=(1, 50), initialize=1) -m.x12 = Var(within=Reals, bounds=(1, 60), initialize=1) -m.x13 = Var(within=Reals, bounds=(0, 30), initialize=0) -m.x14 = Var(within=Reals, bounds=(0, 30), initialize=0) -m.b15 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b16 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b17 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b18 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b19 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b20 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b21 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b22 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b23 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b24 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b25 = Var(within=Binary, bounds=(0, 1), initialize=0) -m.b26 = Var(within=Binary, bounds=(0, 1), 
initialize=0) - -m.obj = Objective(expr=2 * m.x13 + 2 * m.x14, sense=minimize) - -m.c2 = Constraint(expr=- m.x1 - m.x7 + m.x13 >= 0) - -m.c3 = Constraint(expr=- m.x2 - m.x8 + m.x13 >= 0) - -m.c4 = Constraint(expr=- m.x3 - m.x9 + m.x13 >= 0) - -m.c5 = Constraint(expr=- m.x4 - m.x10 + m.x14 >= 0) - -m.c6 = Constraint(expr=- m.x5 - m.x11 + m.x14 >= 0) - -m.c7 = Constraint(expr=- m.x6 - m.x12 + m.x14 >= 0) - -m.c8 = Constraint(expr=40 / m.x10 - m.x7 <= 0) - -m.c9 = Constraint(expr=50 / m.x11 - m.x8 <= 0) - -m.c10 = Constraint(expr=60 / m.x12 - m.x9 <= 0) - -m.c11 = Constraint(expr=m.x1 - m.x2 + m.x7 + 69 * m.b15 <= 69) - -m.c12 = Constraint(expr=m.x1 - m.x3 + m.x7 + 69 * m.b16 <= 69) - -m.c13 = Constraint(expr=m.x2 - m.x3 + m.x8 + 79 * m.b17 <= 79) - -m.c14 = Constraint(expr=- m.x1 + m.x2 + m.x8 + 79 * m.b18 <= 79) - -m.c15 = Constraint(expr=- m.x1 + m.x3 + m.x9 + 89 * m.b19 <= 89) - -m.c16 = Constraint(expr=- m.x2 + m.x3 + m.x9 + 89 * m.b20 <= 89) - -m.c17 = Constraint(expr=m.x4 - m.x5 + m.x10 + 69 * m.b21 <= 69) - -m.c18 = Constraint(expr=m.x4 - m.x6 + m.x10 + 69 * m.b22 <= 69) - -m.c19 = Constraint(expr=m.x5 - m.x6 + m.x11 + 79 * m.b23 <= 79) - -m.c20 = Constraint(expr=- m.x4 + m.x5 + m.x11 + 79 * m.b24 <= 79) - -m.c21 = Constraint(expr=- m.x4 + m.x6 + m.x12 + 89 * m.b25 <= 89) - -m.c22 = Constraint(expr=- m.x5 + m.x6 + m.x12 + 89 * m.b26 <= 89) - -m.c23 = Constraint(expr=m.b15 + m.b18 + m.b21 + m.b24 == 1) - -m.c24 = Constraint(expr=m.b16 + m.b19 + m.b22 + m.b25 == 1) - -m.c25 = Constraint(expr=m.b17 + m.b20 + m.b23 + m.b26 == 1) diff --git a/pyomo/contrib/mindtpy/tests/from_proposal.py b/pyomo/contrib/mindtpy/tests/from_proposal.py index 797915f620e..517a5cdf49e 100644 --- a/pyomo/contrib/mindtpy/tests/from_proposal.py +++ b/pyomo/contrib/mindtpy/tests/from_proposal.py @@ -22,4 +22,4 @@ def __init__(self, *args, **kwargs): m.c3 = Constraint(expr=m.y - 10*sqrt(m.x+0.1) <= 0) m.c4 = Constraint(expr=-m.x-m.y <= -5) - m.obj = Objective(expr=m.x - m.y / 4.5 +2, 
sense=minimize) + m.obj = Objective(expr=m.x - m.y / 4.5 + 2, sense=minimize) diff --git a/pyomo/contrib/mindtpy/tests/online_doc_example.py b/pyomo/contrib/mindtpy/tests/online_doc_example.py new file mode 100644 index 00000000000..a7199eadffa --- /dev/null +++ b/pyomo/contrib/mindtpy/tests/online_doc_example.py @@ -0,0 +1,31 @@ +""" Example in the online doc. + +The expected optimal solution value is 2.438447187191098. + + Problem type: convex MINLP + size: 1 binary variable + 1 continuous variables + 2 constraints + +""" +from __future__ import division + +from six import iteritems + +from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals, + Objective, Param, RangeSet, Var, exp, minimize, log) + + +class OnlineDocExample(ConcreteModel): + + def __init__(self, *args, **kwargs): + """Create the problem.""" + kwargs.setdefault('name', 'OnlineDocExample') + super(OnlineDocExample, self).__init__(*args, **kwargs) + model = self + model.x = Var(bounds=(1.0, 10.0), initialize=5.0) + model.y = Var(within=Binary) + model.c1 = Constraint(expr=(model.x-4.0)**2 - + model.x <= 50.0*(1-model.y)) + model.c2 = Constraint(expr=model.x*log(model.x) + 5 <= 50.0*(model.y)) + model.objective = Objective(expr=model.x, sense=minimize) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy.py b/pyomo/contrib/mindtpy/tests/test_mindtpy.py index a28482c1765..9479039a59d 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy.py @@ -1,6 +1,5 @@ """Tests for the MINDT solver plugin.""" from math import fabs - import pyomo.core.base.symbolic import pyutilib.th as unittest from pyomo.contrib.mindtpy.tests.eight_process_problem import \ @@ -9,9 +8,17 @@ from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel +from 
pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value +from pyomo.environ import * +from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded +from pyomo.solvers.tests.models.QCP_simple import QCP_simple +from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple +from pyomo.opt import TerminationCondition required_solvers = ('ipopt', 'glpk') +# required_solvers = ('gams', 'gams') if all(SolverFactory(s).available() for s in required_solvers): subsolvers_available = True else: @@ -30,29 +37,30 @@ def test_OA_8PP(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = EightProcessFlowsheet() - print('\n Solving problem with Outer Approximation') - opt.solve(model, strategy='OA', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0]) + print('\n Solving 8PP problem with Outer Approximation') + results = opt.solve(model, strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + bound_tolerance=1E-5) - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(fabs(value(model.cost.expr) - 68) <= 1E-2) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 68, places=1) def test_OA_8PP_init_max_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = EightProcessFlowsheet() - print('\n Solving problem with Outer Approximation') - opt.solve(model, strategy='OA', - init_strategy='max_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0]) + print('\n Solving 8PP problem with Outer Approximation(max_binary)') + results = opt.solve(model, 
strategy='OA', + init_strategy='max_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0]) - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(fabs(value(model.cost.expr) - 68) <= 1E-2) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 68, places=1) # def test_PSC(self): # """Test the partial surrogate cuts decomposition algorithm.""" @@ -97,75 +105,214 @@ def test_OA_MINLP_simple(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = SimpleMINLP() - print('\n Solving problem with Outer Approximation') - opt.solve(model, strategy='OA', init_strategy='initial_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - obj_bound=10) - - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(abs(value(model.cost.expr) - 3.5) <= 1E-2) + print('\n Solving MINLP_simple problem with Outer Approximation') + results = opt.solve(model, strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2) def test_OA_MINLP2_simple(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = SimpleMINLP2() - print('\n Solving problem with Outer Approximation') - opt.solve(model, strategy='OA', init_strategy='initial_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - obj_bound=10) - - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(abs(value(model.cost.expr) - 6.00976) <= 1E-2) + print('\n Solving MINLP2_simple problem with Outer Approximation') + 
results = opt.solve(model, strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 6.00976, places=2) def test_OA_MINLP3_simple(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = SimpleMINLP3() - print('\n Solving problem with Outer Approximation') - opt.solve(model, strategy='OA', init_strategy='initial_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - obj_bound=10) - - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(abs(value(model.cost.expr) - (-5.512)) <= 1E-2) + print('\n Solving MINLP3_simple problem with Outer Approximation') + results = opt.solve(model, strategy='OA', init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), -5.512, places=2) def test_OA_Proposal(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = ProposalModel() - print('\n Solving problem with Outer Approximation') + print('\n Solving Proposal problem with Outer Approximation') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0]) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2) + + def test_OA_Proposal_with_int_cuts(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = ProposalModel() + print('\n Solving Proposal problem with Outer Approximation(integer 
cuts)') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_integer_cuts=True, + integer_to_binary=True # if we use lazy callback, we cannot set integer_to_binary True + ) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2) + + def test_OA_ConstraintQualificationExample(self): + with SolverFactory('mindtpy') as opt: + model = ConstraintQualificationExample() + print('\n Solving Constraint Qualification Example with Outer Approximation') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0] + ) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.objective.expr), 3, places=2) + + def test_OA_ConstraintQualificationExample_integer_cut(self): + with SolverFactory('mindtpy') as opt: + model = ConstraintQualificationExample() + print( + '\n Solving Constraint Qualification Example with Outer Approximation(integer cut)') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_integer_cuts=True + ) + self.assertIs(results.solver.termination_condition, + TerminationCondition.feasible) + self.assertAlmostEqual(value(model.objective.expr), 3, places=2) + + def test_OA_OnlineDocExample(self): + with SolverFactory('mindtpy') as opt: + model = OnlineDocExample() + print('\n Solving Online Doc Example with Outer Approximation') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0] + ) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual( + value(model.objective.expr), 2.438447, places=2) + + # the following tests are used to improve code coverage + + def test_iteration_limit(self): + with SolverFactory('mindtpy') 
as opt: + model = ConstraintQualificationExample() + print('\n test iteration_limit to improve code coverage') opt.solve(model, strategy='OA', + iteration_limit=1, mip_solver=required_solvers[1], - nlp_solver=required_solvers[0]) + nlp_solver=required_solvers[0] + ) + # self.assertAlmostEqual(value(model.objective.expr), 3, places=2) - # self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertTrue(abs(value(model.obj.expr) - 0.66555) <= 1E-2) + def test_time_limit(self): + with SolverFactory('mindtpy') as opt: + model = ConstraintQualificationExample() + print('\n test time_limit to improve code coverage') + opt.solve(model, strategy='OA', + time_limit=1, + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0] + ) + def test_LP_case(self): + with SolverFactory('mindtpy') as opt: + m_class = LP_unbounded() + m_class._generate_model() + model = m_class.model + print('\n Solving LP case with Outer Approximation') + opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) - def test_OA_Proposal_with_int_cuts(self): + def test_QCP_case(self): + with SolverFactory('mindtpy') as opt: + m_class = QCP_simple() + m_class._generate_model() + model = m_class.model + print('\n Solving QCP case with Outer Approximation') + opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + def test_maximize_obj(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: model = ProposalModel() - print('\n Solving problem with Outer Approximation') + model.obj.sense = maximize + print('\n test maximize case to improve code coverage') opt.solve(model, strategy='OA', mip_solver=required_solvers[1], nlp_solver=required_solvers[0], - add_integer_cuts=True, - integer_to_binary=True) + # mip_solver_args={'timelimit': 0.9} + ) + self.assertAlmostEqual(value(model.obj.expr), 14.83, places=1) - # 
self.assertIs(results.solver.termination_condition, - # TerminationCondition.optimal) - self.assertAlmostEquals(value(model.obj.expr), 0.66555, places=2) + def test_rNLP_add_slack(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = EightProcessFlowsheet() + print( + '\n Test rNLP initialize strategy and add_slack to improve code coverage') + opt.solve(model, strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + bound_tolerance=1E-5, + add_slack=True) + self.assertAlmostEqual(value(model.cost.expr), 68, places=1) + + def test_initial_binary_add_slack(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = SimpleMINLP() + print( + '\n Test initial_binary initialize strategy and add_slack to improve code coverage') + results = opt.solve(model, strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10, + add_slack=True) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2) + + # def test_OA_OnlineDocExample4(self): + # with SolverFactory('mindtpy') as opt: + # m = ConcreteModel() + # m.x = Var(within=Binary) + # m.y = Var(within=Reals) + # m.o = Objective(expr=m.x*m.y) + # print('\n Solving problem with Outer Approximation') + # opt.solve(m, strategy='OA', + # mip_solver=required_solvers[1], + # nlp_solver=required_solvers[0], + # ) # def test_PSC(self): # """Test the partial surrogate cuts decomposition algorithm.""" diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py new file mode 100644 index 00000000000..6cf764b0b37 --- /dev/null +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py @@ -0,0 +1,171 @@ +"""Tests for the MINDT solver plugin.""" +from 
math import fabs +import pyomo.core.base.symbolic +import pyutilib.th as unittest +from pyomo.contrib.mindtpy.tests.eight_process_problem import \ + EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP +from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 +from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 +from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample +from pyomo.environ import SolverFactory, value +from pyomo.opt import TerminationCondition + +required_solvers = ('ipopt', 'cplex_persistent') +if all(SolverFactory(s).available(False) for s in required_solvers): + subsolvers_available = True +else: + subsolvers_available = False + + +@unittest.skipIf(not subsolvers_available, + "Required subsolvers %s are not available" + % (required_solvers,)) +@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available, + "Symbolic differentiation is not available") +class TestMindtPy(unittest.TestCase): + """Tests for the MindtPy solver plugin.""" + + # lazy callback tests + + def test_lazy_OA_8PP(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = EightProcessFlowsheet() + print('\n Solving 8PP problem with LP/NLP') + results = opt.solve(model, strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + bound_tolerance=1E-5, + single_tree=True) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 68, places=1) + + def test_lazy_OA_8PP_init_max_binary(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = 
EightProcessFlowsheet() + print('\n Solving 8PP_init_max_binary problem with LP/NLP') + results = opt.solve(model, strategy='OA', + init_strategy='max_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 68, places=1) + + def test_lazy_OA_MINLP_simple(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = SimpleMINLP() + print('\n Solving MINLP_simple problem with LP/NLP') + results = opt.solve(model, strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10, + single_tree=True) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2) + + def test_lazy_OA_MINLP2_simple(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = SimpleMINLP2() + print('\n Solving MINLP2_simple problem with LP/NLP') + results = opt.solve(model, strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True, + bound_tolerance=1E-2) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), 6.00976, places=2) + + def test_lazy_OA_MINLP3_simple(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = SimpleMINLP3() + print('\n Solving MINLP3_simple problem with LP/NLP') + results = opt.solve(model, strategy='OA', init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + obj_bound=10, + single_tree=True) + # TODO: fix the bug of bound here + # self.assertIs(results.solver.termination_condition, + # 
TerminationCondition.optimal) + self.assertAlmostEqual(value(model.cost.expr), -5.512, places=2) + + def test_lazy_OA_Proposal(self): + """Test the LP/NLP decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = ProposalModel() + print('\n Solving Proposal problem with LP/NLP') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True) + + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2) + + def test_lazy_OA_ConstraintQualificationExample(self): + with SolverFactory('mindtpy') as opt: + model = ConstraintQualificationExample() + print('\n Solving ConstraintQualificationExample with LP/NLP') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True + ) + self.assertIs(results.solver.termination_condition, + TerminationCondition.maxIterations) + self.assertAlmostEqual(value(model.objective.expr), 3, places=2) + + def test_OA_OnlineDocExample(self): + with SolverFactory('mindtpy') as opt: + model = OnlineDocExample() + print('\n Solving OnlineDocExample with LP/NLP') + results = opt.solve(model, strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True + ) + self.assertIs(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertAlmostEqual( + value(model.objective.expr), 2.438447, places=2) + + # TODO fix the bug with integer_to_binary + # def test_OA_Proposal_with_int_cuts(self): + # """Test the outer approximation decomposition algorithm.""" + # with SolverFactory('mindtpy') as opt: + # model = ProposalModel() + # print('\n Solving problem with Outer Approximation') + # opt.solve(model, strategy='OA', + # mip_solver=required_solvers[1], + # nlp_solver=required_solvers[0], + # add_integer_cuts=True, + # integer_to_binary=True, # if we 
use lazy callback, we cannot set integer_to_binary True + # lazy_callback=True, + # iteration_limit=1) + + # # self.assertIs(results.solver.termination_condition, + # # TerminationCondition.optimal) + # self.assertAlmostEquals(value(model.obj.expr), 0.66555, places=2) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mindtpy/util.py b/pyomo/contrib/mindtpy/util.py index fc239399608..d8c7c6c852f 100644 --- a/pyomo/contrib/mindtpy/util.py +++ b/pyomo/contrib/mindtpy/util.py @@ -13,6 +13,7 @@ from pyomo.core.kernel.component_set import ComponentSet from pyomo.opt import SolverFactory from pyomo.opt.results import ProblemSense +from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver class MindtPySolveData(object): @@ -40,19 +41,24 @@ def model_is_valid(solve_data, config): prob.number_of_integer_variables == 0 and prob.number_of_disjunctions == 0): config.logger.info('Problem has no discrete decisions.') - if len(MindtPy.working_nonlinear_constraints) > 0: + obj = next(m.component_data_objects(ctype=Objective, active=True)) + if (any(c.body.polynomial_degree() not in (1, 0) for c in MindtPy.constraint_list) or + obj.expr.polynomial_degree() not in (1, 0)): config.logger.info( "Your model is an NLP (nonlinear program). " - "Using NLP solver %s to solve." % config.nlp) - SolverFactory(config.nlp).solve( - solve_data.original_model, **config.nlp_options) + "Using NLP solver %s to solve." % config.nlp_solver) + SolverFactory(config.nlp_solver).solve( + solve_data.original_model, **config.nlp_solver_args) return False else: config.logger.info( "Your model is an LP (linear program). " - "Using LP solver %s to solve." % config.mip) - SolverFactory(config.mip).solve( - solve_data.original_model, **config.mip_options) + "Using LP solver %s to solve." 
% config.mip_solver) + mipopt = SolverFactory(config.mip_solver) + if isinstance(mipopt, PersistentSolver): + mipopt.set_instance(solve_data.original_model) + + mipopt.solve(solve_data.original_model, **config.mip_solver_args) return False if not hasattr(m, 'dual'): # Set up dual value reporting @@ -72,7 +78,8 @@ def calc_jacobians(solve_data, config): if c.body.polynomial_degree() in (1, 0): continue # skip linear constraints vars_in_constr = list(EXPR.identify_variables(c.body)) - jac_list = differentiate(c.body, wrt_list=vars_in_constr, mode=differentiate.Modes.sympy) + jac_list = differentiate( + c.body, wrt_list=vars_in_constr, mode=differentiate.Modes.sympy) solve_data.jacobians[c] = ComponentMap( (var, jac_wrt_var) for var, jac_wrt_var in zip(vars_in_constr, jac_list)) @@ -82,10 +89,31 @@ def add_feas_slacks(m): MindtPy = m.MindtPy_utils # generate new constraints for i, constr in enumerate(MindtPy.constraint_list, 1): - rhs = ((0 if constr.upper is None else constr.upper) + - (0 if constr.lower is None else constr.lower)) - c = MindtPy.MindtPy_feas.feas_constraints.add( - constr.body - rhs - <= MindtPy.MindtPy_feas.slack_var[i]) + if constr.body.polynomial_degree() not in [0, 1]: + rhs = constr.upper if constr.has_ub() else constr.lower + c = MindtPy.MindtPy_feas.feas_constraints.add( + constr.body - rhs + <= MindtPy.MindtPy_feas.slack_var[i]) +def var_bound_add(solve_data, config): + """This function will add bound for variables in nonlinear constraints if they are not bounded. + This is to avoid an unbound master problem in the LP/NLP algorithm. 
+ """ + m = solve_data.working_model + MindtPy = m.MindtPy_utils + for c in MindtPy.constraint_list: + if c.body.polynomial_degree() not in (1, 0): + for var in list(EXPR.identify_variables(c.body)): + if var.has_lb() and var.has_ub(): + continue + elif not var.has_lb(): + if var.is_integer(): + var.setlb(-config.integer_var_bound - 1) + else: + var.setlb(-config.continuous_var_bound - 1) + elif not var.has_ub(): + if var.is_integer(): + var.setub(config.integer_var_bound) + else: + var.setub(config.continuous_var_bound) diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py b/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py index 7e1d34bd216..19438444aaf 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py @@ -29,7 +29,10 @@ def SSE(model, data): expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) return expr -pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE) + +solver_options = {"max_iter": 6000} # not really needed in this case + +pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE, solver_options) obj, theta = pest.theta_est() print(obj) print(theta) diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py index 50db22bb2ec..72a60799bf4 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py @@ -35,7 +35,8 @@ def SSE_rule(m): if __name__ == '__main__': - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],[4,16.0],[5,15.6],[6,19.8]], + # These were taken from Table A1.4 in Bates and Watts (1988). 
+ data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],[4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) model = rooney_biegler_model(data) diff --git a/pyomo/contrib/parmest/examples/semibatch/scencreate.py b/pyomo/contrib/parmest/examples/semibatch/scencreate.py new file mode 100644 index 00000000000..61a21e530e1 --- /dev/null +++ b/pyomo/contrib/parmest/examples/semibatch/scencreate.py @@ -0,0 +1,42 @@ +# scenario creation example; DLW March 2020 + +import os +import json +import pyomo.contrib.parmest.parmest as parmest +from pyomo.contrib.parmest.examples.semibatch.semibatch import generate_model +import pyomo.contrib.parmest.scenariocreator as sc + +def main(dirname): + """ dirname gives the location of the experiment input files""" + # Semibatch Vars to estimate in parmest + theta_names = ['k1', 'k2', 'E1', 'E2'] + + # Semibatch data: list of dictionaries + data = [] + for exp_num in range(10): + fname = os.path.join(dirname, 'exp'+str(exp_num+1)+'.out') + with open(fname,'r') as infile: + d = json.load(infile) + data.append(d) + + pest = parmest.Estimator(generate_model, data, theta_names) + + scenmaker = sc.ScenarioCreator(pest, "ipopt") + + ofile = "delme_exp.csv" + print("Make one scenario per experiment and write to {}".format(ofile)) + experimentscens = sc.ScenarioSet("Experiments") + scenmaker.ScenariosFromExperiments(experimentscens) + ###experimentscens.write_csv(ofile) + + numtomake = 3 + print("\nUse the bootstrap to make {} scenarios and print.".format(numtomake)) + bootscens = sc.ScenarioSet("Bootstrap") + scenmaker.ScenariosFromBoostrap(bootscens, numtomake) + for s in bootscens.ScensIterator(): + print("{}, {}".format(s.name, s.probability)) + for n,v in s.ThetaVals.items(): + print(" {}={}".format(n, v)) + +if __name__ == "__main__": + main(".") diff --git a/pyomo/contrib/parmest/parmest.py b/pyomo/contrib/parmest/parmest.py index e9701769715..0de5a6646d8 100644 --- a/pyomo/contrib/parmest/parmest.py +++ b/pyomo/contrib/parmest/parmest.py @@ 
-328,9 +328,11 @@ class Estimator(object): Indicates that ef solver output should be teed diagnostic_mode: bool, optional If True, print diagnostics from the solver + solver_options: dict, optional + Provides options to the solver (also the name of an attribute) """ def __init__(self, model_function, data, theta_names, obj_function=None, - tee=False, diagnostic_mode=False): + tee=False, diagnostic_mode=False, solver_options=None): self.model_function = model_function self.callback_data = data @@ -343,6 +345,7 @@ def __init__(self, model_function, data, theta_names, obj_function=None, self.obj_function = obj_function self.tee = tee self.diagnostic_mode = diagnostic_mode + self.solver_options = solver_options self._second_stage_cost_exp = "SecondStageCost" self._numbers_list = list(range(len(data))) @@ -411,7 +414,8 @@ def _instance_creation_callback(self, experiment_number=None, cb_data=None): return model - def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", return_values=[], bootlist=None): + def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", + return_values=[], bootlist=None): """ Set up all thetas as first stage Vars, return resulting theta values as well as the objective function value. 
@@ -451,9 +455,9 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", return_values=[], bootlist=N tree_model = tree_model) if solver == "ef_ipopt": - sopts = {} - sopts['max_iter'] = 6000 - ef_sol = stsolver.solve_ef('ipopt', sopts=sopts, tee=self.tee) + ef_sol = stsolver.solve_ef('ipopt', + sopts=self.solver_options, + tee=self.tee) if self.diagnostic_mode: print(' Solver termination condition = ', str(ef_sol.solver.termination_condition)) diff --git a/pyomo/contrib/parmest/scenariocreator.py b/pyomo/contrib/parmest/scenariocreator.py new file mode 100644 index 00000000000..46e946c555f --- /dev/null +++ b/pyomo/contrib/parmest/scenariocreator.py @@ -0,0 +1,160 @@ +# ScenariosCreator.py - Class to create and deliver scenarios using parmest +# DLW March 2020 + +import json +import pyomo.contrib.parmest.parmest as parmest +import pyomo.environ as pyo + + +class ScenarioSet(object): + """ + Class to hold scenario sets + + Args: + name (str): name of the set (might be "") + + """ + + def __init__(self, name): + # Note: If there was a use-case, the list could be a dataframe. + self._scens = list() # use a df instead? + self.name = name # might be "" + + + def _firstscen(self): + # Return the first scenario for testing and to get Theta names. 
+ assert(len(self._scens) > 0) + return self._scens[0] + + + def ScensIterator(self): + """ Usage: for scenario in ScensIterator()""" + return iter(self._scens) + + + def ScenarioNumber(self, scennum): + """ Returns the scenario with the given, zero-based number""" + return self._scens[scennum] + + + def addone(self, scen): + """ Add a scenario to the set + + Args: + scen (ParmestScen): the scenario to add + """ + assert(isinstance(self._scens, list)) + self._scens.append(scen) + + + def append_bootstrap(self, bootstrap_theta): + """ Append a boostrap theta df to the scenario set; equally likely + + Args: + boostrap_theta (dataframe): created by the bootstrap + Note: this can be cleaned up a lot with the list becomes a df, + which is why I put it in the ScenarioSet class. + """ + assert(len(bootstrap_theta) > 0) + prob = 1. / len(bootstrap_theta) + + # dict of ThetaVal dicts + dfdict = bootstrap_theta.to_dict(orient='index') + + for index, ThetaVals in dfdict.items(): + name = "Boostrap"+str(index) + self.addone(ParmestScen(name, ThetaVals, prob)) + + + def write_csv(self, filename): + """ write a csv file with the scenarios in the set + + Args: + filename (str): full path and full name of file + """ + if len(self._scens) == 0: + print ("Empty scenario set, not writing file={}".format(filename)) + return + with open(filename, "w") as f: + f.write("Name,Probability") + for n in self._firstscen().ThetaVals.keys(): + f.write(",{}".format(n)) + f.write('\n') + for s in self.ScensIterator(): + f.write("{},{}".format(s.name, s.probability)) + for v in s.ThetaVals.values(): + f.write(",{}".format(v)) + f.write('\n') + + +class ParmestScen(object): + """ A little container for scenarios; the Args are the attributes. 
+ + Args: + name (str): name for reporting; might be "" + ThetaVals (dict): ThetaVals[name]=val + probability (float): probability of occurance "near" these ThetaVals + """ + + def __init__(self, name, ThetaVals, probability): + self.name = name + assert(isinstance(ThetaVals, dict)) + self.ThetaVals = ThetaVals + self.probability = probability + +############################################################ + + +class ScenarioCreator(object): + """ Create scenarios from parmest. + + Args: + pest (Estimator): the parmest object + solvername (str): name of the solver (e.g. "ipopt") + + """ + + def __init__(self, pest, solvername): + self.pest = pest + self.solvername = solvername + self.experiment_numbers = pest._numbers_list + + + def ScenariosFromExperiments(self, addtoSet): + """Creates new self.Scenarios list using the experiments only. + + Args: + addtoSet (ScenarioSet): the scenarios will be added to this set + Returns: + a ScenarioSet + """ + + assert(isinstance(addtoSet, ScenarioSet)) + prob = 1. / len(self.pest._numbers_list) + for exp_num in self.pest._numbers_list: + ##print("Experiment number=", exp_num) + model = self.pest._instance_creation_callback(exp_num, + self.pest.callback_data) + opt = pyo.SolverFactory(self.solvername) + results = opt.solve(model) # solves and updates model + ## pyo.check_termination_optimal(results) + ThetaVals = dict() + for theta in self.pest.theta_names: + tvar = eval('model.'+theta) + tval = pyo.value(tvar) + ##print(" theta, tval=", tvar, tval) + ThetaVals[theta] = tval + addtoSet.addone(ParmestScen("ExpScen"+str(exp_num), ThetaVals, prob)) + + def ScenariosFromBoostrap(self, addtoSet, numtomake, seed=None): + """Creates new self.Scenarios list using the experiments only. 
+ + Args: + addtoSet (ScenarioSet): the scenarios will be added to this set + numtomake (int) : number of scenarios to create + """ + + assert(isinstance(addtoSet, ScenarioSet)) + + bootstrap_thetas = self.pest.theta_est_bootstrap(numtomake, seed=seed) + addtoSet.append_bootstrap(bootstrap_thetas) diff --git a/pyomo/contrib/parmest/tests/test_parmest.py b/pyomo/contrib/parmest/tests/test_parmest.py index d949e811b06..7e58909f878 100644 --- a/pyomo/contrib/parmest/tests/test_parmest.py +++ b/pyomo/contrib/parmest/tests/test_parmest.py @@ -13,12 +13,12 @@ matplotlib.use('Agg') except: pass -try: - import numpy as np - import pandas as pd - imports_not_present = False -except: - imports_not_present = True +from pyomo.common.dependencies import ( + numpy as np, numpy_available, + pandas as pd, pandas_available, + scipy, scipy_available, +) +imports_present = numpy_available & pandas_available & scipy_available import platform is_osx = platform.mac_ver()[0] != '' @@ -49,7 +49,7 @@ def setUp(self): self.instance.IDX = pyo.Set(initialize=['a', 'b', 'c']) self.instance.x = pyo.Var(self.instance.IDX, initialize=1134) # TBD add a block - if not imports_not_present: + if imports_present: np.random.seed(1134) def tearDown(self): @@ -205,7 +205,7 @@ def test_theta_k_aug_for_Hessian(self): self.assertAlmostEqual(objval, 4.4675, places=2) -@unittest.skipIf(imports_not_present, "Cannot test parmest: required dependencies are missing") +@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing") @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class parmest_object_Tester_reactor_design(unittest.TestCase): @@ -242,8 +242,11 @@ def SSE(model, data): (float(data['cc']) - model.cc)**2 + \ (float(data['cd']) - model.cd)**2 return expr + + solver_options = {"max_iter": 6000} - self.pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) + self.pest = parmest.Estimator(reactor_design_model, data, + 
theta_names, SSE, solver_options) def test_theta_est(self): objval, thetavals = self.pest.theta_est() diff --git a/pyomo/contrib/parmest/tests/test_scenariocreator.py b/pyomo/contrib/parmest/tests/test_scenariocreator.py new file mode 100644 index 00000000000..5a0aa43ecab --- /dev/null +++ b/pyomo/contrib/parmest/tests/test_scenariocreator.py @@ -0,0 +1,146 @@ +# the matpolotlib stuff is to avoid $DISPLAY errors on Travis (DLW Oct 2018) +try: + import matplotlib + matplotlib.use('Agg') +except: + pass +from pyomo.common.dependencies import ( + numpy as np, numpy_available, + pandas as pd, pandas_available, + scipy, scipy_available, +) +imports_present = numpy_available & pandas_available & scipy_available + +uuid_available = True +try: + import uuid +except: + uuid_available = False + +import pyutilib.th as unittest +import os +import sys + +import pyomo.contrib.parmest.parmest as parmest +import pyomo.contrib.parmest.scenariocreator as sc +import pyomo.contrib.parmest.graphics as graphics +import pyomo.contrib.parmest as parmestbase +import pyomo.environ as pyo +import pyomo.contrib.parmest.examples.semibatch.scencreate as sbc + +from pyomo.opt import SolverFactory +ipopt_available = SolverFactory('ipopt').available() + +testdir = os.path.dirname(os.path.abspath(__file__)) + + +@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing") +@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") +class pamest_Scenario_creator_reactor_design(unittest.TestCase): + + def setUp(self): + from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model + + # Data from the design + data = pd.DataFrame(data=[[1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], + [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], + [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], + [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6], + [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0], + [1.30, 10000, 3817.1, 1072.2, 1374.6, 
1868.0], + [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7], + [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1], + [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3], + [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3], + [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1], + [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8], + [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4], + [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9], + [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3], + [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7], + [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1], + [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4], + [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8]], + columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd']) + + theta_names = ['k1', 'k2', 'k3'] + + def SSE(model, data): + expr = (float(data['ca']) - model.ca)**2 + \ + (float(data['cb']) - model.cb)**2 + \ + (float(data['cc']) - model.cc)**2 + \ + (float(data['cd']) - model.cd)**2 + return expr + + self.pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) + + def test_scen_from_exps(self): + scenmaker = sc.ScenarioCreator(self.pest, "ipopt") + experimentscens = sc.ScenarioSet("Experiments") + scenmaker.ScenariosFromExperiments(experimentscens) + experimentscens.write_csv("delme_exp_csv.csv") + df = pd.read_csv("delme_exp_csv.csv") + os.remove("delme_exp_csv.csv") + # March '20: all reactor_design experiments have the same theta values! 
+ k1val = df.loc[5].at["k1"] + self.assertAlmostEqual(k1val, 5.0/6.0, places=2) + tval = experimentscens.ScenarioNumber(0).ThetaVals["k1"] + self.assertAlmostEqual(tval, 5.0/6.0, places=2) + + + @unittest.skipIf(not uuid_available, "The uuid module is not available") + def test_no_csv_if_empty(self): + # low level test of scenario sets + # verify that nothing is written, but no errors with empty set + + emptyset = sc.ScenarioSet("empty") + tfile = uuid.uuid4().hex+".csv" + emptyset.write_csv(tfile) + self.assertFalse(os.path.exists(tfile), + "ScenarioSet wrote csv in spite of empty set") + + + + +@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing") +@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") +class pamest_Scenario_creator_semibatch(unittest.TestCase): + + def setUp(self): + import pyomo.contrib.parmest.examples.semibatch.semibatch as sb + import json + + # Vars to estimate in parmest + theta_names = ['k1', 'k2', 'E1', 'E2'] + + self.fbase = os.path.join(testdir,"..","examples","semibatch") + # Data, list of dictionaries + data = [] + for exp_num in range(10): + fname = "exp"+str(exp_num+1)+".out" + fullname = os.path.join(self.fbase, fname) + with open(fullname,'r') as infile: + d = json.load(infile) + data.append(d) + + # Note, the model already includes a 'SecondStageCost' expression + # for the sum of squared error that will be used in parameter estimation + + self.pest = parmest.Estimator(sb.generate_model, data, theta_names) + + + def test_semibatch_bootstrap(self): + + scenmaker = sc.ScenarioCreator(self.pest, "ipopt") + bootscens = sc.ScenarioSet("Bootstrap") + numtomake = 2 + scenmaker.ScenariosFromBoostrap(bootscens, numtomake, seed=1134) + tval = bootscens.ScenarioNumber(0).ThetaVals["k1"] + self.assertAlmostEqual(tval, 20.64, places=1) + + def test_semibatch_example(self): + # this is referenced in the documentation so at least look for smoke + sbc.main(self.fbase) + +if 
__name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py index 4fbdbc6d8fc..56de3cf8399 100644 --- a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py +++ b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py @@ -1,6 +1,8 @@ import logging -import textwrap +from six.moves import zip + +from pyomo.common import deprecated from pyomo.core import Constraint, value, TransformationFactory from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.repn.standard_repn import generate_standard_repn @@ -8,8 +10,9 @@ logger = logging.getLogger('pyomo.contrib.preprocessing') -@TransformationFactory.register('core.tighten_constraints_from_vars', - doc="Tightens upper and lower bound on linear constraints.") +@TransformationFactory.register( + 'core.tighten_constraints_from_vars', + doc="Tightens upper and lower bound on linear constraints.") class TightenContraintFromVars(IsomorphicTransformation): """Tightens upper and lower bound on constraints based on variable bounds. @@ -20,8 +23,17 @@ class TightenContraintFromVars(IsomorphicTransformation): """ - def _apply_to(self, instance): - for constr in instance.component_data_objects( + @deprecated( + "Use of the constraint tightener transformation is deprecated. 
" + "Its functionality may be partially replicated using " + "`pyomo.contrib.fbbt.compute_bounds_on_expr(constraint.body)`.", + version='TBD', remove_in='TBD') + def __init__(self): + super(TightenContraintFromVars, self).__init__() + + def _apply_to(self, model): + """Apply the transformation.""" + for constr in model.component_data_objects( ctype=Constraint, active=True, descend_into=True): repn = generate_standard_repn(constr.body) if not repn.is_linear(): @@ -31,33 +43,34 @@ def _apply_to(self, instance): LB = UB = 0 if repn.constant: LB = UB = repn.constant + # loop through each coefficent and variable pair - for i, coef in enumerate(repn.linear_coefs): - # TODO: Rounding issues + for var, coef in zip(repn.linear_vars, repn.linear_coefs): # Calculate bounds using interval arithmetic if coef >= 0: - if repn.linear_vars[i].has_ub(): - UB = UB + coef * value(repn.linear_vars[i].ub) + if var.has_ub(): + UB = UB + coef * value(var.ub) else: UB = float('Inf') - if repn.linear_vars[i].has_lb(): - LB = LB + coef * value(repn.linear_vars[i].lb) + if var.has_lb(): + LB = LB + coef * value(var.lb) else: LB = float('-Inf') else: # coef is negative, so signs switch - if repn.linear_vars[i].has_lb(): - UB = UB + coef * value(repn.linear_vars[i].lb) - else: - LB = float('-Inf') - if repn.linear_vars[i].has_ub(): - LB = LB + coef * value(repn.linear_vars[i].ub) + if var.has_lb(): + UB = UB + coef * value(var.lb) else: UB = float('Inf') + if var.has_ub(): + LB = LB + coef * value(var.ub) + else: + LB = float('-Inf') # if inferred bound is tighter, replace bound new_ub = min(value(constr.upper), UB) if constr.has_ub() else UB new_lb = max(value(constr.lower), LB) if constr.has_lb() else LB + constr.set_value((new_lb, constr.body, new_ub)) if UB < LB: diff --git a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py index 00c94932859..2855518cbd3 100644 --- 
a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py +++ b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py @@ -80,6 +80,22 @@ def test_unbounded_one_direction(self): self.assertEqual(value(m.c1.upper), -1) self.assertFalse(m.c1.has_lb()) + def test_negative_coeff(self): + """Unbounded in one direction with negative coefficient""" + m = ConcreteModel() + m.v1 = Var(initialize=7, bounds=(1, float('inf'))) + m.v2 = Var(initialize=2, bounds=(2, 5)) + m.v3 = Var(initialize=6, bounds=(6, 9)) + m.v4 = Var(initialize=1, bounds=(1, 1)) + m.c1 = Constraint(expr=2 * m.v2 + m.v3 + m.v4 - m.v1 <= 50) + + self.assertEqual(value(m.c1.upper), 50) + self.assertTrue(m.c1.has_ub()) + self.assertFalse(m.c1.has_lb()) + TransformationFactory('core.tighten_constraints_from_vars').apply_to(m) + self.assertEqual(value(m.c1.upper), 19) + self.assertFalse(m.c1.has_lb()) + def test_ignore_nonlinear(self): m = ConcreteModel() m.v1 = Var() diff --git a/pyomo/contrib/pynumero/README.md b/pyomo/contrib/pynumero/README.md index 593d4dc947b..0d165dbc39c 100644 --- a/pyomo/contrib/pynumero/README.md +++ b/pyomo/contrib/pynumero/README.md @@ -6,35 +6,50 @@ nonlinear optimization algorithms without large sacrifices on computational performance. PyNumero dramatically reduces the time required to prototype new NLP -algorithms and parallel decomposition while minimizing the performance -penalty. +algorithms and parallel decomposition approaches with minimal +performance penalties. PyNumero libraries ================== PyNumero relies on C/C++ extensions for expensive computing operations. -If you installed Pyomo using Anaconda (from conda-forge), then you can +If you installed Pyomo using conda (from conda-forge), then you can obtain precompiled versions of the redistributable interfaces (pynumero_ASL) using conda. Through Pyomo 5.6.9 these libraries are available by installing the `pynumero_libraries` package from conda-forge. 
Beginning in Pyomo 5.7, the redistributable pynumero -libraries are included in the pyomo conda-forge package. +libraries (pynumero_ASL) are included in the pyomo conda-forge package. If you are not using conda or want to build the nonredistributable -interfaces, you can build the extensions locally one of three ways: +interfaces (pynumero_MA27, pynumero_MA57), you can build the extensions +locally one of three ways: 1. By running the `build.py` Python script in this directory. This script will automatically drive the `cmake` build harness to compile the libraries and install them into your local Pyomo configuration -directory. +directory. Cmake options may be specified in the command. For example, + + python build.py -DBUILD_ASL=ON + +If you have compiled Ipopt, and you would like to link against the +libraries built with Ipopt, you can. For example, + + python build.py -DBUILD_ASL=ON -DBUILD_MA27=ON -DIPOPT_DIR=/lib/ + +If you do so, you will likely need to update an environment variable +for the path to shared libraries. For example, on Linux, + + export LD_LIBRARY_PATH=/lib/ + 2. By running `pyomo build-extensions`. This will build all registered Pyomo binary extensions, including PyNumero (using the `build.py` script from option 1). + 3. By manually running cmake to build the libraries. You will need to ensure that the libraries are then installed into a location that Pyomo (and PyNumero) can find them (e.g., in the Pyomo configuration -directory, or in a common system location, or in a location included in +`lib` directory, in a common system location, or in a location included in the LD_LIBRARY_PATH environment variable). Prerequisites @@ -48,4 +63,11 @@ Prerequisites this library) 2. `pynumero_MA27`: - - *TODO* + - cmake + - a C/C++ compiler + - MA27 library, COIN-HSL Archive, or COIN-HSL Full + +2. 
`pynumero_MA57`: + - cmake + - a C/C++ compiler + - MA57 library or COIN-HSL Full diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py index 3a79d15193d..dfdc612082d 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py @@ -17,7 +17,7 @@ if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface if not AmplInterface.available(): raise unittest.SkipTest( "Pynumero needs the ASL extension to run CyIpoptSolver tests") diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py index ba3841c0202..2f9a09ed8ff 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py @@ -17,7 +17,7 @@ if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface if not AmplInterface.available(): raise unittest.SkipTest( "Pynumero needs the ASL extension to run CyIpoptSolver tests") diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py index d853aa8f029..ac67cbeab09 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py @@ -17,7 +17,7 @@ if not (numpy_available and scipy_available): raise 
unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface if not AmplInterface.available(): raise unittest.SkipTest( "Pynumero needs the ASL extension to run CyIpoptSolver tests") diff --git a/pyomo/contrib/pynumero/extensions/asl.py b/pyomo/contrib/pynumero/asl.py similarity index 100% rename from pyomo/contrib/pynumero/extensions/asl.py rename to pyomo/contrib/pynumero/asl.py diff --git a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py index 28bd1dc602c..b802309d2ba 100644 --- a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py +++ b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py @@ -15,7 +15,7 @@ ) if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface from pyomo.contrib.pynumero.interfaces.nlp import NLP from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP from pyomo.contrib.pynumero.examples.structured.nlp_compositions import TwoStageStochasticNLP diff --git a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py index ca4e5937778..80b0442620b 100644 --- a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py +++ b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py @@ -18,7 +18,7 @@ if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface if not 
AmplInterface.available(): raise unittest.SkipTest( diff --git a/pyomo/contrib/pynumero/extensions/lib/Darwin/README b/pyomo/contrib/pynumero/extensions/lib/Darwin/README deleted file mode 100644 index 838ddd9b809..00000000000 --- a/pyomo/contrib/pynumero/extensions/lib/Darwin/README +++ /dev/null @@ -1 +0,0 @@ -Copy PyNumero libraries here. \ No newline at end of file diff --git a/pyomo/contrib/pynumero/extensions/lib/Linux/README b/pyomo/contrib/pynumero/extensions/lib/Linux/README deleted file mode 100644 index 838ddd9b809..00000000000 --- a/pyomo/contrib/pynumero/extensions/lib/Linux/README +++ /dev/null @@ -1 +0,0 @@ -Copy PyNumero libraries here. \ No newline at end of file diff --git a/pyomo/contrib/pynumero/extensions/lib/Windows/README b/pyomo/contrib/pynumero/extensions/lib/Windows/README deleted file mode 100644 index 838ddd9b809..00000000000 --- a/pyomo/contrib/pynumero/extensions/lib/Windows/README +++ /dev/null @@ -1 +0,0 @@ -Copy PyNumero libraries here. \ No newline at end of file diff --git a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py index a39f691d94e..b862eb935c3 100644 --- a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py @@ -12,7 +12,7 @@ the Ampl Solver Library (ASL) implementation """ try: - import pyomo.contrib.pynumero.extensions.asl as _asl + import pyomo.contrib.pynumero.asl as _asl except ImportError as e: print('{}'.format(e)) raise ImportError('Error importing asl.' 
@@ -503,6 +503,7 @@ def _evaluate_jacobians_and_cache_if_necessary(self): # this computation into one if not self._jac_full_is_cached: self._asl.eval_jac_g(self._primals, self._cached_jac_full.data) + self._jac_full_is_cached = True # overloaded from NLP def evaluate_jacobian(self, out=None): diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py index 263ff666d8a..7d434031611 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py @@ -16,7 +16,7 @@ if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") -from pyomo.contrib.pynumero.extensions.asl import AmplInterface +from pyomo.contrib.pynumero.asl import AmplInterface if not AmplInterface.available(): raise unittest.SkipTest( "Pynumero needs the ASL extension to run NLP tests") diff --git a/pyomo/contrib/pynumero/interfaces/utils.py b/pyomo/contrib/pynumero/interfaces/utils.py index 0df36aa3731..7ca7195c0bd 100644 --- a/pyomo/contrib/pynumero/interfaces/utils.py +++ b/pyomo/contrib/pynumero/interfaces/utils.py @@ -9,6 +9,10 @@ # ___________________________________________________________________________ import numpy as np from scipy.sparse import coo_matrix +from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix +from pyomo.common.dependencies import attempt_import +mpi_block_vector, mpi_block_vector_available = attempt_import('pyomo.contrib.pynumero.sparse.mpi_block_vector') + def build_bounds_mask(vector): """ @@ -18,18 +22,50 @@ def build_bounds_mask(vector): """ return build_compression_mask_for_finite_values(vector) + def build_compression_matrix(compression_mask): """ Return a sparse matrix CM of ones such that compressed_vector = CM*full_vector based on the compression mask + + Parameters + ---------- + compression_mask: np.ndarray or pyomo.contrib.pynumero.sparse.block_vector.BlockVector + + Returns 
+ ------- + cm: coo_matrix or BlockMatrix + The compression matrix """ - cols = compression_mask.nonzero()[0] - nnz = len(cols) - rows = np.arange(nnz, dtype=np.int) - data = np.ones(nnz) - return coo_matrix((data, (rows, cols)), shape=(nnz, len(compression_mask))) - + if isinstance(compression_mask, BlockVector): + n = compression_mask.nblocks + res = BlockMatrix(nbrows=n, nbcols=n) + for ndx, block in enumerate(compression_mask): + sub_matrix = build_compression_matrix(block) + res.set_block(ndx, ndx, sub_matrix) + return res + elif type(compression_mask) is np.ndarray: + cols = compression_mask.nonzero()[0] + nnz = len(cols) + rows = np.arange(nnz, dtype=np.int) + data = np.ones(nnz) + return coo_matrix((data, (rows, cols)), shape=(nnz, len(compression_mask))) + elif isinstance(compression_mask, mpi_block_vector.MPIBlockVector): + from pyomo.contrib.pynumero.sparse.mpi_block_matrix import MPIBlockMatrix + n = compression_mask.nblocks + rank_ownership = np.ones((n, n), dtype=np.int64) * -1 + for i in range(n): + rank_ownership[i, i] = compression_mask.rank_ownership[i] + res = MPIBlockMatrix(nbrows=n, nbcols=n, rank_ownership=rank_ownership, mpi_comm=compression_mask.mpi_comm) + for ndx in compression_mask.owned_blocks: + block = compression_mask.get_block(ndx) + sub_matrix = build_compression_matrix(block) + res.set_block(ndx, ndx, sub_matrix) + res.broadcast_block_sizes() + return res + + def build_compression_mask_for_finite_values(vector): """ Creates masks for converting from the full vector of diff --git a/pyomo/contrib/pynumero/linalg/ma27.py b/pyomo/contrib/pynumero/linalg/ma27.py new file mode 100644 index 00000000000..abc60124c34 --- /dev/null +++ b/pyomo/contrib/pynumero/linalg/ma27.py @@ -0,0 +1,174 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 
with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +from pyomo.common.fileutils import find_library +from pyomo.contrib.pynumero.linalg.utils import (validate_index, + validate_value, _NotSet) +import numpy.ctypeslib as npct +import numpy as np +import ctypes +import os + + +class MA27Interface(object): + + libname = _NotSet + + @classmethod + def available(cls): + if cls.libname is _NotSet: + cls.libname = find_library('pynumero_MA27') + if cls.libname is None: + return False + return os.path.exists(cls.libname) + + def __init__(self, + iw_factor=None, + a_factor=None): + + if not MA27Interface.available(): + raise RuntimeError( + 'Could not find pynumero_MA27 library.') + + self.iw_factor = iw_factor + self.a_factor = a_factor + + self.lib = ctypes.cdll.LoadLibrary(self.libname) + + array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS') + array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS') + array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS') + + # Declare arg and res types of functions: + + # Do I need to specify that this function takes no argument? + self.lib.new_MA27_struct.restype = ctypes.c_void_p + + self.lib.free_MA27_struct.argtypes = [ctypes.c_void_p] + + self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] + # Do I need to specify that this function returns nothing? 
+ self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_icntl.restype = ctypes.c_int + + self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double] + self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_cntl.restype = ctypes.c_double + + self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_info.restype = ctypes.c_int + + self.lib.alloc_iw_a.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.alloc_iw_b.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.alloc_a.argtypes = [ctypes.c_void_p, ctypes.c_int] + + self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, array_1d_int, array_1d_int] + self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, array_1d_int, array_1d_int, + array_1d_double] + self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_1d_double] + + self.icntl_len = 30 + self.cntl_len = 5 + self.info_len = 20 + + self._ma27 = self.lib.new_MA27_struct() + + def __del__(self): + self.lib.free_MA27_struct(self._ma27) + + + def set_icntl(self, i, val): + validate_index(i, self.icntl_len, 'ICNTL') + validate_value(i, int, 'ICNTL') + # NOTE: Use the FORTRAN indexing (same as documentation) to + # set and access info/cntl arrays from Python, whereas C + # functions use C indexing. Maybe this is too confusing. 
+ self.lib.set_icntl(self._ma27, i-1, val) + + + def get_icntl(self, i): + validate_index(i, self.icntl_len, 'ICNTL') + return self.lib.get_icntl(self._ma27, i-1) + + + def set_cntl(self, i, val): + validate_index(i, self.cntl_len, 'CNTL') + validate_value(val, float, 'CNTL') + self.lib.set_cntl(self._ma27, i-1, val) + + + def get_cntl(self, i): + validate_index(i, self.cntl_len, 'CNTL') + return self.lib.get_cntl(self._ma27, i-1) + + + def get_info(self, i): + validate_index(i, self.info_len, 'INFO') + return self.lib.get_info(self._ma27, i-1) + + + def do_symbolic_factorization(self, dim, irn, icn): + irn = irn.astype(np.intc, casting='safe', copy=True) + icn = icn.astype(np.intc, casting='safe', copy=True) + ne = irn.size + self.ne_cached = ne + self.dim_cached = dim + assert ne == icn.size, 'Dimension mismatch in row and column arrays' + + if self.iw_factor is not None: + min_size = 2*ne + 3*dim + 1 + self.lib.alloc_iw_a(self._ma27, + int(self.iw_factor*min_size)) + + self.lib.do_symbolic_factorization(self._ma27, + dim, ne, irn, icn) + return self.get_info(1) + + + def do_numeric_factorization(self, irn, icn, dim, entries): + irn = irn.astype(np.intc, casting='safe', copy=True) + icn = icn.astype(np.intc, casting='safe', copy=True) + assert (self.ne_cached == icn.size) and self.ne_cached == irn.size,\ + 'Dimension mismatch in row or column array' + + ent = entries.astype(np.double, casting='safe', copy=True) + + ne = ent.size + assert ne == self.ne_cached,\ + ('Wrong number of entries in matrix. Please re-run symbolic' + 'factorization with correct nonzero coordinates.') + assert dim == self.dim_cached,\ + ('Dimension mismatch between symbolic and numeric factorization.' 
+ 'Please re-run symbolic factorization with the correct ' + 'dimension.') + if self.a_factor is not None: + min_size = self.get_info(5) + self.lib.alloc_a(self._ma27, + int(self.a_factor*min_size)) + if self.iw_factor is not None: + min_size = self.get_info(6) + self.lib.alloc_iw_b(self._ma27, + int(self.iw_factor*min_size)) + + self.lib.do_numeric_factorization(self._ma27, dim, ne, + irn, icn, ent) + return self.get_info(1) + + + def do_backsolve(self, rhs): + rhs = rhs.astype(np.double, casting='safe', copy=True) + rhs_dim = rhs.size + assert rhs_dim == self.dim_cached,\ + 'Dimension mismatch in right hand side. Please correct.' + + self.lib.do_backsolve(self._ma27, rhs_dim, rhs) + + return rhs diff --git a/pyomo/contrib/pynumero/linalg/ma57.py b/pyomo/contrib/pynumero/linalg/ma57.py new file mode 100644 index 00000000000..26a13e092f6 --- /dev/null +++ b/pyomo/contrib/pynumero/linalg/ma57.py @@ -0,0 +1,217 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ +from pyomo.common.fileutils import find_library +from pyomo.contrib.pynumero.linalg.utils import (validate_index, + validate_value, _NotSet) +import numpy.ctypeslib as npct +import numpy as np +import ctypes +import sys +import os + +class MA57Interface(object): + + libname = _NotSet + + @classmethod + def available(cls): + if cls.libname is _NotSet: + cls.libname = find_library('pynumero_MA57') + if cls.libname is None: + return False + return os.path.exists(cls.libname) + + def __init__(self, + work_factor=None, + fact_factor=None, + ifact_factor=None): + + if not MA57Interface.available(): + raise RuntimeError( + 'Could not find pynumero_MA57 library.') + + self.work_factor = work_factor + self.fact_factor = fact_factor + self.ifact_factor = ifact_factor + + self.lib = ctypes.cdll.LoadLibrary(self.libname) + + array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS') + array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS') + array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS') + + # Declare arg and res types of functions: + + # Do I need to specify that this function takes no argument? + self.lib.new_MA57_struct.restype = ctypes.c_void_p + # return type is pointer to MA57_struct. Why do I use c_void_p here? + + self.lib.free_MA57_struct.argtypes = [ctypes.c_void_p] + + self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] + # Do I need to specify that this function returns nothing? 
+ self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_icntl.restype = ctypes.c_int + + self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double] + self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_cntl.restype = ctypes.c_double + + self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_info.restype = ctypes.c_int + + self.lib.get_rinfo.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.get_rinfo.restype = ctypes.c_double + + self.lib.alloc_keep.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.alloc_work.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.alloc_fact.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.alloc_ifact.argtypes = [ctypes.c_void_p, ctypes.c_int] + + self.lib.set_nrhs.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.set_lrhs.argtypes = [ctypes.c_void_p, ctypes.c_int] + self.lib.set_job.argtypes = [ctypes.c_void_p, ctypes.c_int] + + self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, array_1d_int, array_1d_int] + self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, array_1d_double] + self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_2d_double] + self.lib.do_iterative_refinement.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, array_1d_double, array_1d_int, array_1d_int, + array_1d_double, array_1d_double, array_1d_double] + self.lib.do_reallocation.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double, + ctypes.c_int] + + self.icntl_len = 20 + self.cntl_len = 5 + self.info_len = 40 + self.rinfo_len = 20 + + self._ma57 = self.lib.new_MA57_struct() + + + def __del__(self): + self.lib.free_MA57_struct(self._ma57) + + + def set_icntl(self, i, val): + validate_index(i, self.icntl_len, 'ICNTL') + validate_value(i, int, 'ICNTL') + # NOTE: Use the FORTRAN indexing (same as documentation) to + # set and access info/cntl arrays 
from Python, whereas C + # functions use C indexing. Maybe this is too confusing. + self.lib.set_icntl(self._ma57, i-1, val) + + + def get_icntl(self, i): + validate_index(i, self.icntl_len, 'ICNTL') + return self.lib.get_icntl(self._ma57, i-1) + + + def set_cntl(self, i, val): + validate_index(i, self.cntl_len, 'CNTL') + validate_value(val, float, 'CNTL') + self.lib.set_cntl(self._ma57, i-1, val) + + + def get_cntl(self, i): + validate_index(i, self.cntl_len, 'CNTL') + return self.lib.get_cntl(self._ma57, i-1) + + + def get_info(self, i): + validate_index(i, self.info_len, 'INFO') + return self.lib.get_info(self._ma57, i-1) + + + def get_rinfo(self, i): + validate_index(i, self.rinfo_len, 'RINFO') + return self.lib.get_info(self._ma57, i-1) + + + def do_symbolic_factorization(self, dim, irn, jcn): + irn = irn.astype(np.intc, casting='safe', copy=True) + jcn = jcn.astype(np.intc, casting='safe', copy=True) + # TODO: maybe allow user the option to specify size of KEEP + ne = irn.size + self.ne_cached = ne + self.dim_cached = dim + assert ne == jcn.size, 'Dimension mismatch in row and column arrays' + self.lib.do_symbolic_factorization(self._ma57, + dim, ne, irn, jcn) + return self.get_info(1) + + + def do_numeric_factorization(self, dim, entries): + entries = entries.astype(np.float64, casting='safe', copy=True) + ne = entries.size + assert ne == self.ne_cached,\ + ('Wrong number of entries in matrix. Please re-run symbolic' + 'factorization with correct nonzero coordinates.') + assert dim == self.dim_cached,\ + ('Dimension mismatch between symbolic and numeric factorization.' 
+ 'Please re-run symbolic factorization with the correct ' + 'dimension.') + if self.fact_factor is not None: + min_size = self.get_info(9) + self.lib.alloc_fact(self._ma57, + int(self.fact_factor*min_size)) + if self.ifact_factor is not None: + min_size = self.get_info(10) + self.lib.alloc_ifact(self._ma57, + int(self.ifact_factor*min_size)) + + self.lib.do_numeric_factorization(self._ma57, + dim, ne, entries) + return self.get_info(1) + + + def do_backsolve(self, rhs): + rhs = rhs.astype(np.double, casting='safe', copy=True) + shape = rhs.shape + if len(shape) == 1: + rhs_dim = rhs.size + nrhs = 1 + rhs = np.array([rhs]) + elif len(shape) == 2: + # FIXME + raise NotImplementedError( + 'Funcionality for solving a matrix of right hand ' + 'is buggy and needs fixing.') + rhs_dim = rhs.shape[0] + nrhs = rhs.shape[1] + else: + raise ValueError( + 'Right hand side must be a one or two-dimensional array') + # This does not necessarily need to be true; each RHS could have length + # larger than N (for some reason). In the C interface, however, I assume + # that LRHS == N + assert self.dim_cached == rhs_dim, 'Dimension mismatch in RHS' + # TODO: Option to specify a JOB other than 1. By my understanding, + # different JOBs allow partial factorizations to be performed. + # Currently not supported - unclear if it should be. + + if nrhs > 1: + self.lib.set_nrhs(self._ma57, nrhs) + + if self.work_factor is not None: + self.lib.alloc_work(self._ma57, + int(self.work_factor*nrhs*rhs_dim)) + + self.lib.do_backsolve(self._ma57, + rhs_dim, rhs) + + if len(shape) == 1: + # If the user input rhs as a 1D array, return the solution + # as a 1D array. 
+ rhs = rhs[0, :] + + return rhs diff --git a/pyomo/contrib/pynumero/linalg/mumps_solver.py b/pyomo/contrib/pynumero/linalg/mumps_interface.py similarity index 78% rename from pyomo/contrib/pynumero/linalg/mumps_solver.py rename to pyomo/contrib/pynumero/linalg/mumps_interface.py index d57990dca8e..15037695fbe 100644 --- a/pyomo/contrib/pynumero/linalg/mumps_solver.py +++ b/pyomo/contrib/pynumero/linalg/mumps_interface.py @@ -16,8 +16,7 @@ raise ImportError('Error importing mumps. Install pymumps ' 'conda install -c conda-forge pymumps') -from pyomo.contrib.pynumero.sparse.utils import is_symmetric_sparse -from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector +from pyomo.contrib.pynumero.sparse import BlockVector class MumpsCentralizedAssembledLinearSolver(object): @@ -46,8 +45,11 @@ class MumpsCentralizedAssembledLinearSolver(object): def __init__(self, sym=0, par=1, comm=None, cntl_options=None, icntl_options=None): self._nnz = None self._dim = None - self.mumps = mumps.DMumpsContext(sym=sym, par=par, comm=comm) - self.mumps.set_silent() + self._mumps = mumps.DMumpsContext(sym=sym, par=par, comm=comm) + self._mumps.set_silent() + self._icntl_options = dict() + self._cntl_options = dict() + if cntl_options is None: cntl_options = dict() if icntl_options is None: @@ -56,6 +58,17 @@ def __init__(self, sym=0, par=1, comm=None, cntl_options=None, icntl_options=Non self.set_cntl(k, v) for k, v in icntl_options.items(): self.set_icntl(k, v) + + def _init(self): + """ + The purpose of this method is to address issue #12 from pymumps + """ + self._mumps.run(job=-1) + self._mumps.set_silent() + for k, v in self._cntl_options.items(): + self.set_cntl(k, v) + for k, v in self._icntl_options.items(): + self.set_icntl(k, v) def do_symbolic_factorization(self, matrix): """ @@ -69,6 +82,7 @@ def do_symbolic_factorization(self, matrix): is not already in coo format. If sym is 1 or 2, the matrix must be lower or upper triangular. 
""" + self._init() if type(matrix) == np.ndarray: matrix = coo_matrix(matrix) if not isspmatrix_coo(matrix): @@ -78,9 +92,9 @@ def do_symbolic_factorization(self, matrix): raise ValueError('matrix is not square') self._dim = nrows self._nnz = matrix.nnz - self.mumps.set_shape(nrows) - self.mumps.set_centralized_assembled_rows_cols(matrix.row + 1, matrix.col + 1) - self.mumps.run(job=1) + self._mumps.set_shape(nrows) + self._mumps.set_centralized_assembled_rows_cols(matrix.row + 1, matrix.col + 1) + self._mumps.run(job=1) def do_numeric_factorization(self, matrix): """ @@ -108,8 +122,8 @@ def do_numeric_factorization(self, matrix): raise ValueError('The shape of the matrix changed between symbolic and numeric factorization') if self._nnz != matrix.nnz: raise ValueError('The number of nonzeros changed between symbolic and numeric factorization') - self.mumps.set_centralized_assembled_values(matrix.data) - self.mumps.run(job=2) + self._mumps.set_centralized_assembled_values(matrix.data) + self._mumps.run(job=2) def do_back_solve(self, rhs): """ @@ -133,8 +147,8 @@ def do_back_solve(self, rhs): else: result = rhs.copy() - self.mumps.set_rhs(result) - self.mumps.run(job=3) + self._mumps.set_rhs(result) + self._mumps.run(job=3) if isinstance(rhs, BlockVector): _result = rhs.copy_structure() @@ -144,27 +158,35 @@ def do_back_solve(self, rhs): return result def __del__(self): - self.mumps.destroy() + self._mumps.destroy() def set_icntl(self, key, value): - self.mumps.set_icntl(key, value) + self._icntl_options[key] = value + self._mumps.set_icntl(key, value) def set_cntl(self, key, value): - self.mumps.id.cntl[key-1] = value + self._cntl_options[key] = value + self._mumps.id.cntl[key - 1] = value def solve(self, matrix, rhs): self.do_symbolic_factorization(matrix) self.do_numeric_factorization(matrix) return self.do_back_solve(rhs) + def get_icntl(self, key): + return self._mumps.id.icntl[key - 1] + + def get_cntl(self, key): + return self._mumps.id.cntl[key - 1] + def 
get_info(self, key): - return self.mumps.id.info[key-1] + return self._mumps.id.info[key - 1] def get_infog(self, key): - return self.mumps.id.infog[key-1] + return self._mumps.id.infog[key - 1] def get_rinfo(self, key): - return self.mumps.id.rinfo[key-1] + return self._mumps.id.rinfo[key - 1] def get_rinfog(self, key): - return self.mumps.id.rinfog[key-1] + return self._mumps.id.rinfog[key - 1] diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma27.py b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py new file mode 100644 index 00000000000..7f831b67dae --- /dev/null +++ b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py @@ -0,0 +1,148 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ +import pyutilib.th as unittest +from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available +if not numpy_available: + raise unittest.SkipTest('pynumero MA27 tests require numpy') +from pyomo.contrib.pynumero.linalg.ma27 import * + + +@unittest.skipIf(not MA27Interface.available(), reason='MA27 not available') +class TestMA27Interface(unittest.TestCase): + + def test_get_cntl(self): + ma27 = MA27Interface() + self.assertEqual(ma27.get_icntl(1), 6) + + self.assertAlmostEqual(ma27.get_cntl(1), 1e-1) # Numerical pivot threshold + self.assertAlmostEqual(ma27.get_cntl(3), 0.0) # Null pivot threshold + + def test_set_icntl(self): + ma27 = MA27Interface() + ma27.set_icntl(5, 4) # Set output printing to max verbosity + ma27.set_icntl(8, 1) # Keep factors when we run out of space + # (so MA27ED can be used) + icntl5 = ma27.get_icntl(5) + icntl8 = ma27.get_icntl(8) + self.assertEqual(icntl5, 4) + self.assertEqual(icntl8, 1) + + with self.assertRaisesRegex(TypeError, 'must be an integer'): + ma27.set_icntl(1.0, 0) + with self.assertRaisesRegex(IndexError, 'is out of range'): + ma27.set_icntl(100, 0) + with self.assertRaises(ctypes.ArgumentError): + ma27.set_icntl(1, 0.0) + + def test_set_cntl(self): + ma27 = MA27Interface() + ma27.set_cntl(1, 1e-8) + ma27.set_cntl(3, 1e-12) + self.assertAlmostEqual(ma27.get_cntl(1), 1e-8) + self.assertAlmostEqual(ma27.get_cntl(3), 1e-12) + + def test_do_symbolic_factorization(self): + ma27 = MA27Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + icn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + # These arrays, copied out of HSL docs, contain Fortran indices. + # Interfaces accept C indices as this is what I typically expect. 
+ irn = irn - 1 + icn = icn - 1 + + bad_icn = np.array([1,2,3,5,3,4], dtype=np.intc) + # ^No need to update these indices + + ma27.do_symbolic_factorization(n, irn, icn) + + self.assertEqual(ma27.get_info(1), 0) + self.assertEqual(ma27.get_info(5), 14) # Min required num. integer words + self.assertEqual(ma27.get_info(6), 20) # Min required num. real words + + with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): + ma27.do_symbolic_factorization(n, irn, bad_icn) + + def test_do_numeric_factorization(self): + ma27 = MA27Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + icn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = irn - 1 + icn = icn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + ma27.do_symbolic_factorization(n, irn, icn) + + status = ma27.do_numeric_factorization(irn, icn, n, ent) + self.assertEqual(status, 0) + + expected_ent = [2.,3.,4.,6.,1.,5.,1.,] + for i in range(ne): + self.assertAlmostEqual(ent[i], expected_ent[i]) + + self.assertEqual(ma27.get_info(15), 2) # 2 negative eigenvalues + self.assertEqual(ma27.get_info(14), 1) # 1 2x2 pivot + + # Check that we can successfully perform another numeric factorization + # with same symbolic factorization + ent2 = np.array([1.5, 5.4, 1.2, 6.1, 4.2, 3.3, 2.0], dtype=np.double) + status = ma27.do_numeric_factorization(irn, icn, n, ent2) + self.assertEqual(ma27.get_info(15), 2) + self.assertEqual(status, 0) + + bad_ent = np.array([2.,3.,4.,6.,1.,5.], dtype=np.double) + with self.assertRaisesRegex(AssertionError, 'Wrong number of entries'): + ma27.do_numeric_factorization(irn, icn, n, bad_ent) + with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): + ma27.do_numeric_factorization(irn, icn, n+1, ent) + + # Check that we can successfully perform another symbolic and + # numeric factorization with the same ma27 struct + # + # n is still 5, ne has changed to 8. 
+ irn = np.array([1,1,2,2,3,3,5,1], dtype=np.intc) + icn = np.array([1,2,3,5,3,4,5,5], dtype=np.intc) + irn = irn - 1 + icn = icn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.,3.], dtype=np.double) + status = ma27.do_symbolic_factorization(n, irn, icn) + self.assertEqual(status, 0) + status = ma27.do_numeric_factorization(irn, icn, n, ent) + self.assertEqual(status, 0) + self.assertEqual(ma27.get_info(15), 3) + + def test_do_backsolve(self): + ma27 = MA27Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + icn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = irn - 1 + icn = icn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double) + status = ma27.do_symbolic_factorization(n, irn, icn) + status = ma27.do_numeric_factorization(irn, icn, n, ent) + sol = ma27.do_backsolve(rhs) + + expected_sol = [1,2,3,4,5] + old_rhs = np.array([8.,45.,31.,15.,17.]) + for i in range(n): + self.assertAlmostEqual(sol[i], expected_sol[i]) + self.assertEqual(old_rhs[i], rhs[i]) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma57.py b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py new file mode 100644 index 00000000000..61def1b91b4 --- /dev/null +++ b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py @@ -0,0 +1,160 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ +import ctypes +import pyutilib.th as unittest +from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available +if not numpy_available: + raise unittest.SkipTest('pynumero MA27 tests require numpy') +from pyomo.contrib.pynumero.linalg.ma57 import * + + +@unittest.skipIf(not MA57Interface.available(), reason='MA57 not available') +class TestMA57Interface(unittest.TestCase): + + def test_get_cntl(self): + ma57 = MA57Interface() + self.assertEqual(ma57.get_icntl(1), 6) + self.assertEqual(ma57.get_icntl(7), 1) + + self.assertAlmostEqual(ma57.get_cntl(1), 1e-2) # Numerical pivot threshold + self.assertAlmostEqual(ma57.get_cntl(2), 1e-20) # Null pivot threshold + + def test_set_icntl(self): + ma57 = MA57Interface() + ma57.set_icntl(5, 4) # Set output printing to max verbosity + ma57.set_icntl(8, 1) # Keep factors when we run out of space + # (so MA57ED can be used) + icntl5 = ma57.get_icntl(5) + icntl8 = ma57.get_icntl(8) + self.assertEqual(icntl5, 4) + self.assertEqual(icntl8, 1) + + with self.assertRaisesRegex(TypeError, 'must be an integer'): + ma57.set_icntl(1.0, 0) + with self.assertRaisesRegex(IndexError, 'is out of range'): + ma57.set_icntl(100, 0) + with self.assertRaises(ctypes.ArgumentError): + ma57.set_icntl(1, 0.0) + + def test_set_cntl(self): + ma57 = MA57Interface() + ma57.set_cntl(1, 1e-8) + ma57.set_cntl(2, 1e-12) + self.assertAlmostEqual(ma57.get_cntl(1), 1e-8) + self.assertAlmostEqual(ma57.get_cntl(2), 1e-12) + + def test_do_symbolic_factorization(self): + ma57 = MA57Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + # Copied these Fortran-style indices from HSL docs. + # Interface expects C-style indices, as is typical in Python. 
+ irn = irn - 1 + jcn = jcn - 1 + + bad_jcn = np.array([1,2,3,5,3,4], dtype=np.intc) + + ma57.do_symbolic_factorization(n, irn, jcn) + + self.assertEqual(ma57.get_info(1), 0) + self.assertEqual(ma57.get_info(4), 0) + self.assertEqual(ma57.get_info(9), 48) # Min required length of FACT + self.assertEqual(ma57.get_info(10), 53) # Min required length of IFACT + self.assertEqual(ma57.get_info(14), 0) # Should not yet be set + + with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): + ma57.do_symbolic_factorization(n, irn, bad_jcn) + + def test_do_numeric_factorization(self): + ma57 = MA57Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = irn - 1 + jcn = jcn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + ma57.do_symbolic_factorization(n, irn, jcn) + ma57.fact_factor = 1.5 + ma57.ifact_factor = 1.5 + # ^ No way to check whether these are handled properly... Would have to + # access the struct to get LFACT, LIFACT + + status = ma57.do_numeric_factorization(n, ent) + self.assertEqual(status, 0) + + self.assertEqual(ma57.get_info(14), 12) # 12 entries in factors + self.assertEqual(ma57.get_info(24), 2) # 2 negative eigenvalues + self.assertEqual(ma57.get_info(22), 1) # 1 2x2 pivot + self.assertEqual(ma57.get_info(23), 0) # 0 delayed pivots + + ent2 = np.array([1.,5.,1.,6.,4.,3.,2.], dtype=np.double) + ma57.do_numeric_factorization(n, ent2) + self.assertEqual(status, 0) + + bad_ent = np.array([2.,3.,4.,6.,1.,5.], dtype=np.double) + with self.assertRaisesRegex(AssertionError, 'Wrong number of entries'): + ma57.do_numeric_factorization(n, bad_ent) + with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): + ma57.do_numeric_factorization(n+1, ent) + + n = 5 + ne = 8 + irn = np.array([1,1,2,2,3,3,5,5], dtype=np.intc) + jcn = np.array([1,2,3,5,3,4,5,1], dtype=np.intc) + irn = irn - 1 + jcn = jcn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.,-1.3], 
dtype=np.double) + status = ma57.do_symbolic_factorization(n, irn, jcn) + self.assertEqual(status, 0) + status = ma57.do_numeric_factorization(n, ent) + self.assertEqual(status, 0) + self.assertEqual(ma57.get_info(24), 2) + self.assertEqual(ma57.get_info(23), 0) + + + def test_do_backsolve(self): + ma57 = MA57Interface() + + n = 5 + ne = 7 + irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) + jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = irn - 1 + jcn = jcn - 1 + ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double) + status = ma57.do_symbolic_factorization(n, irn, jcn) + status = ma57.do_numeric_factorization(n, ent) + sol = ma57.do_backsolve(rhs) + + expected_sol = [1,2,3,4,5] + old_rhs = np.array([8.,45.,31.,15.,17.]) + for i in range(n): + self.assertAlmostEqual(sol[i], expected_sol[i]) + self.assertEqual(old_rhs[i], rhs[i]) + + #rhs2 = np.array([[8., 17.], + # [45., 15.], + # [31., 31.], + # [15., 45.], + # [17., 8.]], dtype=np.double) + #sol = ma57.do_backsolve(rhs2) + # FIXME + # This gives unexpected (incorrect) results. + # Need to investigate further. 
+ + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/pynumero/linalg/tests/test_mumps_solver.py b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py similarity index 96% rename from pyomo/contrib/pynumero/linalg/tests/test_mumps_solver.py rename to pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py index bbcd5b1634c..09d602aedea 100644 --- a/pyomo/contrib/pynumero/linalg/tests/test_mumps_solver.py +++ b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py @@ -15,7 +15,7 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run linear solver tests") try: - from pyomo.contrib.pynumero.linalg.mumps_solver import MumpsCentralizedAssembledLinearSolver + from pyomo.contrib.pynumero.linalg.mumps_interface import MumpsCentralizedAssembledLinearSolver except ImportError: raise unittest.SkipTest("Pynumero needs pymumps to run linear solver tests") diff --git a/pyomo/contrib/pynumero/linalg/utils.py b/pyomo/contrib/pynumero/linalg/utils.py new file mode 100644 index 00000000000..2c39d990757 --- /dev/null +++ b/pyomo/contrib/pynumero/linalg/utils.py @@ -0,0 +1,32 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +def validate_index(i, array_len, array_name=''): + if not isinstance(i, int): + raise TypeError( + 'Index into %s array must be an integer. 
Got %s' + % (array_name, type(i))) + if i < 1 or i > array_len: + # NOTE: Use the FORTRAN indexing (same as documentation) to + # set and access info/cntl arrays from Python, whereas C + # functions use C indexing. Maybe this is too confusing. + raise IndexError( + 'Index %s is out of range for %s array of length %s' + % (i, array_name, array_len)) + +def validate_value(val, dtype, array_name=''): + if not isinstance(val, dtype): + raise ValueError( + 'Members of %s array must have type %s. Got %s' + % (array_name, dtype, type(val))) + +class _NotSet: + pass + diff --git a/pyomo/contrib/pynumero/sparse/block_matrix.py b/pyomo/contrib/pynumero/sparse/block_matrix.py index f223f8e8663..6d76d19a3f3 100644 --- a/pyomo/contrib/pynumero/sparse/block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/block_matrix.py @@ -526,6 +526,12 @@ def transpose(self, axes=None, copy=True): m, n = self.bshape mat = BlockMatrix(n, m) + for row in range(m): + if self.is_row_size_defined(row): + mat.set_col_size(row, self.get_row_size(row)) + for col in range(n): + if self.is_col_size_defined(col): + mat.set_row_size(col, self.get_col_size(col)) for i in range(m): for j in range(n): if not self.is_empty_block(i, j): @@ -738,7 +744,14 @@ def copy_structure(self): BlockMatrix """ - result = BlockMatrix(self.bshape[0], self.bshape[1]) + m, n = self.bshape + result = BlockMatrix(m, n) + for row in range(m): + if self.is_row_size_defined(row): + result.set_row_size(row, self.get_row_size(row)) + for col in range(n): + if self.is_col_size_defined(col): + result.set_col_size(col, self.get_col_size(col)) ii, jj = np.nonzero(self._block_mask) for i, j in zip(ii, jj): if isinstance(self._blocks[i, j], BlockMatrix): @@ -751,14 +764,24 @@ def copy_structure(self): def __repr__(self): return '{}{}'.format(self.__class__.__name__, self.bshape) - def __str__(self): - msg = '{}{}\n'.format(self.__class__.__name__, self.bshape) + def _print(self, indent): + msg = '' for idx in range(self.bshape[0]): for jdx 
in range(self.bshape[1]): - repn = self._blocks[idx, jdx].__repr__() if self._block_mask[idx, jdx] else None - msg += '({}, {}): {}\n'.format(idx, jdx, repn) + if self.is_empty_block(idx, jdx): + msg += indent + str((idx, jdx)) + ': ' + str(None) + '\n' + else: + block = self.get_block(idx, jdx) + if isinstance(block, BlockMatrix): + msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n' + msg += block._print(indent=indent+' ') + else: + msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.shape) + '\n' return msg + def __str__(self): + return self._print(indent='') + def get_block(self, row, col): assert row >= 0 and col >= 0, 'indices must be positive' assert row < self.bshape[0] and \ @@ -909,8 +932,9 @@ def __mul__(self, other): x = other.get_block(j) A = self._blocks[i, j] blk = result.get_block(i) - blk += A * x - result.set_block(i, blk) + _tmp = A*x + _tmp += blk + result.set_block(i, _tmp) return result elif isinstance(other, np.ndarray): diff --git a/pyomo/contrib/pynumero/sparse/block_vector.py b/pyomo/contrib/pynumero/sparse/block_vector.py index b35858a4469..410c51f97aa 100644 --- a/pyomo/contrib/pynumero/sparse/block_vector.py +++ b/pyomo/contrib/pynumero/sparse/block_vector.py @@ -113,7 +113,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): np.logical_not, np.expm1, np.exp2, np.sign, np.rint, np.square, np.positive, np.negative, np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal, - ] + np.signbit] # functions that take two vectors binary_funcs = [np.add, np.multiply, np.divide, np.subtract, @@ -358,8 +358,11 @@ def max(self, axis=None, out=None, keepdims=False): Returns the largest value stored in this BlockVector """ assert_block_structure(self) - results = np.array([self.get_block(i).max() for i in range(self.nblocks) if self.get_block(i).size > 0]) - return results.max(axis=axis, out=out, keepdims=keepdims) + results = list() + for block in self: + if block.size > 0: + 
results.append(block.max()) + return max(results) def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): """Copy of the array, cast to a specified type""" @@ -515,8 +518,11 @@ def min(self, axis=None, out=None, keepdims=False): Returns the smallest value stored in the vector """ assert_block_structure(self) - results = np.array([self.get_block(i).min() for i in range(self.nblocks)]) - return results.min(axis=axis, out=out, keepdims=keepdims) + results = list() + for block in self: + if block.size > 0: + results.append(block.min()) + return min(results) def mean(self, axis=None, dtype=None, out=None, keepdims=False): """ @@ -1205,32 +1211,19 @@ def __rdiv__(self, other): def __idiv__(self, other): return self.__itruediv__(other) - def __str__(self): + def _print(self, indent): msg = '' - for idx in range(self.bshape[0]): - if isinstance(self.get_block(idx), BlockVector): - repn = self.get_block(idx).__repr__() - repn += '\n' - for j, vv in enumerate(self.get_block(idx)): - if isinstance(vv, BlockVector): - repn += ' {}: {}\n'.format(j, vv.__repr__()) - repn += '\n' - for jj, vvv in enumerate(vv): - if isinstance(vv, BlockVector): - repn += ' {}: {}\n'.format(jj, vvv.__repr__()) - else: - repn += ' {}: array({})\n'.format(jj, vvv.size) - else: - repn += ' {}: array({})\n'.format(j, vv.size) - elif isinstance(self.get_block(idx), np.ndarray): - repn = "array({})".format(self.get_block(idx).size) - elif self.get_block(idx) is None: - repn = None + for ndx, block in enumerate(self): + if isinstance(block, BlockVector): + msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n' + msg += block._print(indent=indent+' ') else: - raise NotImplementedError("Should not get here") - msg += '{}: {}\n'.format(idx, repn) + msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.shape) + '\n' return msg + def __str__(self): + return self._print(indent='') + def __repr__(self): return 
'{}{}'.format(self.__class__.__name__, self.bshape) @@ -1249,13 +1242,49 @@ def set_block(self, key, value): self._set_block_size(key, value.size) super(BlockVector, self).__setitem__(key, value) + def _has_equal_structure(self, other): + """ + Parameters + ---------- + other: BlockVector + + Returns + ------- + equal_structure: bool + True if self and other have the same block structure (recursive). False otherwise. + """ + if not isinstance(other, BlockVector): + return False + if self.nblocks != other.nblocks: + return False + for ndx, block1 in enumerate(self): + block2 = other.get_block(ndx) + if isinstance(block1, BlockVector): + if not isinstance(block2, BlockVector): + return False + if not block1._has_equal_structure(block2): + return False + elif isinstance(block2, BlockVector): + return False + return True + def __getitem__(self, item): - raise NotImplementedError('BlockVector does not support __getitem__. ' - 'Use get_block or set_block to access sub-blocks.') + if not self._has_equal_structure(item): + raise ValueError('BlockVector.__getitem__ only accepts slices in the form of BlockVectors of the same structure') + res = BlockVector(self.nblocks) + for ndx, block in self: + res.set_block(ndx, block[item.get_block(ndx)]) def __setitem__(self, key, value): - raise NotImplementedError('BlockVector does not support __setitem__. 
' - 'Use get_block or set_block to access sub-blocks.') + if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))): + raise ValueError( + 'BlockVector.__setitem__ only accepts slices in the form of BlockVectors of the same structure') + if np.isscalar(value): + for ndx, block in enumerate(self): + block[key.get_block(ndx)] = value + else: + for ndx, block in enumerate(self): + block[key.get_block(ndx)] = value.get_block(ndx) def _comparison_helper(self, other, operation): assert_block_structure(self) diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py index 5ac1ced8902..954c0ba0411 100644 --- a/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py @@ -271,11 +271,7 @@ def transpose(self, axes=None, copy=True): n = self.bshape[1] assert_block_structure(self) result = MPIBlockMatrix(n, m, self._rank_owner.T, self._mpiw) - - rows, columns = np.nonzero(self.ownership_mask) - for i, j in zip(rows, columns): - if self.get_block(i, j) is not None: - result.set_block(j, i, self.get_block(i, j).transpose(copy=True)) + result._block_matrix = self._block_matrix.transpose() return result def tocoo(self): @@ -336,6 +332,27 @@ def toarray(self): """ raise RuntimeError('Operation not supported by MPIBlockMatrix') + def to_local_array(self): + """ + This method is only for testing/debugging + + Returns + ------- + result: np.ndarray + """ + local_result = self._block_matrix.copy_structure() + rank = self._mpiw.Get_rank() + block_indices = self._unique_owned_mask if rank != 0 else self._owned_mask + + ii, jj = np.nonzero(block_indices) + for i, j in zip(ii, jj): + if not self._block_matrix.is_empty_block(i, j): + local_result.set_block(i, j, self.get_block(i, j)) + local_result = local_result.toarray() + global_result = np.zeros(shape=local_result.shape, dtype=local_result.dtype) + self._mpiw.Allreduce(local_result, 
global_result) + return global_result + def is_empty_block(self, idx, jdx): """ Indicates if a block is empty @@ -770,7 +787,8 @@ def _block_vector_multiply(self, other): for row_ndx, col_ndx in zip(*np.nonzero(block_indices)): if self.get_block(row_ndx, col_ndx) is not None: res_blk = local_result.get_block(row_ndx) - res_blk += self.get_block(row_ndx, col_ndx) * other.get_block(col_ndx) + _tmp = self.get_block(row_ndx, col_ndx) * other.get_block(col_ndx) + res_blk = _tmp + res_blk local_result.set_block(row_ndx, res_blk) flat_local = local_result.flatten() flat_global = np.zeros(flat_local.size) diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py index 34710f19b2a..532055263ae 100644 --- a/pyomo/contrib/pynumero/sparse/mpi_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py @@ -153,7 +153,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): np.logical_not, np.expm1, np.exp2, np.sign, np.rint, np.square, np.positive, np.negative, np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal, - ] + np.signbit] # functions that take two vectors binary_funcs = [np.add, np.multiply, np.divide, np.subtract, np.greater, np.greater_equal, np.less, np.less_equal, @@ -163,17 +163,15 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): np.logaddexp2, np.remainder, np.heaviside, np.hypot] - args = [input_ for i, input_ in enumerate(inputs)] - outputs = kwargs.pop('out', None) if outputs is not None: raise NotImplementedError(str(ufunc) + ' cannot be used with MPIBlockVector if the out keyword argument is given.') if ufunc in unary_funcs: - results = self._unary_operation(ufunc, method, *args, **kwargs) + results = self._unary_operation(ufunc, method, *inputs, **kwargs) return results elif ufunc in binary_funcs: - results = self._binary_operation(ufunc, method, *args, **kwargs) + results = self._binary_operation(ufunc, method, *inputs, **kwargs) return results else: raise 
NotImplementedError(str(ufunc) + "not supported for MPIBlockVector") @@ -185,7 +183,7 @@ def _unary_operation(self, ufunc, method, *args, **kwargs): if isinstance(x, MPIBlockVector): rank = self._mpiw.Get_rank() - v = MPIBlockVector(self.nblocks, self._rank_owner, self._mpiw) + v = x.copy_structure() for i in self._owned_blocks: _args = [x.get_block(i)] + [args[j] for j in range(1, len(args))] v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs)) @@ -213,7 +211,7 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): assert np.array_equal(x1._rank_owner, x2._rank_owner), msg assert x1._mpiw == x2._mpiw, 'Need to have same communicator' - res = MPIBlockVector(x1.nblocks, x1._rank_owner, self._mpiw) + res = x1.copy_structure() for i in x1._owned_blocks: _args = [x1.get_block(i)] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) @@ -223,13 +221,13 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): elif isinstance(x1, MPIBlockVector) and isinstance(x2, BlockVector): raise RuntimeError('Operation not supported by MPIBlockVector') elif isinstance(x1, MPIBlockVector) and np.isscalar(x2): - res = MPIBlockVector(x1.nblocks, x1._rank_owner, self._mpiw) + res = x1.copy_structure() for i in x1._owned_blocks: _args = [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))] res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) return res elif isinstance(x2, MPIBlockVector) and np.isscalar(x1): - res = MPIBlockVector(x2.nblocks, x2._rank_owner, self._mpiw) + res = x2.copy_structure() for i in x2._owned_blocks: _args = [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) @@ -241,6 +239,8 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): elif isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray): # this will take care of 
blockvector and ndarrays return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs) + elif (type(x1)==BlockVector or np.isscalar(x1)) and (type(x2)==BlockVector or np.isscalar(x2)): + return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs) elif (type(x1)==np.ndarray or np.isscalar(x1)) and (type(x2)==np.ndarray or np.isscalar(x2)): return super(MPIBlockVector, self).__array_ufunc__(ufunc, method, *args, **kwargs) @@ -825,11 +825,12 @@ def _create_from_serialized_structure(serialized_structure, structure_ndx, resul for ndx in range(result.nblocks): if serialized_structure[structure_ndx] == -1: structure_ndx += 1 - result.set_block(ndx, BlockVector(serialized_structure[structure_ndx])) + block = BlockVector(serialized_structure[structure_ndx]) structure_ndx += 1 structure_ndx = MPIBlockVector._create_from_serialized_structure(serialized_structure, structure_ndx, - result.get_block(ndx)) + block) + result.set_block(ndx, block) elif serialized_structure[structure_ndx] == -2: structure_ndx += 1 result.set_block(ndx, np.zeros(serialized_structure[structure_ndx])) @@ -938,20 +939,20 @@ def make_local_copy(self): def _binary_operation_helper(self, other, operation): assert_block_structure(self) result = self.copy_structure() - if isinstance(other, MPIBlockVector): + if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector): assert self.nblocks == other.nblocks, \ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) - assert np.array_equal(self._rank_owner, other._rank_owner), \ - 'MPIBlockVectors must be distributed in same processors' - assert self._mpiw == other._mpiw, 'Need to have same communicator' - + if isinstance(other, MPIBlockVector): + assert np.array_equal(self._rank_owner, other._rank_owner), \ + 'MPIBlockVectors must be distributed in same processors' + assert self._mpiw == other._mpiw, 'Need to have same communicator' for i in self._owned_blocks: result.set_block(i, 
operation(self.get_block(i), other.get_block(i))) return result - elif isinstance(other, BlockVector): - raise RuntimeError('Operation not supported by MPIBlockVector') elif isinstance(other, np.ndarray): - raise RuntimeError('Operation not supported by MPIBlockVector') + _tmp = self.copy_structure() + _tmp.copyfrom(other) + return self._binary_operation_helper(_tmp, operation) elif np.isscalar(other): for i in self._owned_blocks: result.set_block(i, operation(self.get_block(i), other)) @@ -975,23 +976,26 @@ def _reverse_binary_operation_helper(self, other, operation): def _inplace_binary_operation_helper(self, other, operation): assert_block_structure(self) - if isinstance(other, MPIBlockVector): - assert_block_structure(other) + if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector): assert self.nblocks == other.nblocks, \ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) - assert np.array_equal(self._rank_owner, other._rank_owner), \ - 'MPIBlockVectors must be distributed in same processors' - assert self._mpiw == other._mpiw, 'Need to have same communicator' + if isinstance(other, MPIBlockVector): + assert np.array_equal(self._rank_owner, other._rank_owner), \ + 'MPIBlockVectors must be distributed in same processors' + assert self._mpiw == other._mpiw, 'Need to have same communicator' + assert_block_structure(other) + else: + block_vector_assert_block_structure(other) for i in self._owned_blocks: blk = self.get_block(i) operation(blk, other.get_block(i)) self.set_block(i, blk) return self - elif isinstance(other, BlockVector): - raise RuntimeError('Operation not supported by MPIBlockVector') elif isinstance(other, np.ndarray): - raise RuntimeError('Operation not supported by MPIBlockVector') + _tmp = self.copy_structure() + _tmp.copyfrom(other) + return self._inplace_binary_operation_helper(_tmp, operation) elif np.isscalar(other): for i in self._owned_blocks: blk = self.get_block(i) @@ -1129,13 +1133,46 @@ def 
set_block(self, key, value): self._block_vector.set_block(key, value) self._set_block_size(key, value.size) + def _has_equal_structure(self, other): + if not (isinstance(other, MPIBlockVector) or isinstance(other, BlockVector)): + return False + if self.nblocks != other.nblocks: + return False + if isinstance(other, MPIBlockVector): + if (self.owned_blocks != other.owned_blocks).any(): + return False + for ndx in self.owned_blocks: + block1 = self.get_block(ndx) + block2 = other.get_block(ndx) + if isinstance(block1, BlockVector): + if not isinstance(block2, BlockVector): + return False + if not block1._has_equal_structure(block2): + return False + elif isinstance(block2, BlockVector): + return False + return True + def __getitem__(self, item): - raise NotImplementedError('MPIBlockVector does not support __getitem__. ' - 'Use get_block or set_block to access sub-blocks.') + if not self._has_equal_structure(item): + raise ValueError('MIPBlockVector.__getitem__ only accepts slices in the form of MPIBlockVectors of the same structure') + res = self.copy_structure() + for ndx in self.owned_blocks: + block = self.get_block(ndx) + res.set_block(ndx, block[item.get_block(ndx)]) def __setitem__(self, key, value): - raise NotImplementedError('MPIBlockVector does not support __setitem__. 
' - 'Use get_block or set_block to access sub-blocks.') + if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))): + raise ValueError( + 'MPIBlockVector.__setitem__ only accepts slices in the form of MPIBlockVectors of the same structure') + if np.isscalar(value): + for ndx in self.owned_blocks: + block = self.get_block(ndx) + block[key.get_block(ndx)] = value + else: + for ndx in self.owned_blocks: + block = self.get_block(ndx) + block[key.get_block(ndx)] = value.get_block(ndx) def __str__(self): msg = '{}{}:\n'.format(self.__class__.__name__, self.bshape) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py index ab55b064987..580e172475a 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py @@ -913,3 +913,15 @@ def test_dimensions(self): self.assertTrue(np.all(bm.col_block_sizes() == np.ones(2)*4)) self.assertTrue(np.all(bm.row_block_sizes(copy=False) == np.ones(2)*4)) self.assertTrue(np.all(bm.col_block_sizes(copy=False) == np.ones(2)*4)) + + def test_transpose_with_empty_rows(self): + m = BlockMatrix(2, 2) + m.set_row_size(0, 2) + m.set_row_size(1, 2) + m.set_col_size(0, 2) + m.set_col_size(1, 2) + mt = m.transpose() + self.assertEqual(mt.get_row_size(0), 2) + self.assertEqual(mt.get_row_size(1), 2) + self.assertEqual(mt.get_col_size(0), 2) + self.assertEqual(mt.get_col_size(1), 2) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py index 34a3c87cc02..d6aebd6a049 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py @@ -1151,5 +1151,20 @@ def test_binary_ufuncs(self): res = fun(v, v2) self.assertTrue(np.allclose(flat_res, res.flatten())) + def test_min_with_empty_blocks(self): + b = BlockVector(3) + 
b.set_block(0, np.zeros(3)) + b.set_block(1, np.zeros(0)) + b.set_block(2, np.zeros(3)) + self.assertEqual(b.min(), 0) + + def test_max_with_empty_blocks(self): + b = BlockVector(3) + b.set_block(0, np.zeros(3)) + b.set_block(1, np.zeros(0)) + b.set_block(2, np.zeros(3)) + self.assertEqual(b.max(), 0) + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py index a2568e0d3db..db6fd4ce836 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py @@ -538,15 +538,6 @@ def test_add(self): self.assertTrue(np.allclose(np.arange(4)*2, res.get_block(1))) self.assertTrue(np.allclose(np.arange(2)*2, res.get_block(2))) - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - res = v + bv - - with self.assertRaises(Exception) as context: - res = bv + v - res = v + 5.0 self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(3, res.nblocks) @@ -573,11 +564,6 @@ def test_add(self): self.assertTrue(np.allclose(np.arange(4) + 5.0, res.get_block(1))) self.assertTrue(np.allclose(np.arange(2) + 5.0, res.get_block(2))) - with self.assertRaises(Exception) as context: - res = v + bv.flatten() - with self.assertRaises(Exception) as context: - res = bv.flatten() + v - def test_sub(self): v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() @@ -601,15 +587,6 @@ def test_sub(self): self.assertTrue(np.allclose(np.zeros(4), res.get_block(1))) self.assertTrue(np.allclose(np.zeros(2), res.get_block(2))) - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - res = bv - v - - with self.assertRaises(Exception) as context: - res = v - bv - res = 5.0 - v self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(3, 
res.nblocks) @@ -636,11 +613,6 @@ def test_sub(self): self.assertTrue(np.allclose(np.arange(4) - 5.0, res.get_block(1))) self.assertTrue(np.allclose(np.arange(2) - 5.0, res.get_block(2))) - with self.assertRaises(Exception) as context: - res = v - bv.flatten() - with self.assertRaises(Exception) as context: - res = bv.flatten() - v - def test_mul(self): v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() @@ -664,15 +636,6 @@ def test_mul(self): self.assertTrue(np.allclose(np.arange(4) * np.arange(4), res.get_block(1))) self.assertTrue(np.allclose(np.arange(2) * np.arange(2), res.get_block(2))) - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - res = v * bv - - with self.assertRaises(Exception) as context: - res = bv * v - res = v * 2.0 self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(3, res.nblocks) @@ -699,11 +662,6 @@ def test_mul(self): self.assertTrue(np.allclose(np.arange(4) * 2.0, res.get_block(1))) self.assertTrue(np.allclose(np.arange(2) * 2.0, res.get_block(2))) - with self.assertRaises(Exception) as context: - res = v * bv.flatten() - with self.assertRaises(Exception) as context: - res = bv.flatten() * v - def test_truediv(self): v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() @@ -727,16 +685,6 @@ def test_truediv(self): self.assertTrue(np.allclose(np.ones(4), res.get_block(1))) self.assertTrue(np.allclose(np.ones(2), res.get_block(2))) - bv = BlockVector(3) - bv.set_blocks([np.arange(3) + 1.0, - np.arange(4) + 1.0, - np.arange(2) + 1.0]) - with self.assertRaises(Exception) as context: - res = v / bv - - with self.assertRaises(Exception) as context: - res = bv / v - res = v / 2.0 self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(3, res.nblocks) @@ -763,12 +711,6 @@ def test_truediv(self): self.assertTrue(np.allclose(2.0/(np.arange(4) + 1.0), res.get_block(1))) self.assertTrue(np.allclose(2.0/(np.arange(2) + 1.0), 
res.get_block(2))) - with self.assertRaises(Exception) as context: - res = v / bv.flatten() - - with self.assertRaises(Exception) as context: - res = bv.flatten() / v - def test_floordiv(self): v = MPIBlockVector(3, [0,1,-1], comm) @@ -798,11 +740,6 @@ def test_floordiv(self): np.arange(4) + 1.0, np.arange(2) + 1.0]) - with self.assertRaises(Exception) as context: - res = v // bv - with self.assertRaises(Exception) as context: - res = bv // v - res1 = v // 2.0 res2 = bv // 2.0 self.assertTrue(isinstance(res1, MPIBlockVector)) @@ -831,11 +768,6 @@ def test_floordiv(self): self.assertTrue(np.allclose(res1.get_block(1), res2.get_block(1))) self.assertTrue(np.allclose(res1.get_block(2), res2.get_block(2))) - with self.assertRaises(Exception) as context: - res = v // bv.flatten() - with self.assertRaises(Exception) as context: - res = bv.flatten() // v - def test_isum(self): v = MPIBlockVector(3, [0,1,-1], comm) @@ -865,14 +797,6 @@ def test_isum(self): v.set_block(2, np.arange(2)) v.broadcast_block_sizes() - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - v += bv - with self.assertRaises(Exception) as context: - v += bv.flatten() - v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() if rank == 0: @@ -920,15 +844,6 @@ def test_isub(self): v.set_block(2, np.arange(2)) v.broadcast_block_sizes() - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - v -= bv - - with self.assertRaises(Exception) as context: - v -= bv.flatten() - v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() if rank == 0: @@ -976,14 +891,6 @@ def test_imul(self): v.set_block(2, np.arange(2)) v.broadcast_block_sizes() - bv = BlockVector(3) - bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)]) - - with self.assertRaises(Exception) as context: - v *= bv - with self.assertRaises(Exception) as context: - v *= bv.flatten() - 
v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() if rank == 0: @@ -1031,16 +938,6 @@ def test_itruediv(self): v.set_block(2, np.arange(2) + 1.0) v.broadcast_block_sizes() - bv = BlockVector(3) - bv.set_blocks([np.arange(3) + 1.0, - np.arange(4) + 1.0, - np.arange(2) + 1.0]) - - with self.assertRaises(Exception) as context: - v /= bv - with self.assertRaises(Exception) as context: - v /= bv.flatten() - v = MPIBlockVector(3, [0,1,-1], comm) rank = comm.Get_rank() if rank == 0: diff --git a/pyomo/contrib/pynumero/src/CMakeLists.txt b/pyomo/contrib/pynumero/src/CMakeLists.txt index 22c65b86e54..001e1319175 100644 --- a/pyomo/contrib/pynumero/src/CMakeLists.txt +++ b/pyomo/contrib/pynumero/src/CMakeLists.txt @@ -33,6 +33,8 @@ FIND_LIBRARY(DL_LIBRARY dl) SET(IPOPT_DIR "" CACHE PATH "Path to compiled Ipopt installation") SET(AMPLMP_DIR "" CACHE PATH "Path to compiled AMPL/MP installation") #SET(ASL_NETLIB_DIR "" CACHE PATH "Path to compiled ASL (netlib) installation") +SET(MA27_OBJECT "" CACHE FILEPATH + "Path to compiled ma27d.o object. Must be compiled with -fPIC.") # Use pkg-config to get the ASL/HSL directories from the Ipopt/COIN-OR build FIND_PACKAGE(PkgConfig) @@ -44,6 +46,13 @@ IF( PKG_CONFIG_FOUND ) SET(ENV{PKG_CONFIG_PATH} "${_TMP}") ENDIF() +# cmake does not search LD_LIBRARY_PATH by default. 
So that libraries +# like HSL can be added through mechanisms like 'environment modules', +# we will explicitly add LD_LIBRARY_PATH to the search path +string(REPLACE ":" ";" LD_LIBRARY_DIR_LIST + $ENV{LD_LIBRARY_PATH}:$ENV{DYLD_LIBRARY_PATH} + ) + # Note: the directory search order is intentional: first the modules we # are creating, then directories specifically set by the user, and # finally automatically located installations (e.g., from pkg-config) @@ -62,13 +71,32 @@ FIND_LIBRARY(ASL_LIBRARY NAMES coinasl asl "${AMPLMP_DIR}/lib" "${PC_COINASL_LIBDIR}" "${PC_COINASL_LIBRARY_DIRS}" + ${LD_LIBRARY_DIR_LIST} ) -FIND_LIBRARY(HSL_LIBRARY NAMES coinhsl libcoinhsl +FIND_LIBRARY(MA27_LIBRARY NAMES coinhsl libcoinhsl ma27 libma27 HINTS "${CMAKE_INSTALL_PREFIX}/lib" "${IPOPT_DIR}/lib" "${PC_COINHSL_LIBDIR}" "${PC_COINHSL_LIBRARY_DIRS}" + "${MA27_DIR}" + "${MA27_DIR}/lib" + ${LD_LIBRARY_DIR_LIST} ) +FIND_LIBRARY(MA57_LIBRARY NAMES coinhsl libcoinhsl ma57 libma57 + HINTS "${CMAKE_INSTALL_PREFIX}/lib" + "${IPOPT_DIR}/lib" + "${PC_COINHSL_LIBDIR}" + "${PC_COINHSL_LIBRARY_DIRS}" + "${MA57_DIR}" + "${MA57_DIR}/lib" + ${LD_LIBRARY_DIR_LIST} +) + +# If we were able to find the HSL, we will automatically enable the ma27 +# interface, as all versions of the HSL library contain ma27. +IF( MA27_LIBRARY OR MA27_OBJECT ) + set_property(CACHE BUILD_MA27 PROPERTY VALUE ON) +ENDIF() # If BUILD_AMPLMP_IF_NEEDED is set and we couldn't find / weren't # pointed to an ASL build, then we will forcibly enable the AMPLMP build @@ -92,6 +120,7 @@ IF( BUILD_AMPLMP ) # 3.1.0 needs to be patched to compile with recent compilers, # notably ubuntu 18.04. The patch applies a backport of fmtlib/fmt # abbefd7; see https://github.com/fmtlib/fmt/issues/398 + # The patch also disables AMPL/MP tests to speed up compilation. 
PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/amplmp-${AMPLMP_TAG}.patch ) @@ -139,14 +168,24 @@ IF( BUILD_ASL ) ENDIF() ENDIF() +# +# build hsl interfaces +# set(PYNUMERO_MA27_SOURCES "ma27Interface.cpp" - "ma27Interface.hpp" ) IF( BUILD_MA27 ) ADD_LIBRARY( pynumero_MA27 SHARED ${PYNUMERO_MA27_SOURCES} ) - TARGET_LINK_LIBRARIES( pynumero_MA27 ${HSL_LIBRARY} ) + IF( MA27_OBJECT ) + TARGET_LINK_LIBRARIES( pynumero_MA27 ${MA27_OBJECT} ) + ELSE() + TARGET_LINK_LIBRARIES( pynumero_MA27 ${MA27_LIBRARY} ) + ENDIF() + if ( DL_LIBRARY ) + TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${DL_LIBRARY} ) + ENDIF() + TARGET_COMPILE_DEFINITIONS( pynumero_MA27 PRIVATE BUILDING_PYNUMERO_MA27 ) SET_TARGET_PROPERTIES( pynumero_MA27 PROPERTIES ENABLE_EXPORTS 1 ) INSTALL(TARGETS pynumero_MA27 LIBRARY DESTINATION lib RUNTIME DESTINATION lib ) @@ -154,12 +193,15 @@ ENDIF() set(PYNUMERO_MA57_SOURCES "ma57Interface.cpp" - "ma57Interface.hpp" ) IF( BUILD_MA57 ) ADD_LIBRARY( pynumero_MA57 SHARED ${PYNUMERO_MA57_SOURCES} ) - TARGET_LINK_LIBRARIES( pynumero_MA57 ${HSL_LIBRARY} ) + TARGET_LINK_LIBRARIES( pynumero_MA57 ${MA57_LIBRARY} ) + if ( DL_LIBRARY ) + TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${DL_LIBRARY} ) + ENDIF() + TARGET_COMPILE_DEFINITIONS( pynumero_MA27 PRIVATE BUILDING_PYNUMERO_MA57 ) SET_TARGET_PROPERTIES( pynumero_MA57 PROPERTIES ENABLE_EXPORTS 1 ) INSTALL(TARGETS pynumero_MA57 LIBRARY DESTINATION lib RUNTIME DESTINATION lib ) diff --git a/pyomo/contrib/pynumero/src/ma27Interface.cpp b/pyomo/contrib/pynumero/src/ma27Interface.cpp new file mode 100644 index 00000000000..624c7edd6f3 --- /dev/null +++ b/pyomo/contrib/pynumero/src/ma27Interface.cpp @@ -0,0 +1,285 @@ +#include +#include +#include +#include + +// This would normally be in a header file, but as we do not need one, +// we will explicitly include it here. 
+#if defined(_WIN32) || defined(_WIN64) +# if defined(BUILDING_PYNUMERO_MA27) +# define PYNUMERO_HSL_EXPORT __declspec(dllexport) +# else +# define PYNUMERO_HSL_EXPORT __declspec(dllimport) +# endif +#else +# define PYNUMERO_HSL_EXPORT +#endif + +// Forward declaration of MA27 fortran routines +extern "C" { + void ma27id_(int* ICNTL, double* CNTL); + void ma27ad_(int *N, int *NZ, int *IRN, int* ICN, + int *IW, int* LIW, int* IKEEP, int *IW1, + int* NSTEPS, int* IFLAG, int* ICNTL, + double* CNTL, int *INFO, double* OPS); + void ma27bd_(int *N, int *NZ, int *IRN, int* ICN, + double* A, int* LA, int* IW, int* LIW, + int* IKEEP, int* NSTEPS, int* MAXFRT, + int* IW1, int* ICNTL, double* CNTL, + int* INFO); + void ma27cd_(int *N, double* A, int* LA, int* IW, + int* LIW, double* W, int* MAXFRT, + double* RHS, int* IW1, int* NSTEPS, + int* ICNTL, int* INFO); +} // extern "C" + +void abort_bad_memory(int status) { + printf("Bad memory allocation in MA27 C interface. Aborting."); + exit(status); +} + + +struct MA27_struct { + // Constructor: set defaults, initialize cached arrays to NULL + MA27_struct(): + LA(0), + LIW_a(0), + LIW_b(0), + NSTEPS(0), + IFLAG(0), + MAXFRT(0), + IW_factor(1.2), + A_factor(2.0), + OPS(0), + IW_a(NULL), + IW_b(NULL), + IKEEP(NULL), + A(NULL) + { + ma27id_(this->ICNTL, this->CNTL); + } + // Destructor: delete all cached arrays + virtual ~MA27_struct() { + if ( this->A ) { + delete[] this->A; + } + if ( this->IW_a ) { + delete[] this->IW_a; + } + if ( this->IW_b ) { + delete[] this->IW_b; + } + if ( this->IKEEP ) { + delete[] this->IKEEP; + } + } + + int LA, LIW_a, LIW_b, NSTEPS, IFLAG, MAXFRT; + double IW_factor, A_factor, OPS; + int* IW_a; + int* IW_b; + // Use different arrays for IW that is sent to MA27A and that sent to + // MA27B because IW must be discarded after MA27A but kept after MA27B. + // If these arrays are the same, and a symbolic factorization is performed + // after a numeric factorization (e.g. 
on a new matrix), user-defined + // and MA27B-defined allocations of IW can be conflated. + int* IKEEP; + double* A; + int ICNTL[30], INFO[20]; + double CNTL[5]; +}; + +extern "C" { + + PYNUMERO_HSL_EXPORT + MA27_struct* new_MA27_struct(void) { + MA27_struct* ma27 = new MA27_struct; + if (ma27 == NULL) { abort_bad_memory(1); } + // Return pointer to ma27 that Python program can pass to other + // functions in this code + return ma27; + } + + + PYNUMERO_HSL_EXPORT + void free_MA27_struct(MA27_struct* ma27) { + delete ma27; + } + + // Functions for setting/accessing INFO/CNTL arrays: + PYNUMERO_HSL_EXPORT + void set_icntl(MA27_struct* ma27, int i, int val) { + ma27->ICNTL[i] = val; + } + + PYNUMERO_HSL_EXPORT + int get_icntl(MA27_struct* ma27, int i) { + return ma27->ICNTL[i]; + } + + PYNUMERO_HSL_EXPORT + void set_cntl(MA27_struct* ma27, int i, double val) { + ma27->CNTL[i] = val; + } + + PYNUMERO_HSL_EXPORT + double get_cntl(MA27_struct* ma27, int i) { + return ma27->CNTL[i]; + } + + PYNUMERO_HSL_EXPORT + int get_info(MA27_struct* ma27, int i) { + return ma27->INFO[i]; + } + + // Functions for allocating WORK/FACT arrays: + PYNUMERO_HSL_EXPORT + void alloc_iw_a(MA27_struct* ma27, int l) { + if ( ma27->IW_a ) { + delete[] ma27->IW_a; + } + ma27->LIW_a = l; + ma27->IW_a = new int[l]; + if (ma27->IW_a == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void alloc_iw_b(MA27_struct* ma27, int l) { + if ( ma27->IW_b ) { + delete[] ma27->IW_b; + } + ma27->LIW_b = l; + ma27->IW_b = new int[l]; + if (ma27->IW_b == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void alloc_a(MA27_struct* ma27, int l) { + if ( ma27->A ) { + delete[] ma27->A; + } + ma27->LA = l; + ma27->A = new double[l]; + if (ma27->A == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void do_symbolic_factorization(MA27_struct* ma27, int N, int NZ, + int* IRN, int* ICN) { + // Arrays, presumably supplied from Python, are assumed to have base- + // zero indices. 
Convert to base-one before sending to Fortran. + for (int i=0; iIW_a ) { + int min_size = 2*NZ + 3*N + 1; + int size = (int)(ma27->IW_factor*min_size); + alloc_iw_a(ma27, size); + } + + if ( ma27->IKEEP ) { + delete[] ma27->IKEEP; + } + ma27->IKEEP = new int[3*N]; + if (ma27->IKEEP == NULL) { abort_bad_memory(1); } + int* IW1 = new int[2*N]; + if (IW1 == NULL) { abort_bad_memory(1); } + + ma27ad_(&N, + &NZ, + IRN, + ICN, + ma27->IW_a, + &(ma27->LIW_a), + ma27->IKEEP, + IW1, + &(ma27->NSTEPS), + &(ma27->IFLAG), + ma27->ICNTL, + ma27->CNTL, + ma27->INFO, + &(ma27->OPS)); + + delete[] IW1; + delete[] ma27->IW_a; + ma27->IW_a = NULL; + } + + PYNUMERO_HSL_EXPORT + void do_numeric_factorization(MA27_struct* ma27, int N, int NZ, + int* IRN, int* ICN, double* A) { + + // Convert indices to base-one for Fortran + for (int i=0; iA ) { + int info5 = ma27->INFO[5-1]; + int size = (int)(ma27->A_factor*info5); + alloc_a(ma27, size); + // A is now allocated + } + // Regardless of ma27->A's previous allocation status, copy values from A. + memcpy(ma27->A, A, NZ*sizeof(double)); + + if ( ! 
ma27->IW_b ) { + int info6 = ma27->INFO[6-1]; + int size = (int)(ma27->IW_factor*info6); + alloc_iw_b(ma27, size); + } + + int* IW1 = new int[N]; + if (IW1 == NULL) { abort_bad_memory(1); } + + ma27bd_(&N, + &NZ, + IRN, + ICN, + ma27->A, + &(ma27->LA), + ma27->IW_b, + &(ma27->LIW_b), + ma27->IKEEP, + &(ma27->NSTEPS), + &(ma27->MAXFRT), + IW1, + ma27->ICNTL, + ma27->CNTL, + ma27->INFO); + + delete[] IW1; + } + + PYNUMERO_HSL_EXPORT + void do_backsolve(MA27_struct* ma27, int N, double* RHS) { + + double* W = new double[ma27->MAXFRT]; + if (W == NULL) { abort_bad_memory(1); } + int* IW1 = new int[ma27->NSTEPS]; + if (IW1 == NULL) { abort_bad_memory(1); } + + ma27cd_( + &N, + ma27->A, + &(ma27->LA), + ma27->IW_b, + &(ma27->LIW_b), + W, + &(ma27->MAXFRT), + RHS, + IW1, + &(ma27->NSTEPS), + ma27->ICNTL, + ma27->INFO + ); + + delete[] IW1; + delete[] W; + } + +} // extern "C" diff --git a/pyomo/contrib/pynumero/src/ma57Interface.cpp b/pyomo/contrib/pynumero/src/ma57Interface.cpp new file mode 100644 index 00000000000..99b98ef6215 --- /dev/null +++ b/pyomo/contrib/pynumero/src/ma57Interface.cpp @@ -0,0 +1,411 @@ +#include +#include +#include + +// This would normally be in a header file, but as we do not need one, +// we will explicitly include it here. 
+#if defined(_WIN32) || defined(_WIN64) +# if defined(BUILDING_PYNUMERO_MA57) +# define PYNUMERO_HSL_EXPORT __declspec(dllexport) +# else +# define PYNUMERO_HSL_EXPORT __declspec(dllimport) +# endif +#else +# define PYNUMERO_HSL_EXPORT +#endif + +// Forward declaration of MA57 fortran routines +extern "C" { + void ma57id_(double* CNTL, int* ICNTL); + void ma57ad_(int *N, int *NE, const int *IRN, const int* JCN, + int *LKEEP, int* KEEP, int* IWORK, int *ICNTL, + int* INFO, double* RINFO); + void ma57bd_(int *N, int *NE, double* A, double* FACT, int* LFACT, + int* IFACT, int* LIFACT, int* LKEEP, int* KEEP, int* IWORK, + int* ICNTL, double* CNTL, int* INFO, double* RINFO); + void ma57cd_(int* JOB, int *N, double* FACT, int* LFACT, + int* IFACT, int* LIFACT, int* NRHS, double* RHS, + int* LRHS, double* WORK, int* LWORK, int* IWORK, + int* ICNTL, int* INFO); + void ma57dd_(int* JOB, int *N, int *NE, int *IRN, int *JCN, + double *FACT, int *LFACT, int *IFACT, int *LIFACT, + double *RHS, double *X, double *RESID, double *WORK, + int *IWORK, int *ICNTL, double *CNTL, int *INFO, + double *RINFO); + void ma57ed_(int *N, int* IC, int* KEEP, double* FACT, int* LFACT, + double* NEWFAC, int* LNEW, int* IFACT, int* LIFACT, + int* NEWIFC, int* LINEW, int* INFO); +} // extern "C" + +void abort_bad_memory(int status){ + printf("Bad memory allocation in MA57 C interface. 
Aborting."); + exit(status); +} + + +struct MA57_struct { + MA57_struct(): + LKEEP(0), LIFACT(0), LWORK(0), LFACT(0), + LRHS(0), NRHS(0), JOB(0), + NRHS_set(false), + LRHS_set(false), + JOB_set(false), + WORK_factor(1.2), + FACT_factor(2.0), + IFACT_factor(2.0), + KEEP(NULL), + IFACT(NULL), + WORK(NULL), + FACT(NULL) + { + ma57id_(this->CNTL, this->ICNTL); + } + virtual ~MA57_struct() { + if ( this->WORK ) { + delete[] this->WORK; + } + if ( this->FACT ) { + delete[] this->FACT; + } + if ( this->IFACT ) { + delete[] this->IFACT; + } + if ( this->KEEP ) { + delete[] this->KEEP; + } + } + + int LKEEP, LIFACT, LWORK, LFACT, LRHS, NRHS, JOB; + bool NRHS_set, LRHS_set, JOB_set; + double WORK_factor, FACT_factor, IFACT_factor; + int* KEEP; + int* IFACT; + double* WORK; + double* FACT; + int ICNTL[20], INFO[40]; + double CNTL[5], RINFO[20]; +}; + +extern "C" { + + PYNUMERO_HSL_EXPORT + MA57_struct* new_MA57_struct(void){ + + MA57_struct* ma57 = new MA57_struct; + if (ma57 == NULL) { abort_bad_memory(1); } + // Return pointer to ma57 that Python program can pass to other + // functions in this code + return ma57; + } + + PYNUMERO_HSL_EXPORT + void free_MA57_struct(MA57_struct* ma57) { + delete ma57; + } + + // Functions for setting/accessing INFO/CNTL arrays: + PYNUMERO_HSL_EXPORT + void set_icntl(MA57_struct* ma57, int i, int val) { + ma57->ICNTL[i] = val; + } + + PYNUMERO_HSL_EXPORT + int get_icntl(MA57_struct* ma57, int i) { + return ma57->ICNTL[i]; + } + + PYNUMERO_HSL_EXPORT + void set_cntl(MA57_struct* ma57, int i, double val) { + ma57->CNTL[i] = val; + } + + PYNUMERO_HSL_EXPORT + double get_cntl(MA57_struct* ma57, int i) { + return ma57->CNTL[i]; + } + + PYNUMERO_HSL_EXPORT + int get_info(MA57_struct* ma57, int i) { + return ma57->INFO[i]; + } + + PYNUMERO_HSL_EXPORT + double get_rinfo(MA57_struct* ma57, int i) { + return ma57->RINFO[i]; + } + + // Functions for allocating WORK/FACT arrays: + PYNUMERO_HSL_EXPORT + void alloc_keep(MA57_struct* ma57, int l) { + if ( 
ma57->KEEP ) { + delete[] ma57->KEEP; + } + ma57->LKEEP = l; + ma57->KEEP = new int[l]; + if (ma57->KEEP == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void alloc_work(MA57_struct* ma57, int l) { + if ( ma57->WORK ) { + delete[] ma57->WORK; + } + ma57->LWORK = l; + ma57->WORK = new double[l]; + if (ma57->WORK == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void alloc_fact(MA57_struct* ma57, int l) { + if ( ma57->FACT ) { + delete[] ma57->FACT; + } + ma57->LFACT = l; + ma57->FACT = new double[l]; + if (ma57->FACT == NULL) { abort_bad_memory(1); } + } + + PYNUMERO_HSL_EXPORT + void alloc_ifact(MA57_struct* ma57, int l) { + if ( ma57->IFACT ) { + delete[] ma57->IFACT; + } + ma57->LIFACT = l; + ma57->IFACT = new int[l]; + if (ma57->IFACT == NULL) { abort_bad_memory(1); } + } + + // Functions for specifying dimensions of RHS: + PYNUMERO_HSL_EXPORT + void set_nrhs(MA57_struct* ma57, int n) { + ma57->NRHS = n; + ma57->NRHS_set = true; + } + + PYNUMERO_HSL_EXPORT + void set_lrhs(MA57_struct* ma57, int l) { + ma57->LRHS = l; + ma57->LRHS_set = true; + } + + // Specify what job to be performed - maybe make an arg to functions + PYNUMERO_HSL_EXPORT + void set_job(MA57_struct* ma57, int j) { + ma57->JOB = j; + ma57->JOB_set = true; + } + + + PYNUMERO_HSL_EXPORT + void do_symbolic_factorization(MA57_struct* ma57, int N, int NE, + int* IRN, int* JCN) { + + // Arrays, presumably supplied from Python, are assumed to have base- + // zero indices. Convert to base-one before sending to Fortran. 
+ for (int i=0; iKEEP ) { + // KEEP must be >= 5*N+NE+MAX(N,NE)+42 + int size = 5*N + NE + (NE + N) + 42; + alloc_keep(ma57, size); + } + + // This is a hard requirement, no need to give the user the option + // to change + int* IWORK = new int[5*N]; + if (IWORK == NULL) { abort_bad_memory(1); } + + ma57ad_(&N, &NE, IRN, JCN, + &(ma57->LKEEP), ma57->KEEP, + IWORK, ma57->ICNTL, + ma57->INFO, ma57->RINFO); + + delete[] IWORK; + } + + + PYNUMERO_HSL_EXPORT + void do_numeric_factorization(MA57_struct* ma57, int N, int NE, + double* A) { + + // Get memory estimates from INFO, allocate FACT and IFACT + if ( ! ma57->FACT ) { + int info9 = ma57->INFO[9-1]; + int size = (int)(ma57->FACT_factor*info9); + alloc_fact(ma57, size); + } + if ( ! ma57->IFACT ) { + int info10 = ma57->INFO[10-1]; + int size = (int)(ma57->IFACT_factor*info10); + alloc_ifact(ma57, size); + } + + // Again, length of IWORK is a hard requirement + int* IWORK = new int[N]; + if (IWORK == NULL) { abort_bad_memory(1); } + + ma57bd_(&N, &NE, A, + ma57->FACT, &(ma57->LFACT), + ma57->IFACT, &(ma57->LIFACT), + &(ma57->LKEEP), ma57->KEEP, + IWORK, ma57->ICNTL, + ma57->CNTL, ma57->INFO, + ma57->RINFO); + + delete[] IWORK; + } + + + PYNUMERO_HSL_EXPORT + void do_backsolve(MA57_struct* ma57, int N, double* RHS) { + + // Set number and length (principal axis) of RHS if not already set + if (!ma57->NRHS_set) { + set_nrhs(ma57, 1); + } + if (!ma57->LRHS_set) { + set_lrhs(ma57, N); + } + + // Set JOB. Default is to perform full factorization + if (!ma57->JOB_set) { + set_job(ma57, 1); + } + + // Allocate WORK if not done. Should be >= N + if ( ! 
ma57->WORK ) { + int size = (int)(ma57->WORK_factor*ma57->NRHS*N); + alloc_work(ma57, size); + } + + // IWORK should always be length N + int* IWORK = new int[N]; + if (IWORK == NULL) { abort_bad_memory(1); } + + ma57cd_( + &(ma57->JOB), + &N, + ma57->FACT, + &(ma57->LFACT), + ma57->IFACT, + &(ma57->LIFACT), + &(ma57->NRHS), + RHS, + &(ma57->LRHS), + ma57->WORK, + &(ma57->LWORK), + IWORK, + ma57->ICNTL, + ma57->INFO + ); + + delete[] IWORK; + delete[] ma57->WORK; + ma57->WORK = NULL; + } + + + PYNUMERO_HSL_EXPORT + void do_iterative_refinement(MA57_struct* ma57, int N, int NE, + double* A, int* IRN, int* JCN, double* RHS, double* X, double* RESID) { + // Number of steps of iterative refinement can be controlled with ICNTL[9-1] + + // Set JOB if not set. Controls how (whether) X and RESID will be used + if (!ma57->JOB_set) { + set_job(ma57, 1); + } + + // Need to allocate WORK differently depending on ICNTL options + if ( ! ma57->WORK ) { + int icntl9 = ma57->ICNTL[9-1]; + int icntl10 = ma57->ICNTL[10-1]; + int size; + if (icntl9 == 1) { + size = (int)(ma57->WORK_factor*N); + } else if (icntl9 > 1 && icntl10 == 0) { + size = (int)(ma57->WORK_factor*3*N); + } else if (icntl9 > 1 && icntl10 > 0) { + size = (int)(ma57->WORK_factor*4*N); + } + alloc_work(ma57, size); + } + + int* IWORK = new int[N]; + if (IWORK == NULL) { abort_bad_memory(1); } + + ma57dd_( + &(ma57->JOB), + &N, + &NE, + IRN, + JCN, + ma57->FACT, + &(ma57->LFACT), + ma57->IFACT, + &(ma57->LIFACT), + RHS, + X, + RESID, + ma57->WORK, + IWORK, + ma57->ICNTL, + ma57->CNTL, + ma57->INFO, + ma57->RINFO + ); + + delete[] IWORK; + delete[] ma57->WORK; + ma57->WORK = NULL; + } + + + PYNUMERO_HSL_EXPORT + void do_reallocation(MA57_struct* ma57, int N, double realloc_factor, int IC) { + // Need realloc_factor > 1 here + + // MA57 seems to require that both LNEW and LINEW are larger than the old + // values, regardless of which is being reallocated (set by IC) + int LNEW = (int)(realloc_factor*ma57->LFACT); + 
double* NEWFAC = new double[LNEW]; + if (NEWFAC == NULL) { abort_bad_memory(1); } + + int LINEW = (int)(realloc_factor*ma57->LIFACT); + int* NEWIFC = new int[LINEW]; + if (NEWIFC == NULL) { abort_bad_memory(1); } + + ma57ed_( + &N, + &IC, + ma57->KEEP, + ma57->FACT, + &(ma57->LFACT), + NEWFAC, + &LNEW, + ma57->IFACT, + &(ma57->LIFACT), + NEWIFC, + &LINEW, + ma57->INFO + ); + + if (IC <= 0) { + // Copied real array; new int array is garbage + delete[] ma57->FACT; + ma57->LFACT = LNEW; + ma57->FACT = NEWFAC; + delete[] NEWIFC; + } else if (IC >= 1) { + // Copied int array; new real array is garbage + delete[] ma57->IFACT; + ma57->LIFACT = LINEW; + ma57->IFACT = NEWIFC; + delete[] NEWFAC; + } // Now either FACT or IFACT, whichever was specified by IC, can be used + // as normal in MA57B/C/D + } + +} // extern "C" diff --git a/pyomo/contrib/satsolver/satsolver.py b/pyomo/contrib/satsolver/satsolver.py index a220d874a26..8352353eb91 100644 --- a/pyomo/contrib/satsolver/satsolver.py +++ b/pyomo/contrib/satsolver/satsolver.py @@ -277,20 +277,20 @@ def exitNode(self, node, data): raise NotImplementedError(str(type(node)) + " expression not handled by z3 interface") return ans - def beforeChild(self, node, child): + def beforeChild(self, node, child, child_idx): if type(child) in nonpyomo_leaf_types: # This means the child is POD # i.e., int, float, string return False, str(child) - elif child.is_variable_type(): - return False, str(self.variable_label_map.getSymbol(child)) - elif child.is_parameter_type(): - return False, str(value(child)) - elif not child.is_expression_type(): - return False, str(child) - else: - # this is an expression node + elif child.is_expression_type(): return True, "" + elif child.is_numeric_type(): + if child.is_fixed(): + return False, str(value(child)) + else: + return False, str(self.variable_label_map.getSymbol(child)) + else: + return False, str(child) def finalizeResult(self, node_result): return node_result diff --git 
a/pyomo/core/base/block.py b/pyomo/core/base/block.py index ed40027205f..543b391830e 100644 --- a/pyomo/core/base/block.py +++ b/pyomo/core/base/block.py @@ -1051,8 +1051,10 @@ def add_component(self, name, val): # NB: we don't have to construct the temporary / implicit # sets here: if necessary, that happens when # _add_implicit_sets() calls add_component(). - if id(self) in _BlockConstruction.data: - data = _BlockConstruction.data[id(self)].get(name, None) + if _BlockConstruction.data: + data = _BlockConstruction.data.get(id(self), None) + if data is not None: + data = data.get(name, None) else: data = None if __debug__ and logger.isEnabledFor(logging.DEBUG): @@ -1317,27 +1319,22 @@ def _component_data_iter(self, ctype=None, active=None, sort=False): _sort_indices = SortComponents.sort_indices(sort) _subcomp = PseudoMap(self, ctype, active, sort) for name, comp in _subcomp.iteritems(): - # _NOTE_: Suffix has a dict interface (something other - # derived non-indexed Components may do as well), - # so we don't want to test the existence of - # iteritems as a check for components. Also, - # the case where we test len(comp) after seeing - # that comp.is_indexed is False is a hack for a - # SimpleConstraint whose expression resolved to - # Constraint.skip or Constraint.feasible (in which - # case its data is empty and iteritems would have - # been empty as well) - # try: - # _items = comp.iteritems() - # except AttributeError: - # _items = [ (None, comp) ] + # NOTE: Suffix has a dict interface (something other derived + # non-indexed Components may do as well), so we don't want + # to test the existence of iteritems as a check for + # component datas. We will rely on is_indexed() to catch + # all the indexed components. Then we will do special + # processing for the scalar components to catch the case + # where there are "sparse scalar components" if comp.is_indexed(): _items = comp.iteritems() - # This is a hack (see _NOTE_ above). 
- elif len(comp) or not hasattr(comp, '_data'): - _items = ((None, comp),) + elif hasattr(comp, '_data'): + # This may be an empty Scalar component (e.g., from + # Constraint.Skip on a scalar Constraint) + assert len(comp._data) <= 1 + _items = iteritems(comp._data) else: - _items = tuple() + _items = ((None, comp),) if _sort_indices: _items = sorted(_items, key=itemgetter(0)) @@ -1834,7 +1831,35 @@ def __init__(self, *args, **kwargs): self.construct() def _getitem_when_not_present(self, idx): - return self._setitem_when_not_present(idx) + _block = self._setitem_when_not_present(idx) + if self._rule is None: + return _block + + if _BlockConstruction.data: + data = _BlockConstruction.data.get(id(self), None) + if data is not None: + data = data.get(idx, None) + if data is not None: + _BlockConstruction.data[id(_block)] = data + else: + data = None + + try: + obj = apply_indexed_rule( + self, self._rule, _block, idx, self._options) + finally: + if data is not None: + del _BlockConstruction.data[id(_block)] + + if obj is not _block and isinstance(obj, _BlockData): + # If the user returns a block, transfer over everything + # they defined into the empty one we created. + _block.transfer_attributes_from(obj) + + # TBD: Should we allow skipping Blocks??? + # if obj is Block.Skip and idx is not None: + # del self._data[idx] + return _block def find_component(self, label_or_component): """ @@ -1854,55 +1879,63 @@ def construct(self, data=None): timer = ConstructionTimer(self) self._constructed = True - # We must check that any pre-existing components are - # constructed. This catches the case where someone is building - # a Concrete model by building (potentially pseudo-abstract) - # sub-blocks and then adding them to a Concrete model block. 
- for idx in self._data: - _block = self[idx] - for name, obj in iteritems(_block.component_map()): - if not obj._constructed: - if data is None: - _data = None - else: - _data = data.get(name, None) - obj.construct(_data) - - if self._rule is None: - # Ensure the _data dictionary is populated for singleton - # blocks - if not self.is_indexed(): - self[None] + # Constructing blocks is tricky. Scalar blocks are already + # partially constructed (they have _data[None] == self) in order + # to support Abstract blocks. The block may therefore already + # have components declared on it. In order to preserve + # decl_order, we must construct those components *first* before + # firing any rule. Indexed blocks should be empty, so we only + # need to fire the rule in order. + # + # Since the rule does not pass any "data" on, we build a scalar + # "stack" of pointers to block data (_BlockConstruction.data) + # that the individual blocks' add_component() can refer back to + # to handle component construction. + if data is not None: + _BlockConstruction.data[id(self)] = data + try: + if self.is_indexed(): + # We can only populate Blocks with finite indexing sets + if self._rule is not None and self.index_set().isfinite(): + for _idx in self.index_set(): + # Trigger population & call the rule + self._getitem_when_not_present(_idx) + else: + # We must check that any pre-existing components are + # constructed. This catches the case where someone is + # building a Concrete model by building (potentially + # pseudo-abstract) sub-blocks and then adding them to a + # Concrete model block. 
+ _idx = next(iter(UnindexedComponent_set)) + if _idx not in self._data: + # Derived block classes may not follow the scalar + # Block convention of initializing _data to point to + # itself (i.e., they are not set up to support + # Abstract models) + self._data[_idx] = self + _block = self + for name, obj in iteritems(_block.component_map()): + if not obj._constructed: + if data is None: + _data = None + else: + _data = data.get(name, None) + obj.construct(_data) + if self._rule is not None: + obj = apply_indexed_rule( + self, self._rule, _block, _idx, self._options) + if obj is not _block and isinstance(obj, _BlockData): + # If the user returns a block, transfer over + # everything they defined into the empty one we + # created. + _block.transfer_attributes_from(obj) + finally: + # We must check if data is still in the dictionary, as + # scalar blocks will have already removed the entry (as + # the _data and the component are the same object) + if data is not None and id(self) in _BlockConstruction.data: + del _BlockConstruction.data[id(self)] timer.report() - return - # If we have a rule, fire the rule for all indices. - # Notes: - # - Since this block is now concrete, any components added to - # it will be immediately constructed by - # block.add_component(). - # - Since the rule does not pass any "data" on, we build a - # scalar "stack" of pointers to block data - # (_BlockConstruction.data) that the individual blocks' - # add_component() can refer back to to handle component - # construction. - for idx in self._index: - _block = self[idx] - if data is not None and idx in data: - _BlockConstruction.data[id(_block)] = data[idx] - obj = apply_indexed_rule( - self, self._rule, _block, idx, self._options) - if id(_block) in _BlockConstruction.data: - del _BlockConstruction.data[id(_block)] - - if obj is not _block and isinstance(obj, _BlockData): - # If the user returns a block, transfer over everything - # they defined into the empty one we created. 
- _block.transfer_attributes_from(obj) - - # TBD: Should we allow skipping Blocks??? - # if obj is Block.Skip and idx is not None: - # del self._data[idx] - timer.report() def _pprint_callback(self, ostream, idx, data): if not self.is_indexed(): @@ -1945,6 +1978,10 @@ class SimpleBlock(_BlockData, Block): def __init__(self, *args, **kwds): _BlockData.__init__(self, component=self) Block.__init__(self, *args, **kwds) + # Initialize the data dict so that (abstract) attribute + # assignment will work. Note that we do not trigger + # get/setitem_when_not_present so that we do not (implicitly) + # trigger the Block rule self._data[None] = self def display(self, filename=None, ostream=None, prefix=""): diff --git a/pyomo/core/base/component.py b/pyomo/core/base/component.py index cd0d93db975..6f10aaefb42 100644 --- a/pyomo/core/base/component.py +++ b/pyomo/core/base/component.py @@ -22,6 +22,7 @@ import pyomo.common from pyomo.common import deprecated +from pyomo.core.pyomoobject import PyomoObject from pyomo.core.base.misc import tabular_writer, sorted_robust logger = logging.getLogger('pyomo.core') @@ -75,7 +76,7 @@ def cname(*args, **kwds): class CloneError(pyomo.common.errors.PyomoException): pass -class _ComponentBase(object): +class _ComponentBase(PyomoObject): """A base class for Component and ComponentData This class defines some fundamental methods and properties that are @@ -86,6 +87,10 @@ class _ComponentBase(object): _PPRINT_INDENT = " " + def is_component_type(self): + """Return True if this class is a Pyomo component""" + return True + def __deepcopy__(self, memo): # The problem we are addressing is when we want to clone a # sub-block in a model. 
In that case, the block can have @@ -594,10 +599,6 @@ def is_indexed(self): """Return true if this component is indexed""" return False - def is_component_type(self): - """Return True if this class is a Pyomo component""" - return True - def clear_suffix_value(self, suffix_or_name, expand=True): """Clear the suffix value for this component data""" if isinstance(suffix_or_name, six.string_types): @@ -912,10 +913,6 @@ def is_indexed(self): """Return true if this component is indexed""" return False - def is_component_type(self): - """Return True if this class is a Pyomo component""" - return True - def clear_suffix_value(self, suffix_or_name, expand=True): """Set the suffix value for this component data""" if isinstance(suffix_or_name, six.string_types): diff --git a/pyomo/core/base/external.py b/pyomo/core/base/external.py index 44f36475e36..7fb001ab1bc 100644 --- a/pyomo/core/base/external.py +++ b/pyomo/core/base/external.py @@ -47,6 +47,7 @@ def __new__(cls, *args, **kwds): def __init__(self, *args, **kwds): self._units = kwds.pop('units', None) + self._arg_units = kwds.pop('arg_units', None) kwds.setdefault('ctype', ExternalFunction) Component.__init__(self, **kwds) self._constructed = True @@ -60,6 +61,10 @@ def get_units(self): """Return the units for this ExternalFunction""" return self._units + def get_arg_units(self): + """Return the units for this ExternalFunctions arguments""" + return self._arg_units + def __call__(self, *args): args_ = [] for arg in args: @@ -200,6 +205,9 @@ def __init__(self, *args, **kwds): self._library = 'pyomo_ampl.so' self._function = 'pyomo_socket_server' + arg_units = kwds.get('arg_units', None) + if arg_units is not None: + kwds['arg_units'] = [None]+list(arg_units) ExternalFunction.__init__(self, *args, **kwds) self._fcn_id = PythonCallbackFunction.register_instance(self) diff --git a/pyomo/core/base/global_set.py b/pyomo/core/base/global_set.py index afe290d8bcc..f335b129a73 100644 --- a/pyomo/core/base/global_set.py +++ 
b/pyomo/core/base/global_set.py @@ -46,11 +46,22 @@ def get(self, value, default): return value return default def __iter__(self): - yield None + return (None,).__iter__() def subsets(self): - return [self] + return [ self ] def construct(self): pass def __len__(self): return 1 + def __eq__(self, other): + return self is other + def __ne__(self, other): + return self is not other + def isdiscrete(self): + return True + def isfinite(self): + return True + def isordered(self): + # As this set only has a single element, it is implicitly "ordered" + return True UnindexedComponent_set = _UnindexedComponent_set('UnindexedComponent_set') diff --git a/pyomo/core/base/indexed_component.py b/pyomo/core/base/indexed_component.py index 68ad2f2cbee..97c26e7c87b 100644 --- a/pyomo/core/base/indexed_component.py +++ b/pyomo/core/base/indexed_component.py @@ -14,7 +14,7 @@ from pyomo.core.expr.expr_errors import TemplateExpressionError from pyomo.core.expr.numvalue import native_types -from pyomo.core.base.indexed_component_slice import _IndexedComponent_slice +from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from pyomo.core.base.component import Component, ActiveComponent from pyomo.core.base.config import PyomoOptions from pyomo.core.base.global_set import UnindexedComponent_set @@ -375,7 +375,7 @@ def __getitem__(self, index): index = TypeError if index is TypeError: raise - if index.__class__ is _IndexedComponent_slice: + if index.__class__ is IndexedComponent_slice: return index # The index could have contained constant but nonhashable # objects (e.g., scalar immutable Params). 
@@ -401,7 +401,7 @@ def __getitem__(self, index): # _processUnhashableIndex could have found a slice, or # _validate could have found an Ellipsis and returned a # slicer - if index.__class__ is _IndexedComponent_slice: + if index.__class__ is IndexedComponent_slice: return index obj = self._data.get(index, _NotFound) # @@ -438,7 +438,7 @@ def __setitem__(self, index, val): # If we didn't find the index in the data, then we need to # validate it against the underlying set (as long as # _processUnhashableIndex didn't return a slicer) - if index.__class__ is not _IndexedComponent_slice: + if index.__class__ is not IndexedComponent_slice: index = self._validate_index(index) else: return self._setitem_impl(index, obj, val) @@ -447,10 +447,10 @@ def __setitem__(self, index, val): # dictionary and set the value # # Note that we need to RECHECK the class against - # _IndexedComponent_slice, as _validate_index could have found + # IndexedComponent_slice, as _validate_index could have found # an Ellipsis (which is hashable) and returned a slicer # - if index.__class__ is _IndexedComponent_slice: + if index.__class__ is IndexedComponent_slice: # support "m.x[:,1] = 5" through a simple recursive call. # # Assert that this slice was just generated @@ -480,11 +480,11 @@ def __delitem__(self, index): index = self._processUnhashableIndex(index) if obj is _NotFound: - if index.__class__ is not _IndexedComponent_slice: + if index.__class__ is not IndexedComponent_slice: index = self._validate_index(index) # this supports "del m.x[:,1]" through a simple recursive call - if index.__class__ is _IndexedComponent_slice: + if index.__class__ is IndexedComponent_slice: # Assert that this slice ws just generated assert len(index._call_stack) == 1 # Make a copy of the slicer items *before* we start @@ -525,7 +525,7 @@ def _validate_index(self, idx): # indexing set is a complex set operation)! 
return validated_idx - if idx.__class__ is _IndexedComponent_slice: + if idx.__class__ is IndexedComponent_slice: return idx if normalize_index.flatten: @@ -627,7 +627,7 @@ def _processUnhashableIndex(self, idx): # templatized expression. # from pyomo.core.expr import current as EXPR - return EXPR.GetItemExpression(tuple(idx), self) + return EXPR.GetItemExpression((self,) + tuple(idx)) except EXPR.NonConstantExpressionError: # @@ -666,7 +666,7 @@ def _processUnhashableIndex(self, idx): fixed[i - len(idx)] = val if sliced or ellipsis is not None: - return _IndexedComponent_slice(self, fixed, sliced, ellipsis) + return IndexedComponent_slice(self, fixed, sliced, ellipsis) elif _found_numeric: if len(idx) == 1: return fixed[0] diff --git a/pyomo/core/base/indexed_component_slice.py b/pyomo/core/base/indexed_component_slice.py index 5c7d99e9ae1..76e9e3b8dec 100644 --- a/pyomo/core/base/indexed_component_slice.py +++ b/pyomo/core/base/indexed_component_slice.py @@ -8,10 +8,10 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ import copy -from six import PY3, iteritems, advance_iterator +from six import PY3, iteritems, iterkeys, advance_iterator from pyomo.common import DeveloperError -class _IndexedComponent_slice(object): +class IndexedComponent_slice(object): """Special class for slicing through hierarchical component trees The basic concept is to interrupt the normal slice generation @@ -23,29 +23,87 @@ class _IndexedComponent_slice(object): calls to __getitem__ / __getattr__ / __call__ happen *before* the call to __iter__() """ + ATTR_MASK = 4 + ITEM_MASK = 8 + CALL_MASK = 16 + slice_info = 0 - get_attribute = 1 - set_attribute = 4 - del_attribute = 7 - get_item = 2 - set_item = 5 - del_item = 6 - call = 3 - - def __init__(self, component, fixed, sliced, ellipsis): + get_attribute = ATTR_MASK | 1 + set_attribute = ATTR_MASK | 2 + del_attribute = ATTR_MASK | 3 + get_item = ITEM_MASK | 1 + set_item = ITEM_MASK | 2 + del_item = ITEM_MASK | 3 + call = CALL_MASK + + def __init__(self, component, fixed=None, sliced=None, ellipsis=None): + """A "slice" over an _IndexedComponent hierarchy + + This class has two forms for the constructor. The first form is + the standard constructor that takes a base component and + indexing information. This form takes + + IndexedComponent_slice(component, fixed, sliced, ellipsis) + + The second form is a "copy constructor" that is used internally + when building up the "call stack" for the hierarchical slice. 
The + copy constructor takes an IndexedComponent_slice and an + optional "next term" in the slice construction (from get/set/del + item/attr or call): + + IndexedComponent_slice(slice, next_term=None) + + Parameters + ---------- + component: IndexedComponent + The base component for this slice + + fixed: dict + A dictionary indicating the fixed indices of component, + mapping index position to value + + sliced: dict + A dictionary indicating the sliced indices of component + mapping the index position to the (python) slice object + + ellipsis: int + The position of the ellipsis in the initial component slice + + """ # Note that because we use a custom __setattr__, we need to # define actual instance attributes using the base class # __setattr__. - set_attr = super(_IndexedComponent_slice, self).__setattr__ - - set_attr('_call_stack', [ - (_IndexedComponent_slice.slice_info, - (component, fixed, sliced, ellipsis)) ]) - # Since this is an object, users may change these flags between - # where they declare the slice and iterate over it. - set_attr('call_errors_generate_exceptions', True) - set_attr('key_errors_generate_exceptions', True) - set_attr('attribute_errors_generate_exceptions', True) + set_attr = super(IndexedComponent_slice, self).__setattr__ + if type(component) is IndexedComponent_slice: + # Copy constructor + _len = component._len + # For efficiency, we will only duplicate the call stack + # list if this instance is not point to the end of the list. 
+ if _len == len(component._call_stack): + set_attr('_call_stack', component._call_stack) + else: + set_attr('_call_stack', component._call_stack[:_len]) + set_attr('_len', _len) + if fixed is not None: + self._call_stack.append(fixed) + self._len += 1 + set_attr('call_errors_generate_exceptions', + component.call_errors_generate_exceptions) + set_attr('key_errors_generate_exceptions', + component.key_errors_generate_exceptions) + set_attr('attribute_errors_generate_exceptions', + component.attribute_errors_generate_exceptions) + else: + # Normal constructor + set_attr('_call_stack', [ + (IndexedComponent_slice.slice_info, + (component, fixed, sliced, ellipsis)) ]) + set_attr('_len', 1) + # Since this is an object, users may change these flags + # between where they declare the slice and iterate over it. + set_attr('call_errors_generate_exceptions', True) + set_attr('key_errors_generate_exceptions', True) + set_attr('attribute_errors_generate_exceptions', True) def __getstate__(self): """Serialize this object. @@ -59,7 +117,7 @@ def __getstate__(self): def __setstate__(self, state): """Deserialize the state into this object. """ - set_attr = super(_IndexedComponent_slice, self).__setattr__ + set_attr = super(IndexedComponent_slice, self).__setattr__ for k,v in iteritems(state): set_attr(k,v) @@ -77,12 +135,11 @@ def __getattr__(self, name): """Override the "." operator to defer resolution until iteration. Creating a slice of a component returns a - _IndexedComponent_slice object. Subsequent attempts to resolve + IndexedComponent_slice object. Subsequent attempts to resolve attributes hit this method. """ - self._call_stack.append( ( - _IndexedComponent_slice.get_attribute, name ) ) - return self + return IndexedComponent_slice(self, ( + IndexedComponent_slice.get_attribute, name ) ) def __setattr__(self, name, value): """Override the "." 
operator implementing attribute assignment @@ -95,24 +152,23 @@ def __setattr__(self, name, value): """ # Don't overload any pre-existing attributes if name in self.__dict__: - return super(_IndexedComponent_slice, self).__setattr__(name,value) + return super(IndexedComponent_slice, self).__setattr__(name,value) - self._call_stack.append( ( - _IndexedComponent_slice.set_attribute, name, value ) ) # Immediately evaluate the slice and set the attributes - for i in self: pass + for i in IndexedComponent_slice(self, ( + IndexedComponent_slice.set_attribute, name, value ) ): + pass return None def __getitem__(self, idx): """Override the "[]" operator to defer resolution until iteration. Creating a slice of a component returns a - _IndexedComponent_slice object. Subsequent attempts to query + IndexedComponent_slice object. Subsequent attempts to query items hit this method. """ - self._call_stack.append( ( - _IndexedComponent_slice.get_item, idx ) ) - return self + return IndexedComponent_slice(self, ( + IndexedComponent_slice.get_item, idx ) ) def __setitem__(self, idx, val): """Override the "[]" operator for setting item values. @@ -123,10 +179,10 @@ def __setitem__(self, idx, val): and immediately evaluates the slice. """ - self._call_stack.append( ( - _IndexedComponent_slice.set_item, idx, val ) ) # Immediately evaluate the slice and set the attributes - for i in self: pass + for i in IndexedComponent_slice(self, ( + IndexedComponent_slice.set_item, idx, val ) ): + pass return None def __delitem__(self, idx): @@ -138,16 +194,16 @@ def __delitem__(self, idx): and immediately evaluates the slice. """ - self._call_stack.append( ( - _IndexedComponent_slice.del_item, idx ) ) # Immediately evaluate the slice and set the attributes - for i in self: pass + for i in IndexedComponent_slice(self, ( + IndexedComponent_slice.del_item, idx ) ): + pass return None def __call__(self, *idx, **kwds): """Special handling of the "()" operator for component slices. 
- Creating a slice of a component returns a _IndexedComponent_slice + Creating a slice of a component returns a IndexedComponent_slice object. Subsequent attempts to call items hit this method. We handle the __call__ method separately based on the item (identifier immediately before the "()") being called: @@ -164,28 +220,39 @@ def __call__(self, *idx, **kwds): # called after retrieving an attribute that will be called. I # don't know why that happens, but we will trap it here and # remove the getattr(__name__) from the call stack. - if self._call_stack[-1][0] == _IndexedComponent_slice.get_attribute \ - and self._call_stack[-1][1] == '__name__': - self._call_stack.pop() - - self._call_stack.append( ( - _IndexedComponent_slice.call, idx, kwds ) ) - if self._call_stack[-2][1] == 'component': - return self + _len = self._len + if self._call_stack[_len-1][0] == IndexedComponent_slice.get_attribute \ + and self._call_stack[_len-1][1] == '__name__': + self._len -= 1 + + ans = IndexedComponent_slice(self, ( + IndexedComponent_slice.call, idx, kwds ) ) + # Because we just duplicated the slice and added a new entry, we + # know that the _len == len(_call_stack) + if ans._call_stack[-2][1] == 'component': + return ans else: # Note: simply calling "list(self)" results in infinite # recursion in python2.6 - return list( i for i in self ) + return list( i for i in ans ) + + def __hash__(self): + return hash(tuple(_freeze(x) for x in self._call_stack[:self._len])) + + def __eq__(self, other): + if other is self: + return True + if type(other) is not IndexedComponent_slice: + return False + return tuple(_freeze(x) for x in self._call_stack[:self._len]) \ + == tuple(_freeze(x) for x in other._call_stack[:other._len]) + + def __ne__(self, other): + return not self.__eq__(other) def duplicate(self): - ans = _IndexedComponent_slice(None,None,None,None) - ans.call_errors_generate_exceptions \ - = self.call_errors_generate_exceptions - ans.key_errors_generate_exceptions \ - = 
self.key_errors_generate_exceptions - ans.attribute_errors_generate_exceptions \ - = self.attribute_errors_generate_exceptions - ans._call_stack = list(self._call_stack) + ans = IndexedComponent_slice(self) + ans._call_stack = ans._call_stack[:ans._len] return ans def index_wildcard_keys(self): @@ -209,6 +276,27 @@ def expanded_items(self): return ((_iter.get_last_index(), _) for _ in _iter) +def _freeze(info): + if info[0] == IndexedComponent_slice.slice_info: + return ( + info[0], + id(info[1][0]), # id of the Component + tuple(iteritems(info[1][1])), # {idx: value} for fixed + tuple(iterkeys(info[1][2])), # {idx: slice} for slices + info[1][3] # elipsis index + ) + elif info[0] & IndexedComponent_slice.ITEM_MASK: + return ( + info[0], + tuple( (x.start,x.stop,x.step) if type(x) is slice else x + for x in info[1] ), + info[2:], + ) + else: + return info + + + class _slice_generator(object): """Utility (iterator) for generating the elements of one slice @@ -270,6 +358,8 @@ def __next__(self): else: return None +# Backwards compatibility +_IndexedComponent_slice = IndexedComponent_slice # Mock up a callable object with a "check_complete" method def _advance_iter(_iter): @@ -293,12 +383,13 @@ def __init__(self, component_slice, advance_iter=_advance_iter, self.advance_iter = advance_iter self._iter_over_index = iter_over_index call_stack = self._slice._call_stack - self._iter_stack = [None]*len(call_stack) - if call_stack[0][0] == _IndexedComponent_slice.slice_info: + call_stack_len = self._slice._len + self._iter_stack = [None]*call_stack_len + if call_stack[0][0] == IndexedComponent_slice.slice_info: self._iter_stack[0] = _slice_generator( *call_stack[0][1], iter_over_index=self._iter_over_index) - elif call_stack[0][0] == _IndexedComponent_slice.set_item: - assert len(call_stack) == 1 + elif call_stack[0][0] == IndexedComponent_slice.set_item: + assert call_stack_len == 1 # defer creating the iterator until later self._iter_stack[0] = _NotIterable # Something not 
None else: @@ -338,9 +429,9 @@ def __next__(self): idx -= 1 continue # Walk down the hierarchy to get to the final object - while idx < len(self._slice._call_stack): + while idx < self._slice._len: _call = self._slice._call_stack[idx] - if _call[0] == _IndexedComponent_slice.get_attribute: + if _call[0] == IndexedComponent_slice.get_attribute: try: _comp = getattr(_comp, _call[1]) except AttributeError: @@ -352,7 +443,7 @@ def __next__(self): and not self._iter_over_index: raise break - elif _call[0] == _IndexedComponent_slice.get_item: + elif _call[0] == IndexedComponent_slice.get_item: try: _comp = _comp.__getitem__( _call[1] ) except KeyError: @@ -365,12 +456,12 @@ def __next__(self): and not self._iter_over_index: raise break - if _comp.__class__ is _IndexedComponent_slice: + if _comp.__class__ is IndexedComponent_slice: # Extract the _slice_generator (for # efficiency... these are always 1-level slices, # so we don't need the overhead of the - # _IndexedComponent_slice object) - assert len(_comp._call_stack) == 1 + # IndexedComponent_slice object) + assert _comp._len == 1 self._iter_stack[idx] = _slice_generator( *_comp._call_stack[0][1], iter_over_index=self._iter_over_index @@ -387,7 +478,7 @@ def __next__(self): break else: self._iter_stack[idx] = None - elif _call[0] == _IndexedComponent_slice.call: + elif _call[0] == IndexedComponent_slice.call: try: _comp = _comp( *(_call[1]), **(_call[2]) ) except: @@ -400,8 +491,8 @@ def __next__(self): and not self._iter_over_index: raise break - elif _call[0] == _IndexedComponent_slice.set_attribute: - assert idx == len(self._slice._call_stack) - 1 + elif _call[0] == IndexedComponent_slice.set_attribute: + assert idx == self._slice._len - 1 try: _comp = setattr(_comp, _call[1], _call[2]) except AttributeError: @@ -412,8 +503,8 @@ def __next__(self): if self._slice.attribute_errors_generate_exceptions: raise break - elif _call[0] == _IndexedComponent_slice.set_item: - assert idx == len(self._slice._call_stack) - 1 + 
elif _call[0] == IndexedComponent_slice.set_item: + assert idx == self._slice._len - 1 # We have a somewhat unusual situation when someone # makes a _ReferenceDict to m.x[:] and then wants to # set one of the attributes. In that situation, @@ -455,9 +546,9 @@ def __next__(self): and not self._iter_over_index: raise break - if _tmp.__class__ is _IndexedComponent_slice: + if _tmp.__class__ is IndexedComponent_slice: # Extract the _slice_generator and evaluate it. - assert len(_tmp._call_stack) == 1 + assert _tmp._len == 1 _iter = _IndexedComponent_slice_iter( _tmp, self.advance_iter) for _ in _iter: @@ -472,8 +563,8 @@ def __next__(self): self.advance_iter.check_complete() # No try-catch, since we know this key is valid _comp[_call[1]] = _call[2] - elif _call[0] == _IndexedComponent_slice.del_item: - assert idx == len(self._slice._call_stack) - 1 + elif _call[0] == IndexedComponent_slice.del_item: + assert idx == self._slice._len - 1 # The problem here is that _call[1] may be a slice. # If it is, but we are in something like a # _ReferenceDict, where the caller actually wants a @@ -494,9 +585,9 @@ def __next__(self): if self._slice.key_errors_generate_exceptions: raise break - if _tmp.__class__ is _IndexedComponent_slice: + if _tmp.__class__ is IndexedComponent_slice: # Extract the _slice_generator and evaluate it. 
- assert len(_tmp._call_stack) == 1 + assert _tmp._len == 1 _iter = _IndexedComponent_slice_iter( _tmp, self.advance_iter) _idx_to_del = [] @@ -513,8 +604,8 @@ def __next__(self): else: # No try-catch, since we know this key is valid del _comp[_call[1]] - elif _call[0] == _IndexedComponent_slice.del_attribute: - assert idx == len(self._slice._call_stack) - 1 + elif _call[0] == IndexedComponent_slice.del_attribute: + assert idx == self._slice._len - 1 try: _comp = delattr(_comp, _call[1]) except AttributeError: @@ -527,11 +618,11 @@ def __next__(self): break else: raise DeveloperError( - "Unexpected entry in _IndexedComponent_slice " + "Unexpected entry in IndexedComponent_slice " "_call_stack: %s" % (_call[0],)) idx += 1 - if idx == len(self._slice._call_stack): + if idx == self._slice._len: # Check to make sure the custom iterator # (i.e._fill_in_known_wildcards) is complete self.advance_iter.check_complete() diff --git a/pyomo/core/base/param.py b/pyomo/core/base/param.py index 69cd6a35c28..67340c7e7f7 100644 --- a/pyomo/core/base/param.py +++ b/pyomo/core/base/param.py @@ -188,22 +188,6 @@ def is_parameter_type(self): """ return True - def is_variable_type(self): - """ - Returns False because this is not a variable object. - """ - return False - - def is_expression_type(self): - """Returns False because this is not an expression""" - return False - - def is_potentially_variable(self): - """ - Returns False because this object can never reference variables. - """ - return False - def _compute_polynomial_degree(self, result): """ Returns 0 because this object can never reference variables. @@ -306,10 +290,6 @@ def __iter__(self): return self._data.__iter__() return self._index.__iter__() - def is_expression_type(self): - """Returns False because this is not an expression""" - return False - # # These are "sparse equivalent" access / iteration methods that # only loop over the defined data. 
diff --git a/pyomo/core/base/piecewise.py b/pyomo/core/base/piecewise.py index 59a12aba9df..193e12f7b20 100644 --- a/pyomo/core/base/piecewise.py +++ b/pyomo/core/base/piecewise.py @@ -43,8 +43,8 @@ import itertools import operator import types +import enum -from pyutilib.enum import Enum from pyutilib.misc import flatten_tuple from pyomo.common.timing import ConstructionTimer @@ -61,19 +61,21 @@ logger = logging.getLogger('pyomo.core') -PWRepn = Enum('SOS2', - 'BIGM_BIN', - 'BIGM_SOS1', - 'CC', - 'DCC', - 'DLOG', - 'LOG', - 'MC', - 'INC') - -Bound = Enum('Lower', - 'Upper', - 'Equal') +class PWRepn(str, enum.Enum): + SOS2 = 'SOS2' + BIGM_BIN = 'BIGM_BIN' + BIGM_SOS1 = 'BIGM_SOS1' + CC = 'CC' + DCC = 'DCC' + DLOG = 'DLOG' + LOG = 'LOG' + MC = 'MC' + INC = 'INC' + +class Bound(str, enum.Enum): + Lower = 'Lower' + Upper = 'Upper' + Equal = 'Equal' # BE SURE TO CHANGE THE PIECWISE DOCSTRING # IF THIS GETS CHANGED diff --git a/pyomo/core/base/reference.py b/pyomo/core/base/reference.py index d7f64b73fcd..ec056da2411 100644 --- a/pyomo/core/base/reference.py +++ b/pyomo/core/base/reference.py @@ -16,7 +16,7 @@ IndexedComponent, UnindexedComponent_set ) from pyomo.core.base.indexed_component_slice import ( - _IndexedComponent_slice, _IndexedComponent_slice_iter + IndexedComponent_slice, _IndexedComponent_slice_iter ) import six @@ -143,14 +143,14 @@ class _ReferenceDict(collections_MutableMapping): """A dict-like object whose values are defined by a slice. This implements a dict-like object whose keys and values are defined - by a component slice (:py:class:`_IndexedComponent_slice`). The + by a component slice (:py:class:`IndexedComponent_slice`). The intent behind this object is to replace the normal ``_data`` :py:class:`dict` in :py:class:`IndexedComponent` containers to create "reference" components. 
Parameters ---------- - component_slice : :py:class:`_IndexedComponent_slice` + component_slice : :py:class:`IndexedComponent_slice` The slice object that defines the "members" of this mutable mapping. """ def __init__(self, component_slice): @@ -192,19 +192,19 @@ def __getitem__(self, key): def __setitem__(self, key, val): tmp = self._slice.duplicate() op = tmp._call_stack[-1][0] - if op == _IndexedComponent_slice.get_item: + if op == IndexedComponent_slice.get_item: tmp._call_stack[-1] = ( - _IndexedComponent_slice.set_item, + IndexedComponent_slice.set_item, tmp._call_stack[-1][1], val ) - elif op == _IndexedComponent_slice.slice_info: + elif op == IndexedComponent_slice.slice_info: tmp._call_stack[-1] = ( - _IndexedComponent_slice.set_item, + IndexedComponent_slice.set_item, tmp._call_stack[-1][1], val ) - elif op == _IndexedComponent_slice.get_attribute: + elif op == IndexedComponent_slice.get_attribute: tmp._call_stack[-1] = ( - _IndexedComponent_slice.set_attribute, + IndexedComponent_slice.set_attribute, tmp._call_stack[-1][1], val ) else: @@ -218,13 +218,13 @@ def __setitem__(self, key, val): def __delitem__(self, key): tmp = self._slice.duplicate() op = tmp._call_stack[-1][0] - if op == _IndexedComponent_slice.get_item: + if op == IndexedComponent_slice.get_item: # If the last attribute of the slice gets an item, # change it to delete the item tmp._call_stack[-1] = ( - _IndexedComponent_slice.del_item, + IndexedComponent_slice.del_item, tmp._call_stack[-1][1] ) - elif op == _IndexedComponent_slice.slice_info: + elif op == IndexedComponent_slice.slice_info: assert len(tmp._call_stack) == 1 _iter = self._get_iter(tmp, key) try: @@ -233,11 +233,11 @@ def __delitem__(self, key): return except StopIteration: raise KeyError("KeyError: %s" % (key,)) - elif op == _IndexedComponent_slice.get_attribute: + elif op == IndexedComponent_slice.get_attribute: # If the last attribute of the slice retrieves an attribute, # change it to delete the attribute 
tmp._call_stack[-1] = ( - _IndexedComponent_slice.del_attribute, + IndexedComponent_slice.del_attribute, tmp._call_stack[-1][1] ) else: raise DeveloperError( @@ -300,7 +300,7 @@ class _ReferenceSet(collections_Set): """A set-like object whose values are defined by a slice. This implements a dict-like object whose members are defined by a - component slice (:py:class:`_IndexedComponent_slice`). + component slice (:py:class:`IndexedComponent_slice`). :py:class:`_ReferenceSet` differs from the :py:class:`_ReferenceDict` above in that it looks in the underlying component ``index_set()`` for values that match the slice, and not @@ -308,7 +308,7 @@ class _ReferenceSet(collections_Set): Parameters ---------- - component_slice : :py:class:`_IndexedComponent_slice` + component_slice : :py:class:`IndexedComponent_slice` The slice object that defines the "members" of this set """ @@ -431,7 +431,7 @@ def Reference(reference, ctype=_NotSpecified): Parameters ---------- - reference : :py:class:`_IndexedComponent_slice` + reference : :py:class:`IndexedComponent_slice` component slice that defines the data to include in the Reference component @@ -506,7 +506,7 @@ def Reference(reference, ctype=_NotSpecified): 4 : 1 : 10 : None : False : False : Reals """ - if isinstance(reference, _IndexedComponent_slice): + if isinstance(reference, IndexedComponent_slice): pass elif isinstance(reference, Component): reference = reference[...] 
diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py index 89c0fcce557..8597c9d8d12 100644 --- a/pyomo/core/base/set.py +++ b/pyomo/core/base/set.py @@ -1127,9 +1127,20 @@ def __len__(self): raise DeveloperError("Derived finite set class (%s) failed to " "implement __len__" % (type(self).__name__,)) - def __iter__(self): + def _iter_impl(self): raise DeveloperError("Derived finite set class (%s) failed to " - "implement __iter__" % (type(self).__name__,)) + "implement _iter_impl" % (type(self).__name__,)) + + def __iter__(self): + """Iterate over the finite set + + Note: derived classes should NOT reimplement this method, and + should instead overload _iter_impl. The expression template + system relies on being able to replace this method for all Sets + during template generation. + + """ + return self._iter_impl() def __reversed__(self): return reversed(self.data()) @@ -1242,7 +1253,7 @@ def get(self, value, default=None): return value return default - def __iter__(self): + def _iter_impl(self): return iter(self._values) def __len__(self): @@ -1518,7 +1529,7 @@ def __getstate__(self): # Note: because none of the slots on this class need to be edited, # we don't need to implement a specialized __setstate__ method. - def __iter__(self): + def _iter_impl(self): """ Return an iterator for the set. """ @@ -1661,13 +1672,13 @@ def __getstate__(self): # Note: because none of the slots on this class need to be edited, # we don't need to implement a specialized __setstate__ method. - def __iter__(self): + def _iter_impl(self): """ Return an iterator for the set. 
""" if not self._is_sorted: self._sort() - return super(_SortedSetData, self).__iter__() + return super(_SortedSetData, self)._iter_impl() def __reversed__(self): if not self._is_sorted: @@ -2252,7 +2263,7 @@ def get(self, value, default=None): def __len__(self): return len(self._ref) - def __iter__(self): + def _iter_impl(self): return iter(self._ref) def __str__(self): @@ -2411,7 +2422,7 @@ def _range_gen(r): i += 1 n = start + i*step - def __iter__(self): + def _iter_impl(self): # If there is only a single underlying range, then we will # iterate over it nIters = len(self._ranges) - 1 @@ -2527,7 +2538,7 @@ class RangeSet(Component): Parameters ---------- - *args: tuple, optional + *args: int | float | None The range defined by ([start=1], end, [step=1]). If only a single positional parameter, `end` is supplied, then the RangeSet will be the integers starting at 1 up through and @@ -3119,7 +3130,7 @@ def get(self, val, default=None): class SetUnion_FiniteSet(_FiniteSetMixin, SetUnion_InfiniteSet): __slots__ = tuple() - def __iter__(self): + def _iter_impl(self): set0 = self._sets[0] return itertools.chain( set0, @@ -3250,7 +3261,7 @@ def get(self, val, default=None): class SetIntersection_FiniteSet(_FiniteSetMixin, SetIntersection_InfiniteSet): __slots__ = tuple() - def __iter__(self): + def _iter_impl(self): set0, set1 = self._sets if not set0.isordered(): if set1.isordered(): @@ -3355,7 +3366,7 @@ def get(self, val, default=None): class SetDifference_FiniteSet(_FiniteSetMixin, SetDifference_InfiniteSet): __slots__ = tuple() - def __iter__(self): + def _iter_impl(self): set0, set1 = self._sets return (_ for _ in set0 if _ not in set1) @@ -3459,7 +3470,7 @@ class SetSymmetricDifference_FiniteSet(_FiniteSetMixin, SetSymmetricDifference_InfiniteSet): __slots__ = tuple() - def __iter__(self): + def _iter_impl(self): set0, set1 = self._sets return itertools.chain( (_ for _ in set0 if _ not in set1), @@ -3732,7 +3743,7 @@ def _cutPointGenerator(subsets, val_len): 
class SetProduct_FiniteSet(_FiniteSetMixin, SetProduct_InfiniteSet): __slots__ = tuple() - def __iter__(self): + def _iter_impl(self): _iter = itertools.product(*self._sets) # Note: if all the member sets are simple 1-d sets, then there # is no need to call flatten_product. @@ -3866,7 +3877,7 @@ def clear(self): def __len__(self): return 0 - def __iter__(self): + def _iter_impl(self): return iter(tuple()) @property diff --git a/pyomo/core/base/template_expr.py b/pyomo/core/base/template_expr.py index faf6a29f599..6d7b80e6c92 100644 --- a/pyomo/core/base/template_expr.py +++ b/pyomo/core/base/template_expr.py @@ -2,232 +2,18 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import copy -import logging -from pyomo.core.expr import current as EXPR -from pyomo.core.expr.numvalue import ( - NumericValue, native_numeric_types, as_numeric, value ) -import pyomo.core.base -from pyomo.core.expr.expr_errors import TemplateExpressionError +from pyomo.core.expr.template_expr import ( + IndexTemplate, _GetItemIndexer, TemplateExpressionError +) -class IndexTemplate(NumericValue): - """A "placeholder" for an index value in template expressions. - - This class is a placeholder for an index value within a template - expression. 
That is, given the expression template for "m.x[i]", - where `m.z` is indexed by `m.I`, the expression tree becomes: - - _GetItem: - - m.x - - IndexTemplate(_set=m.I, _value=None) - - Constructor Arguments: - _set: the Set from which this IndexTemplate can take values - """ - - __slots__ = ('_set', '_value') - - def __init__(self, _set): - self._set = _set - self._value = None - - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(IndexTemplate, self).__getstate__() - for i in IndexTemplate.__slots__: - state[i] = getattr(self, i) - return state - - def __deepcopy__(self, memo): - # Because we leverage deepcopy for expression cloning, we need - # to see if this is a clone operation and *not* copy the - # template. - # - # TODO: JDS: We should consider converting the IndexTemplate to - # a proper Component: that way it could leverage the normal - # logic of using the parent_block scope to dictate the behavior - # of deepcopy. - if '__block_scope__' in memo: - memo[id(self)] = self - return self - # - # "Normal" deepcopying outside the context of pyomo. - # - ans = memo[id(self)] = self.__class__.__new__(self.__class__) - ans.__setstate__(copy.deepcopy(self.__getstate__(), memo)) - return ans - - # Note: because NONE of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - - def __call__(self, exception=True): - """ - Return the value of this object. - """ - if self._value is None: - if exception: - raise TemplateExpressionError(self) - return None - else: - return self._value - - def is_fixed(self): - """ - Returns True because this value is fixed. - """ - return True - - def is_constant(self): - """ - Returns False because this cannot immediately be simplified. - """ - return False - - def is_potentially_variable(self): - """Returns False because index values cannot be variables. 
- - The IndexTemplate represents a placeholder for an index value - for an IndexedComponent, and at the moment, Pyomo does not - support variable indirection. - """ - return False - - def __str__(self): - return self.getname() - - def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): - return "{"+self._set.getname(fully_qualified, name_buffer, relative_to)+"}" - - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - return self.name - - def set_value(self, value): - # It might be nice to check if the value is valid for the base - # set, but things are tricky when the base set is not dimention - # 1. So, for the time being, we will just "trust" the user. - self._value = value - - -class ReplaceTemplateExpression(EXPR.ExpressionReplacementVisitor): - - def __init__(self, substituter, *args): - super(ReplaceTemplateExpression, self).__init__() - self.substituter = substituter - self.substituter_args = args - - def visiting_potential_leaf(self, node): - if type(node) is EXPR.GetItemExpression or type(node) is IndexTemplate: - return True, self.substituter(node, *self.substituter_args) - - return super( - ReplaceTemplateExpression, self).visiting_potential_leaf(node) - - -def substitute_template_expression(expr, substituter, *args): - """Substitute IndexTemplates in an expression tree. - - This is a general utility function for walking the expression tree - and subtituting all occurances of IndexTemplate and - _GetItemExpression nodes. 
- - Args: - substituter: method taking (expression, *args) and returning - the new object - *args: these are passed directly to the substituter - - Returns: - a new expression tree with all substitutions done - """ - visitor = ReplaceTemplateExpression(substituter, *args) - return visitor.dfs_postorder_stack(expr) - - -class _GetItemIndexer(object): - # Note that this class makes the assumption that only one template - # ever appears in an expression for a single index - - def __init__(self, expr): - self._base = expr._base - self._args = [] - _hash = [ id(self._base) ] - for x in expr.args: - try: - logging.disable(logging.CRITICAL) - val = value(x) - self._args.append(val) - _hash.append(val) - except TemplateExpressionError as e: - if x is not e.template: - raise TypeError( - "Cannot use the param substituter with expression " - "templates\nwhere the component index has the " - "IndexTemplate in an expression.\n\tFound in %s" - % ( expr, )) - self._args.append(e.template) - _hash.append(id(e.template._set)) - finally: - logging.disable(logging.NOTSET) - - self._hash = tuple(_hash) - - def nargs(self): - return len(self._args) - - def arg(self, i): - return self._args[i] - - def __hash__(self): - return hash(self._hash) - - def __eq__(self, other): - if type(other) is _GetItemIndexer: - return self._hash == other._hash - else: - return False - - def __str__(self): - return "%s[%s]" % ( - self._base.name, ','.join(str(x) for x in self._args) ) - - -def substitute_getitem_with_param(expr, _map): - """A simple substituter to replace _GetItem nodes with mutable Params. - - This substituter will replace all _GetItemExpression nodes with a - new Param. 
For example, this method will create expressions - suitable for passing to DAE integrators - """ - if type(expr) is IndexTemplate: - return expr - - _id = _GetItemIndexer(expr) - if _id not in _map: - _map[_id] = pyomo.core.base.param.Param(mutable=True) - _map[_id].construct() - _args = [] - _map[_id]._name = "%s[%s]" % ( - expr._base.name, ','.join(str(x) for x in _id._args) ) - return _map[_id] - - -def substitute_template_with_value(expr): - """A simple substituter to expand expression for current template - - This substituter will replace all _GetItemExpression / IndexTemplate - nodes with the actual _ComponentData based on the current value of - the IndexTamplate(s) - - """ - - if type(expr) is IndexTemplate: - return as_numeric(expr()) - else: - return expr.resolve_template() +from pyomo.common.deprecation import deprecation_warning +deprecation_warning( + 'The pyomo.core.base.template_expr module is deprecated. ' + 'Import expression template objects from pyomo.core.expr.template_expr.', + version='TBD') diff --git a/pyomo/core/base/units_container.py b/pyomo/core/base/units_container.py index d5bc3a01888..627f6c207d2 100644 --- a/pyomo/core/base/units_container.py +++ b/pyomo/core/base/units_container.py @@ -33,22 +33,23 @@ be used directly in expressions (e.g., defining constraints). You can also verify that the units are consistent on a model, or on individual components like the objective function, constraint, or expression using -`assert_units_consistent`. There are other methods that may be helpful -for verifying correct units on a model. +`assert_units_consistent` (from pyomo.util.check_units). +There are other methods there that may be helpful for verifying correct units on a model. .. 
doctest:: >>> from pyomo.environ import ConcreteModel, Var, Objective >>> from pyomo.environ import units as u + >>> from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent, check_units_equivalent >>> model = ConcreteModel() >>> model.acc = Var(initialize=5.0, units=u.m/u.s**2) >>> model.obj = Objective(expr=(model.acc - 9.81*u.m/u.s**2)**2) - >>> u.assert_units_consistent(model.obj) # raise exc if units invalid on obj - >>> u.assert_units_consistent(model) # raise exc if units invalid anywhere on the model - >>> u.assert_units_equivalent(model.obj.expr, u.m**2/u.s**4) # raise exc if units not equivalent + >>> assert_units_consistent(model.obj) # raise exc if units invalid on obj + >>> assert_units_consistent(model) # raise exc if units invalid anywhere on the model + >>> assert_units_equivalent(model.obj.expr, u.m**2/u.s**4) # raise exc if units not equivalent >>> print(u.get_units(model.obj.expr)) # print the units on the objective m ** 2 / s ** 4 - >>> print(u.check_units_equivalent(model.acc.get_units(), u.m/u.s**2)) + >>> print(check_units_equivalent(model.acc, u.m/u.s**2)) True The implementation is currently based on the `pint @@ -108,14 +109,7 @@ from pyomo.common.dependencies import attempt_import from pyomo.core.expr.numvalue import NumericValue, nonpyomo_leaf_types, value, native_numeric_types -from pyomo.core.base.constraint import Constraint -from pyomo.core.base.objective import Objective -from pyomo.core.base.block import Block, SubclassOf -from pyomo.core.base.expression import Expression -from pyomo.core.base.var import _VarData -from pyomo.core.base.param import _ParamData -from pyomo.core.base.external import ExternalFunction -from pyomo.core.base.template_expr import IndexTemplate +from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr import current as EXPR pint_module, pint_available = attempt_import( @@ -394,7 +388,7 @@ def pprint(self, ostream=None, verbose=False): # 
ostream.write('{:!~s}'.format(self._pint_unit)) -class _UnitExtractionVisitor(EXPR.StreamBasedExpressionVisitor): +class UnitExtractionVisitor(EXPR.StreamBasedExpressionVisitor): def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12): """ Visitor class used to determine units of an expression. Do not use @@ -423,7 +417,7 @@ def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12): particular method that should be called to return the units of the node based on the units of its child arguments. This map is used in exitNode. """ - super(_UnitExtractionVisitor, self).__init__() + super(UnitExtractionVisitor, self).__init__() self._pyomo_units_container = pyomo_units_container self._pint_registry = self._pyomo_units_container._pint_registry self._units_equivalence_tolerance = units_equivalence_tolerance @@ -767,12 +761,12 @@ def _get_unit_for_single_child(self, node, list_of_unit_tuples): pyomo_unit, pint_unit = list_of_unit_tuples[0] return (pyomo_unit, pint_unit) - def _get_units_with_dimensionless_children(self, node, list_of_unit_tuples): + def _get_units_ExternalFunction(self, node, list_of_unit_tuples): """ - Check to make sure that any child arguments are unitless / - dimensionless and return the value from node.get_units() This + Check to make sure that any child arguments are consistent with + arg_units return the value from node.get_units() This was written for ExternalFunctionExpression where the external - function has units assigned to its return value. 
+ function has units assigned to its return value and arguments Parameters ---------- @@ -788,9 +782,18 @@ def _get_units_with_dimensionless_children(self, node, list_of_unit_tuples): : tuple (pyomo_unit, pint_unit) """ - for (pyomo_unit, pint_unit) in list_of_unit_tuples: - if not self._pint_unit_equivalent_to_dimensionless(pint_unit): - raise UnitsError('Expected no units or dimensionless units in {}, but found {}.'.format(str(node), str(pyomo_unit))) + # get the list of arg_units + arg_units = node.get_arg_units() + if arg_units is None: + # they should all be dimensionless + arg_units = [None]*len(list_of_unit_tuples) + + for (arg_unit, unit_tuple) in zip(arg_units, list_of_unit_tuples): + pyomo_arg_unit, pint_arg_unit = self._pyomo_units_container._get_units_tuple(arg_unit) + pint_child_unit = unit_tuple[1] + print(pint_arg_unit, pint_child_unit) + if not self._pint_units_equivalent(pint_arg_unit, pint_child_unit): + raise InconsistentUnitsError(arg_unit, unit_tuple[0], 'Inconsistent units found in ExternalFunction.') # now return the units in node.get_units return self._pyomo_units_container._get_units_tuple(node.get_units()) @@ -1034,8 +1037,8 @@ def _get_unit_sqrt(self, node, list_of_unit_tuples): EXPR.Expr_ifExpression: _get_unit_for_expr_if, IndexTemplate: _get_dimensionless_no_children, EXPR.GetItemExpression: _get_dimensionless_with_dimensionless_children, - EXPR.ExternalFunctionExpression: _get_units_with_dimensionless_children, - EXPR.NPV_ExternalFunctionExpression: _get_units_with_dimensionless_children, + EXPR.ExternalFunctionExpression: _get_units_ExternalFunction, + EXPR.NPV_ExternalFunctionExpression: _get_units_ExternalFunction, EXPR.LinearExpression: _get_unit_for_linear_expression } @@ -1307,13 +1310,21 @@ def _get_units_tuple(self, expr): ------- : tuple (PyomoUnit, pint unit) """ - pyomo_unit, pint_unit = _UnitExtractionVisitor(self).walk_expression(expr=expr) + if expr is None: + return (None, None) + pyomo_unit, pint_unit = 
UnitExtractionVisitor(self).walk_expression(expr=expr) + if pint_unit == self._pint_registry.dimensionless: + pint_unit = None + if pyomo_unit is self.dimensionless: + pyomo_unit = None + if pint_unit is not None: assert pyomo_unit is not None if type(pint_unit) != type(self._pint_registry.kg): pint_unit = pint_unit.units return (_PyomoUnit(pint_unit, self._pint_registry), pint_unit) + return (None, None) def get_units(self, expr): @@ -1408,28 +1419,17 @@ def convert(self, src, to_units=None): """ src_pyomo_unit, src_pint_unit = self._get_units_tuple(src) to_pyomo_unit, to_pint_unit = self._get_units_tuple(to_units) - - # check if any units have offset - # CDL: This is no longer necessary since we don't allow - # offset units, but let's keep the code in case we change - # our mind about offset units - # src_unit_container = pint.util.to_units_container(src_unit, self._pint_ureg) - # dest_unit_container = pint.util.to_units_container(dest_unit, self._pint_ureg) - # src_offset_units = [(u, e) for u, e in src_unit_container.items() - # if not self._pint_ureg._units[u].is_multiplicative] - # - # dest_offset_units = [(u, e) for u, e in dest_unit_container.items() - # if not self._pint_ureg._units[u].is_multiplicative] - - # if len(src_offset_units) + len(dest_offset_units) != 0: - # raise UnitsError('Offset unit detected in call to convert. Offset units are not supported at this time.') + + if src_pyomo_unit is None and to_pyomo_unit is None: + return src # no offsets, we only need a factor to convert between the two fac_b_src, base_units_src = self._pint_registry.get_base_units(src_pint_unit, check_nonmult=True) fac_b_dest, base_units_dest = self._pint_registry.get_base_units(to_pint_unit, check_nonmult=True) if base_units_src != base_units_dest: - raise UnitsError('Cannot convert {0:s} to {1:s}. 
Units are not compatible.'.format(str(src_pyomo_unit), str(to_pyomo_unit))) + raise InconsistentUnitsError(src_pint_unit, to_pint_unit, + 'Error in convert: units not compatible.') return fac_b_src/fac_b_dest*to_pyomo_unit/src_pyomo_unit*src @@ -1474,136 +1474,6 @@ def convert_value(self, num_value, from_units=None, to_units=None): dest_quantity = src_quantity.to(to_pint_unit) return dest_quantity.magnitude - def _assert_units_consistent_constraint_data(self, condata): - """ - Raise an exception if the any units in lower, body, upper on a - ConstraintData object are not consistent or are not equivalent - with each other. - """ - if condata.equality: - if condata.lower == 0.0: - # Pyomo can rearrange expressions, resulting in a value - # of 0 for the RHS that does not have units associated - # Therefore, if the RHS is 0, we allow it to be unitless - # and check the consistency of the body only - # ToDo: If we modify the constraint to keep the original - # expression, we should verify against that instead - assert condata.upper == 0.0 - self._assert_units_consistent_expression(condata.body) - else: - self.assert_units_equivalent(condata.lower, condata.body) - else: - self.assert_units_equivalent(condata.lower, condata.body, condata.upper) - - def _assert_units_consistent_expression(self, expr): - """ - Raise an exception if any units in expr are inconsistent. - - Parameters - ---------- - expr : Pyomo expression - The source expression to check. - - Raises - ------ - :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` - - """ - # this call will raise an error if an inconsistency is found - pyomo_unit, pint_unit = self._get_units_tuple(expr=expr) - - def check_units_equivalent(self, *args): - """ - Returns True if the units associated with each of the - expressions passed as arguments are all equivalent (and False - otherwise). 
- - Note that this method will raise an exception if the units are - inconsistent within an expression (since the units for that - expression are not valid). - - Parameters - ---------- - args : an argument list of Pyomo expressions - - Returns - ------- - bool : True if all the expressions passed as argments have the same units - """ - pyomo_unit_compare, pint_unit_compare = self._get_units_tuple(args[0]) - for expr in args[1:]: - pyomo_unit, pint_unit = self._get_units_tuple(expr) - if not _UnitExtractionVisitor(self)._pint_units_equivalent(pint_unit_compare, pint_unit): - return False - # made it through all of them successfully - return True - - def assert_units_equivalent(self, *args): - """ - Raise an exception if the units are inconsistent within an - expression, or not equivalent across all the passed - expressions. - - Parameters - ---------- - args : an argument list of Pyomo expressions - The Pyomo expressions to test - - Raises - ------ - :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` - """ - # this call will raise an exception if an inconsistency is found - pyomo_unit_compare, pint_unit_compare = self._get_units_tuple(args[0]) - for expr in args[1:]: - # this call will raise an exception if an inconsistency is found - pyomo_unit, pint_unit = self._get_units_tuple(expr) - if not _UnitExtractionVisitor(self)._pint_units_equivalent(pint_unit_compare, pint_unit): - raise UnitsError("Units between {} and {} are not consistent.".format(str(pyomo_unit_compare), str(pyomo_unit))) - - def assert_units_consistent(self, obj): - """ - This method raises an exception if the units are not - consistent on the passed in object. 
Argument obj can be one - of the following components: Pyomo Block (or Model), - Constraint, Objective, Expression, or it can be a Pyomo - expression object - - Parameters - ---------- - obj : Pyomo component (Block, Model, Constraint, Objective, or Expression) or Pyomo expression - The object or expression to test - - Raises - ------ - :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` - """ - if isinstance(obj, Block): - # check all the constraints, objectives, and Expression objects - for cdata in obj.component_data_objects(ctype=SubclassOf(Constraint), descend_into=True): - self._assert_units_consistent_constraint_data(cdata) - - for data in obj.component_data_objects(ctype=(SubclassOf(Objective), SubclassOf(Expression)), descend_into=True): - self._assert_units_consistent_expression(data.expr) - - elif isinstance(obj, Constraint): - if obj.is_indexed(): - for cdata in obj.values(): - self._assert_units_consistent_constraint_data(cdata) - else: - self._assert_units_consistent_constraint_data(obj) - - elif isinstance(obj, Objective) or isinstance(obj, Expression): - if obj.is_indexed(): - for data in obj.values(): - self._assert_units_consistent_expression(data.expr) - else: - self._assert_units_consistent_expression(obj.expr) - else: - # doesn't appear to be one of the components: Block, Constraint, Objective, or Expression - # therefore, let's just check the units of the object itself - self._assert_units_consistent_expression(obj) - class DeferredUnitsSingleton(PyomoUnitsContainer): """A class supporting deferred interrogation of pint_available. 
diff --git a/pyomo/core/base/var.py b/pyomo/core/base/var.py index 824bf2fffb3..29470d6ed4b 100644 --- a/pyomo/core/base/var.py +++ b/pyomo/core/base/var.py @@ -148,18 +148,10 @@ def is_constant(self): """Returns False because this is not a constant in an expression.""" return False - def is_parameter_type(self): - """Returns False because this is not a parameter object.""" - return False - def is_variable_type(self): """Returns True because this is a variable.""" return True - def is_expression_type(self): - """Returns False because this is not an expression""" - return False - def is_potentially_variable(self): """Returns True because this is a variable.""" return True @@ -561,10 +553,6 @@ def __init__(self, *args, **kwd): elif bounds is not None: raise ValueError("Variable 'bounds' keyword must be a tuple or function") - def is_expression_type(self): - """Returns False because this is not an expression""" - return False - def flag_as_stale(self): """ Set the 'stale' attribute of every variable data object to True. 
diff --git a/pyomo/core/expr/calculus/diff_with_pyomo.py b/pyomo/core/expr/calculus/diff_with_pyomo.py index 9234975c13d..af6063b463e 100644 --- a/pyomo/core/expr/calculus/diff_with_pyomo.py +++ b/pyomo/core/expr/calculus/diff_with_pyomo.py @@ -299,6 +299,22 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict): raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) +def _diff_ExternalFunctionExpression(node, val_dict, der_dict): + """ + + Parameters + ---------- + node: pyomo.core.expr.numeric_expr.ProductExpression + val_dict: ComponentMap + der_dict: ComponentMap + """ + der = der_dict[node] + vals = tuple(val_dict[i] for i in node.args) + derivs = node._fcn.evaluate_fgh(vals)[1] + for ndx, arg in enumerate(node.args): + der_dict[arg] += der * derivs[ndx] + + _diff_map = dict() _diff_map[_expr.ProductExpression] = _diff_ProductExpression _diff_map[_expr.DivisionExpression] = _diff_DivisionExpression @@ -308,6 +324,50 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict): _diff_map[_expr.MonomialTermExpression] = _diff_ProductExpression _diff_map[_expr.NegationExpression] = _diff_NegationExpression _diff_map[_expr.UnaryFunctionExpression] = _diff_UnaryFunctionExpression +_diff_map[_expr.ExternalFunctionExpression] = _diff_ExternalFunctionExpression + + +class _NamedExpressionCollector(ExpressionValueVisitor): + def __init__(self): + self.named_expressions = list() + + def visit(self, node, values): + return None + + def visiting_potential_leaf(self, node): + if node.__class__ in nonpyomo_leaf_types: + return True, None + + if not node.is_expression_type(): + return True, None + + if node.is_named_expression_type(): + self.named_expressions.append(node) + return False, None + + return False, None + + +def _collect_ordered_named_expressions(expr): + """ + The purpose of this function is to collect named expressions in a + particular order. The order is very important. 
In the resulting + list each named expression can only appear once, and any named + expressions that are used in other named expressions have to come + after the named expression that use them. + """ + visitor = _NamedExpressionCollector() + visitor.dfs_postorder_stack(expr) + named_expressions = visitor.named_expressions + seen = set() + res = list() + for e in reversed(named_expressions): + if id(e) in seen: + continue + seen.add(id(e)) + res.append(e) + res = list(reversed(res)) + return res class _ReverseADVisitorLeafToRoot(ExpressionValueVisitor): @@ -364,16 +424,15 @@ def visiting_potential_leaf(self, node): if not node.is_expression_type(): return True, None + if node.is_named_expression_type(): + return True, None + if node.__class__ in _diff_map: _diff_map[node.__class__](node, self.val_dict, self.der_dict) - elif node.is_named_expression_type(): - der = self.der_dict[node] - self.der_dict[node.expr] += der + return False, None else: raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) - return False, None - def reverse_ad(expr): """ @@ -395,9 +454,13 @@ def reverse_ad(expr): visitorA = _ReverseADVisitorLeafToRoot(val_dict, der_dict) visitorA.dfs_postorder_stack(expr) + named_expressions = _collect_ordered_named_expressions(expr) der_dict[expr] = 1 visitorB = _ReverseADVisitorRootToLeaf(val_dict, der_dict) visitorB.dfs_postorder_stack(expr) + for named_expr in named_expressions: + der_dict[named_expr.expr] = der_dict[named_expr] + visitorB.dfs_postorder_stack(named_expr.expr) return der_dict @@ -456,16 +519,15 @@ def visiting_potential_leaf(self, node): if not node.is_expression_type(): return True, None + if node.is_named_expression_type(): + return True, None + if node.__class__ in _diff_map: _diff_map[node.__class__](node, self.val_dict, self.der_dict) - elif node.is_named_expression_type(): - der = self.der_dict[node] - self.der_dict[node.expr] += der + return False, None else: raise 
DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) - return False, None - def reverse_sd(expr): """ @@ -487,10 +549,12 @@ def reverse_sd(expr): visitorA = _ReverseSDVisitorLeafToRoot(val_dict, der_dict) visitorA.dfs_postorder_stack(expr) + named_expressions = _collect_ordered_named_expressions(expr) der_dict[expr] = 1 visitorB = _ReverseSDVisitorRootToLeaf(val_dict, der_dict) visitorB.dfs_postorder_stack(expr) + for named_expr in named_expressions: + der_dict[named_expr.expr] = der_dict[named_expr] + visitorB.dfs_postorder_stack(named_expr.expr) return der_dict - - diff --git a/pyomo/core/expr/current.py b/pyomo/core/expr/current.py index 874f79b06fb..050561366fd 100755 --- a/pyomo/core/expr/current.py +++ b/pyomo/core/expr/current.py @@ -43,6 +43,7 @@ class Mode(object): _generate_relational_expression, _chainedInequality, ) + from pyomo.core.expr.template_expr import * from pyomo.core.expr import visitor as _visitor from pyomo.core.expr.visitor import * # FIXME: we shouldn't need circular dependencies between modules diff --git a/pyomo/core/expr/logical_expr.py b/pyomo/core/expr/logical_expr.py index 1ef5d41aa45..d2844850dca 100644 --- a/pyomo/core/expr/logical_expr.py +++ b/pyomo/core/expr/logical_expr.py @@ -33,7 +33,7 @@ ) from .numeric_expr import _LinearOperatorExpression, _process_arg -if _using_chained_inequality: #pragma: no cover +if _using_chained_inequality: class _chainedInequality(object): prev = None @@ -70,7 +70,7 @@ def error_message(msg=None): if value(expression <= 5): """ % args -else: #pragma: no cover +else: _chainedInequality = None @@ -185,7 +185,7 @@ def __getstate__(self): return state def __nonzero__(self): - if _using_chained_inequality and not self.is_constant(): #pragma: no cover + if _using_chained_inequality and not self.is_constant(): deprecation_warning("Chained inequalities are deprecated. 
" "Use the inequality() function to " "express ranged inequality expressions.") # Remove in Pyomo 6.0 @@ -313,7 +313,7 @@ def is_potentially_variable(self): if _using_chained_inequality: - def _generate_relational_expression(etype, lhs, rhs): #pragma: no cover + def _generate_relational_expression(etype, lhs, rhs): # We cannot trust Python not to recycle ID's for temporary POD data # (e.g., floats). So, if it is a "native" type, we will record the # value, otherwise we will record the ID. The tuple for native @@ -406,7 +406,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no elif etype == _lt: strict = True else: - raise ValueError("Unknown relational expression type '%s'" % etype) #pragma: no cover + raise ValueError("Unknown relational expression type '%s'" % etype) if lhs_is_relational: if lhs.__class__ is InequalityExpression: if rhs_is_relational: @@ -435,7 +435,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no else: - def _generate_relational_expression(etype, lhs, rhs): #pragma: no cover + def _generate_relational_expression(etype, lhs, rhs): rhs_is_relational = False lhs_is_relational = False @@ -472,7 +472,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no elif etype == _lt: strict = True else: - raise ValueError("Unknown relational expression type '%s'" % etype) #pragma: no cover + raise ValueError("Unknown relational expression type '%s'" % etype) if lhs_is_relational: if lhs.__class__ is InequalityExpression: if rhs_is_relational: diff --git a/pyomo/core/expr/numeric_expr.py b/pyomo/core/expr/numeric_expr.py index c90778b30a1..418061dad2c 100644 --- a/pyomo/core/expr/numeric_expr.py +++ b/pyomo/core/expr/numeric_expr.py @@ -19,6 +19,7 @@ from pyutilib.math.util import isclose from pyomo.common.deprecation import deprecated +from pyomo.common.errors import DeveloperError from .expr_common import ( _add, _sub, _mul, _div, @@ -629,10 +630,7 @@ def getname(self, *args, **kwds): #pragma: no cover 
return self._fcn.getname(*args, **kwds) def _compute_polynomial_degree(self, result): - # If the expression is constant, then - # this is detected earlier. Hence, we can safely - # return None. - return None + return 0 if all(arg == 0 for arg in result) else None def _apply_operation(self, result): return self._fcn.evaluate( result ) @@ -640,8 +638,12 @@ def _apply_operation(self, result): def _to_string(self, values, verbose, smap, compute_values): return "{0}({1})".format(self.getname(), ", ".join(values)) + def get_arg_units(self): + """ Return the units for this external functions arguments """ + return self._fcn.get_arg_units() + def get_units(self): - """ Return the units for this external function expression """ + """ Get the units of the return value for this external function """ return self._fcn.get_units() class NPV_ExternalFunctionExpression(ExternalFunctionExpression): @@ -1061,85 +1063,6 @@ def add(self, new_arg): return self -class GetItemExpression(ExpressionBase): - """ - Expression to call :func:`__getitem__` on the base object. 
- """ - __slots__ = ('_base',) - PRECEDENCE = 1 - - def _precedence(self): #pragma: no cover - return GetItemExpression.PRECEDENCE - - def __init__(self, args, base=None): - """Construct an expression with an operation and a set of arguments""" - self._args_ = args - self._base = base - - def nargs(self): - return len(self._args_) - - def create_node_with_local_data(self, args): - return self.__class__(args, self._base) - - def __getstate__(self): - state = super(GetItemExpression, self).__getstate__() - for i in GetItemExpression.__slots__: - state[i] = getattr(self, i) - return state - - def getname(self, *args, **kwds): - return self._base.getname(*args, **kwds) - - def is_potentially_variable(self): - if any(arg.is_potentially_variable() for arg in self._args_ - if arg.__class__ not in nonpyomo_leaf_types): - return True - for x in itervalues(self._base): - if x.__class__ not in nonpyomo_leaf_types \ - and x.is_potentially_variable(): - return True - return False - - def is_fixed(self): - if any(self._args_): - for x in itervalues(self._base): - if not x.__class__ in nonpyomo_leaf_types and not x.is_fixed(): - return False - return True - - def _is_fixed(self, values): - for x in itervalues(self._base): - if not x.__class__ in nonpyomo_leaf_types and not x.is_fixed(): - return False - return True - - def _compute_polynomial_degree(self, result): # TODO: coverage - if any(x != 0 for x in result): - return None - ans = 0 - for x in itervalues(self._base): - if x.__class__ in nonpyomo_leaf_types: - continue - tmp = x.polynomial_degree() - if tmp is None: - return None - elif tmp > ans: - ans = tmp - return ans - - def _apply_operation(self, result): # TODO: coverage - return value(self._base.__getitem__( tuple(result) )) - - def _to_string(self, values, verbose, smap, compute_values): - if verbose: - return "{0}({1})".format(self.getname(), values[0]) - return "%s%s" % (self.getname(), values[0]) - - def resolve_template(self): # TODO: coverage - return 
self._base.__getitem__(tuple(value(i) for i in self._args_)) - - class Expr_ifExpression(ExpressionBase): """ A logical if-then-else expression:: @@ -1181,11 +1104,13 @@ def getname(self, *args, **kwds): def _is_fixed(self, args): assert(len(args) == 3) - if args[0]: #self._if.is_constant(): + if args[0]: # self._if.is_fixed(): + if args[1] and args[2]: + return True if value(self._if): - return args[1] #self._then.is_constant() + return args[1] # self._then.is_fixed() else: - return args[2] #self._else.is_constant() + return args[2] # self._else.is_fixed() else: return False @@ -1207,6 +1132,8 @@ def is_potentially_variable(self): def _compute_polynomial_degree(self, result): _if, _then, _else = result if _if == 0: + if _then == _else: + return _then try: return _then if value(self._if) else _else except ValueError: @@ -1366,18 +1293,16 @@ def getname(self, *args, **kwds): return 'sum' def _compute_polynomial_degree(self, result): - return 1 if len(self.linear_vars) > 0 else 0 + return 1 if not self.is_fixed() else 0 def is_constant(self): return len(self.linear_vars) == 0 + def _is_fixed(self, values=None): + return all(v.fixed for v in self.linear_vars) + def is_fixed(self): - if len(self.linear_vars) == 0: - return True - for v in self.linear_vars: - if not v.fixed: - return False - return True + return self._is_fixed() def _to_string(self, values, verbose, smap, compute_values): tmp = [] @@ -1652,23 +1577,20 @@ def _decompose_linear_terms(expr, multiplier=1): def _process_arg(obj): - try: - if obj.is_parameter_type() and not obj._component()._mutable and obj._constructed: - # Return the value of an immutable SimpleParam or ParamData object - return obj() - - elif obj.__class__ is NumericConstant: - return obj.value - - return obj - except AttributeError: - if obj.is_indexed(): - raise TypeError( - "Argument for expression is an indexed numeric " - "value\nspecified without an index:\n\t%s\nIs this " - "value defined over an index that you did not specify?" 
- % (obj.name, ) ) - raise + # Note: caller is responsible for filtering out native types and + # expressions. + if obj.is_numeric_type() and obj.is_constant(): + # Resolve constants (e.g., immutable scalar Params & NumericConstants) + return value(obj) + # User assistance: provide a helpful exception when using an indexed + # object in an expression + if obj.is_component_type() and obj.is_indexed(): + raise TypeError( + "Argument for expression is an indexed numeric " + "value\nspecified without an index:\n\t%s\nIs this " + "value defined over an index that you did not specify?" + % (obj.name, ) ) + return obj #@profile diff --git a/pyomo/core/expr/numvalue.py b/pyomo/core/expr/numvalue.py index ea56cc4963c..250410dcbe7 100644 --- a/pyomo/core/expr/numvalue.py +++ b/pyomo/core/expr/numvalue.py @@ -24,6 +24,7 @@ _iadd, _isub, _imul, _idiv, _ipow, _lt, _le, _eq) +from pyomo.core.pyomoobject import PyomoObject from pyomo.core.expr.expr_errors import TemplateExpressionError logger = logging.getLogger('pyomo.core') @@ -108,7 +109,7 @@ def __setstate__(self, state): #: like numpy. #: #: :data:`native_types` = :data:`native_numeric_types ` + { str } -native_types = set([ bool, str, type(None) ]) +native_types = set([ bool, str, type(None), slice ]) if PY3: native_types.add(bytes) native_boolean_types.add(bytes) @@ -532,7 +533,7 @@ def check_if_numeric_type_and_cache(obj): return retval -class NumericValue(object): +class NumericValue(PyomoObject): """ This is the base class for numeric values used in Pyomo. """ @@ -614,6 +615,10 @@ def cname(self, *args, **kwds): "DEPRECATED: The cname() method has been renamed to getname()." 
) return self.getname(*args, **kwds) + def is_numeric_type(self): + """Return True if this class is a Pyomo numeric object""" + return True + def is_constant(self): """Return True if this numeric value is a constant value""" return False @@ -622,28 +627,8 @@ def is_fixed(self): """Return True if this is a non-constant value that has been fixed""" return False - def is_parameter_type(self): - """Return False unless this class is a parameter object""" - return False - - def is_variable_type(self): - """Return False unless this class is a variable object""" - return False - def is_potentially_variable(self): """Return True if variables can appear in this expression""" - return True - - def is_named_expression_type(self): - """Return True if this numeric value is a named expression""" - return False - - def is_expression_type(self): - """Return True if this numeric value is an expression""" - return False - - def is_component_type(self): - """Return True if this class is a Pyomo component""" return False def is_relational(self): @@ -1024,9 +1009,6 @@ def is_constant(self): def is_fixed(self): return True - def is_potentially_variable(self): - return False - def _compute_polynomial_degree(self, result): return 0 diff --git a/pyomo/core/expr/sympy_tools.py b/pyomo/core/expr/sympy_tools.py index 2a831b6324a..cae5a0fff5d 100644 --- a/pyomo/core/expr/sympy_tools.py +++ b/pyomo/core/expr/sympy_tools.py @@ -139,9 +139,13 @@ def sympyVars(self): class Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor): def __init__(self, object_map): + sympy.Add # this ensures _configure_sympy gets run super(Pyomo2SympyVisitor, self).__init__() self.object_map = object_map + def initializeWalker(self, expr): + return self.beforeChild(None, expr, None) + def exitNode(self, node, values): if node.__class__ is EXPR.UnaryFunctionExpression: return _functionMap[node._name](values[0]) @@ -151,7 +155,7 @@ def exitNode(self, node, values): else: return _op(*tuple(values)) - def beforeChild(self, 
node, child): + def beforeChild(self, node, child, child_idx): # # Don't replace native or sympy types # @@ -175,9 +179,13 @@ def beforeChild(self, node, child): class Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor): def __init__(self, object_map): + sympy.Add # this ensures _configure_sympy gets run super(Sympy2PyomoVisitor, self).__init__() self.object_map = object_map + def initializeWalker(self, expr): + return self.beforeChild(None, expr, None) + def enterNode(self, node): return (node._args, []) @@ -191,7 +199,7 @@ def exitNode(self, node, values): "map" % type(_sympyOp) ) return _op(*tuple(values)) - def beforeChild(self, node, child): + def beforeChild(self, node, child, child_idx): if not child._args: item = self.object_map.getPyomoSymbol(child, None) if item is None: @@ -206,16 +214,9 @@ def sympyify_expression(expr): # object_map = PyomoSympyBimap() visitor = Pyomo2SympyVisitor(object_map) - is_expr, ans = visitor.beforeChild(None, expr) - if not is_expr: - return object_map, ans - return object_map, visitor.walk_expression(expr) def sympy2pyomo_expression(expr, object_map): visitor = Sympy2PyomoVisitor(object_map) - is_expr, ans = visitor.beforeChild(None, expr) - if not is_expr: - return ans return visitor.walk_expression(expr) diff --git a/pyomo/core/expr/template_expr.py b/pyomo/core/expr/template_expr.py new file mode 100644 index 00000000000..04a01e514a7 --- /dev/null +++ b/pyomo/core/expr/template_expr.py @@ -0,0 +1,790 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +import copy +import itertools +import logging +import sys +from six import iteritems, itervalues +from six.moves import builtins + +from pyomo.core.expr.expr_errors import TemplateExpressionError +from pyomo.core.expr.numvalue import ( + NumericValue, native_numeric_types, native_types, nonpyomo_leaf_types, + as_numeric, value, +) +from pyomo.core.expr.numeric_expr import ExpressionBase, SumExpression +from pyomo.core.expr.visitor import ( + ExpressionReplacementVisitor, StreamBasedExpressionVisitor +) + +logger = logging.getLogger(__name__) + +class _NotSpecified(object): pass + +class GetItemExpression(ExpressionBase): + """ + Expression to call :func:`__getitem__` on the base object. + """ + PRECEDENCE = 1 + + def _precedence(self): + return GetItemExpression.PRECEDENCE + + def __init__(self, args): + """Construct an expression with an operation and a set of arguments""" + self._args_ = args + + def nargs(self): + return len(self._args_) + + def __getattr__(self, attr): + if attr.startswith('__') and attr.endswith('__'): + raise AttributeError() + return GetAttrExpression((self, attr)) + + def __iter__(self): + return iter(value(self)) + + def __len__(self): + return len(value(self)) + + def getname(self, *args, **kwds): + return self._args_[0].getname(*args, **kwds) + + def is_potentially_variable(self): + _false = lambda: False + if any( getattr(arg, 'is_potentially_variable', _false)() + for arg in self._args_ ): + return True + base = self._args_[0] + if base.is_expression_type(): + base = value(base) + # TODO: fix value iteration when generating templates + # + # There is a nasty problem here: we want to iterate over all the + # members of the base and see if *any* of them are potentially + # variable. Unfortunately, this method is called during + # expression generation, and we *could* be generating a + # template. 
When that occurs, iterating over the base will + # yield a new IndexTemplate (which will in turn raise an + # exception because IndexTemplates are not constant). The real + # solution is probably to re-think how we define + # is_potentially_variable, but for now we will only handle + # members that are explicitly stored in the _data dict. Not + # general (because a Component could implement a non-standard + # storage scheme), but as of now [30 Apr 20], there are no known + # Components where this assumption will cause problems. + return any( getattr(x, 'is_potentially_variable', _false)() + for x in itervalues(getattr(base, '_data', {})) ) + + def _is_fixed(self, values): + if not all(values[1:]): + return False + _true = lambda: True + return all( getattr(x, 'is_fixed', _true)() + for x in itervalues(values[0]) ) + + def _compute_polynomial_degree(self, result): + if any(x != 0 for x in result[1:]): + return None + ans = 0 + for x in itervalues(result[0]): + if x.__class__ in nonpyomo_leaf_types \ + or not hasattr(x, 'polynomial_degree'): + continue + tmp = x.polynomial_degree() + if tmp is None: + return None + elif tmp > ans: + ans = tmp + return ans + + def _apply_operation(self, result): + obj = result[0].__getitem__( tuple(result[1:]) ) + if obj.__class__ in nonpyomo_leaf_types: + return obj + # Note that because it is possible (likely) that the result + # could be an IndexedComponent_slice object, must test "is + # True", as the slice will return a list of values. 
+ if obj.is_numeric_type() is True: + obj = value(obj) + return obj + + def _to_string(self, values, verbose, smap, compute_values): + values = tuple(_[1:-1] if _[0]=='(' and _[-1]==')' else _ + for _ in values) + if verbose: + return "getitem(%s, %s)" % (values[0], ', '.join(values[1:])) + return "%s[%s]" % (values[0], ','.join(values[1:])) + + def _resolve_template(self, args): + return args[0].__getitem__(tuple(args[1:])) + + +class GetAttrExpression(ExpressionBase): + """ + Expression to call :func:`__getattr__` on the base object. + """ + __slots__ = () + PRECEDENCE = 1 + + def _precedence(self): + return GetAttrExpression.PRECEDENCE + + def nargs(self): + return len(self._args_) + + def __getattr__(self, attr): + if attr.startswith('__') and attr.endswith('__'): + raise AttributeError() + return GetAttrExpression((self, attr)) + + def __getitem__(self, *idx): + return GetItemExpression((self,) + idx) + + def __iter__(self): + return iter(value(self)) + + def __len__(self): + return len(value(self)) + + def getname(self, *args, **kwds): + return 'getattr' + + def _compute_polynomial_degree(self, result): + if result[1] != 0: + return None + return result[0] + + def _apply_operation(self, result): + assert len(result) == 2 + obj = getattr(result[0], result[1]) + if obj.__class__ in nonpyomo_leaf_types: + return obj + # Note that because it is possible (likely) that the result + # could be an IndexedComponent_slice object, must test "is + # True", as the slice will return a list of values. + if obj.is_numeric_type() is True: + obj = value(obj) + return obj + + def _to_string(self, values, verbose, smap, compute_values): + assert len(values) == 2 + if verbose: + return "getattr(%s, %s)" % tuple(values) + # Note that the string argument for getattr comes quoted, so we + # need to remove the quotes. 
+ attr = values[1] + if attr[0] in '\"\'' and attr[0] == attr[-1]: + attr = attr[1:-1] + return "%s.%s" % (values[0], attr) + + def _resolve_template(self, args): + return getattr(*tuple(args)) + + +class _TemplateSumExpression_argList(object): + """A virtual list to represent the expanded SumExpression args + + This class implements a "virtual args list" for + TemplateSumExpressions without actually generating the expanded + expression. It can be accessed either in "one-pass" without + generating a list of template argument values (more efficient), or + as a random-access list (where it will have to create the full list + of argument values (less efficient). + + The instance can be used as a context manager to both lock the + IndexTemplate values within this context and to restore their original + values upon exit. + + It is (intentionally) not iterable. + + """ + def __init__(self, TSE): + self._tse = TSE + self._i = 0 + self._init_vals = None + self._iter = self._get_iter() + self._lock = None + + def __len__(self): + return self._tse.nargs() + + def __getitem__(self, i): + if self._i == i: + self._set_iter_vals(next(self._iter)) + self._i += 1 + elif self._i is not None: + # Switch to random-access mode. If we have already + # retrieved one of the indices, then we need to regenerate + # the iterator from scratch. 
+ self._iter = list(self._get_iter() if self._i else self._iter) + self._set_iter_vals(self._iter[i]) + else: + self._set_iter_vals(self._iter[i]) + return self._tse._local_args_[0] + + def __enter__(self): + self._lock = self + self._lock_iters() + + def __exit__(self, exc_type, exc_value, tb): + self._unlock_iters() + self._lock = None + + def _get_iter(self): + # Note: by definition, all _set pointers within an itergroup + # point to the same Set + _sets = tuple(iterGroup[0]._set for iterGroup in self._tse._iters) + return itertools.product(*_sets) + + def _lock_iters(self): + self._init_vals = tuple( + tuple( + it.lock(self._lock) for it in iterGroup + ) for iterGroup in self._tse._iters ) + + def _unlock_iters(self): + self._set_iter_vals(self._init_vals) + for iterGroup in self._tse._iters: + for it in iterGroup: + it.unlock(self._lock) + + def _set_iter_vals(self, val): + for i, iterGroup in enumerate(self._tse._iters): + if len(iterGroup) == 1: + iterGroup[0].set_value(val[i], self._lock) + else: + for j, v in enumerate(val[i]): + iterGroup[j].set_value(v, self._lock) + + +class TemplateSumExpression(ExpressionBase): + """ + Expression to represent an unexpanded sum over one or more sets. 
+ """ + __slots__ = ('_iters', '_local_args_') + PRECEDENCE = 1 + + def _precedence(self): + return TemplateSumExpression.PRECEDENCE + + def __init__(self, args, _iters): + assert len(args) == 1 + self._args_ = args + self._iters = _iters + + def nargs(self): + # Note: by definition, all _set pointers within an itergroup + # point to the same Set + ans = 1 + for iterGroup in self._iters: + ans *= len(iterGroup[0]._set) + return ans + + @property + def args(self): + return _TemplateSumExpression_argList(self) + + @property + def _args_(self): + return _TemplateSumExpression_argList(self) + + @_args_.setter + def _args_(self, args): + self._local_args_ = args + + def create_node_with_local_data(self, args): + return self.__class__(args, self._iters) + + def __getstate__(self): + state = super(TemplateSumExpression, self).__getstate__() + for i in TemplateSumExpression.__slots__: + state[i] = getattr(self, i) + return state + + def getname(self, *args, **kwds): + return "SUM" + + def is_potentially_variable(self): + if any(arg.is_potentially_variable() for arg in self._local_args_ + if arg.__class__ not in nonpyomo_leaf_types): + return True + return False + + def _is_fixed(self, values): + return all(values) + + def _compute_polynomial_degree(self, result): + if None in result: + return None + return result[0] + + def _apply_operation(self, result): + return sum(result) + + def _to_string(self, values, verbose, smap, compute_values): + ans = '' + val = values[0] + if val[0]=='(' and val[-1]==')' and _balanced_parens(val[1:-1]): + val = val[1:-1] + iterStrGenerator = ( + ( ', '.join(str(i) for i in iterGroup), + iterGroup[0]._set.to_string(verbose=verbose) ) + for iterGroup in self._iters + ) + if verbose: + iterStr = ', '.join('iter(%s, %s)' % x for x in iterStrGenerator) + return 'templatesum(%s, %s)' % (val, iterStr) + else: + iterStr = ' '.join('for %s in %s' % x for x in iterStrGenerator) + return 'SUM(%s %s)' % (val, iterStr) + + def _resolve_template(self, 
args): + return SumExpression(args) + + +class IndexTemplate(NumericValue): + """A "placeholder" for an index value in template expressions. + + This class is a placeholder for an index value within a template + expression. That is, given the expression template for "m.x[i]", + where `m.z` is indexed by `m.I`, the expression tree becomes: + + _GetItem: + - m.x + - IndexTemplate(_set=m.I, _value=None) + + Constructor Arguments: + _set: the Set from which this IndexTemplate can take values + """ + + __slots__ = ('_set', '_value', '_index', '_id', '_lock') + + def __init__(self, _set, index=0, _id=None): + self._set = _set + self._value = _NotSpecified + self._index = index + self._id = _id + self._lock = None + + def __getstate__(self): + """ + This method must be defined because this class uses slots. + """ + state = super(IndexTemplate, self).__getstate__() + for i in IndexTemplate.__slots__: + state[i] = getattr(self, i) + return state + + def __deepcopy__(self, memo): + # Because we leverage deepcopy for expression cloning, we need + # to see if this is a clone operation and *not* copy the + # template. + # + # TODO: JDS: We should consider converting the IndexTemplate to + # a proper Component: that way it could leverage the normal + # logic of using the parent_block scope to dictate the behavior + # of deepcopy. + if '__block_scope__' in memo: + memo[id(self)] = self + return self + # + # "Normal" deepcopying outside the context of pyomo. + # + ans = memo[id(self)] = self.__class__.__new__(self.__class__) + ans.__setstate__(copy.deepcopy(self.__getstate__(), memo)) + return ans + + # Note: because NONE of the slots on this class need to be edited, + # we don't need to implement a specialized __setstate__ method. + + def __call__(self, exception=True): + """ + Return the value of this object. 
+ """ + if self._value is _NotSpecified: + if exception: + raise TemplateExpressionError( + self, "Evaluating uninitialized IndexTemplate (%s)" + % (self,)) + return None + else: + return self._value + + def _resolve_template(self, args): + assert not args + return self() + + def is_fixed(self): + """ + Returns True because this value is fixed. + """ + return True + + def is_constant(self): + """ + Returns False because this cannot immediately be simplified. + """ + return False + + def is_potentially_variable(self): + """Returns False because index values cannot be variables. + + The IndexTemplate represents a placeholder for an index value + for an IndexedComponent, and at the moment, Pyomo does not + support variable indirection. + """ + return False + + def __str__(self): + return self.getname() + + def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): + if self._id is not None: + return "_%s" % (self._id,) + + _set_name = self._set.getname(fully_qualified, name_buffer, relative_to) + if self._index is not None and self._set.dimen != 1: + _set_name += "(%s)" % (self._index,) + return "{"+_set_name+"}" + + def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): + return self.name + + def set_value(self, values=_NotSpecified, lock=None): + # It might be nice to check if the value is valid for the base + # set, but things are tricky when the base set is not dimention + # 1. So, for the time being, we will just "trust" the user. + # After all, the actual Set will raise exceptions if the value + # is not present. 
+ if lock is not self._lock: + raise RuntimeError( + "The TemplateIndex %s is currently locked by %s and " + "cannot be set through lock %s" % (self, self._lock, lock)) + if values is _NotSpecified: + self._value = _NotSpecified + return + if type(values) is not tuple: + values = (values,) + if self._index is not None: + if len(values) == 1: + self._value = values[0] + else: + raise ValueError("Passed multiple values %s to a scalar " + "IndexTemplate %s" % (values, self)) + else: + self._value = values + + def lock(self, lock): + assert self._lock is None + self._lock = lock + return self._value + + def unlock(self, lock): + assert self._lock is lock + self._lock = None + + +def resolve_template(expr): + """Resolve a template into a concrete expression + + This takes a template expression and returns the concrete equivalent + by substituting the current values of all IndexTemplate objects and + resolving (evaluating and removing) all GetItemExpression, + GetAttrExpression, and TemplateSumExpression expression nodes. + + """ + def beforeChild(node, child, child_idx): + # Efficiency: do not decend into leaf nodes. 
+ if type(child) in native_types or not child.is_expression_type(): + if hasattr(child, '_resolve_template'): + return False, child._resolve_template(()) + return False, child + else: + return True, None + + def exitNode(node, args): + if hasattr(node, '_resolve_template'): + return node._resolve_template(args) + if len(args) == node.nargs() and all( + a is b for a,b in zip(node.args, args)): + return node + return node.create_node_with_local_data(args) + + return StreamBasedExpressionVisitor( + initializeWalker=lambda x: beforeChild(None, x, None), + beforeChild=beforeChild, + exitNode=exitNode, + ).walk_expression(expr) + + +class ReplaceTemplateExpression(ExpressionReplacementVisitor): + + def __init__(self, substituter, *args): + super(ReplaceTemplateExpression, self).__init__() + self.substituter = substituter + self.substituter_args = args + + def visiting_potential_leaf(self, node): + if type(node) is GetItemExpression or type(node) is IndexTemplate: + return True, self.substituter(node, *self.substituter_args) + + return super( + ReplaceTemplateExpression, self).visiting_potential_leaf(node) + + +def substitute_template_expression(expr, substituter, *args): + """Substitute IndexTemplates in an expression tree. + + This is a general utility function for walking the expression tree + and subtituting all occurances of IndexTemplate and + _GetItemExpression nodes. 
+ + Args: + substituter: method taking (expression, *args) and returning + the new object + *args: these are passed directly to the substituter + + Returns: + a new expression tree with all substitutions done + """ + visitor = ReplaceTemplateExpression(substituter, *args) + return visitor.dfs_postorder_stack(expr) + + +class _GetItemIndexer(object): + # Note that this class makes the assumption that only one template + # ever appears in an expression for a single index + + def __init__(self, expr): + self._base = expr.arg(0) + self._args = [] + _hash = [ id(self._base) ] + for x in expr.args[1:]: + try: + logging.disable(logging.CRITICAL) + val = value(x) + self._args.append(val) + _hash.append(val) + except TemplateExpressionError as e: + if x is not e.template: + raise TypeError( + "Cannot use the param substituter with expression " + "templates\nwhere the component index has the " + "IndexTemplate in an expression.\n\tFound in %s" + % ( expr, )) + self._args.append(e.template) + _hash.append(id(e.template._set)) + finally: + logging.disable(logging.NOTSET) + + self._hash = tuple(_hash) + + def nargs(self): + return len(self._args) + + def arg(self, i): + return self._args[i] + + @property + def base(self): + return self._base + + @property + def args(self): + return self._args + + def __hash__(self): + return hash(self._hash) + + def __eq__(self, other): + if type(other) is _GetItemIndexer: + return self._hash == other._hash + else: + return False + + def __str__(self): + return "%s[%s]" % ( + self._base.name, ','.join(str(x) for x in self._args) ) + + +def substitute_getitem_with_param(expr, _map): + """A simple substituter to replace _GetItem nodes with mutable Params. + + This substituter will replace all _GetItemExpression nodes with a + new Param. 
For example, this method will create expressions + suitable for passing to DAE integrators + """ + import pyomo.core.base.param + if type(expr) is IndexTemplate: + return expr + + _id = _GetItemIndexer(expr) + if _id not in _map: + _map[_id] = pyomo.core.base.param.Param(mutable=True) + _map[_id].construct() + _map[_id]._name = "%s[%s]" % ( + _id.base.name, ','.join(str(x) for x in _id.args) ) + return _map[_id] + + +def substitute_template_with_value(expr): + """A simple substituter to expand expression for current template + + This substituter will replace all _GetItemExpression / IndexTemplate + nodes with the actual _ComponentData based on the current value of + the IndexTemplate(s) + + """ + + if type(expr) is IndexTemplate: + return as_numeric(expr()) + else: + return resolve_template(expr) + + +class _set_iterator_template_generator(object): + """Replacement iterator that returns IndexTemplates + + In order to generate template expressions, we hijack the normal Set + iteration mechanisms so that this iterator is returned instead of + the usual iterator. This iterator will return IndexTemplate + object(s) instead of the actual Set items the first time next() is + called. 
+ """ + def __init__(self, _set, context): + self._set = _set + self.context = context + + def __iter__(self): + return self + + def __next__(self): + # Prevent context from ever being called more than once + if self.context is None: + raise StopIteration() + context, self.context = self.context, None + + _set = self._set + d = _set.dimen + if d is None or type(d) is not int: + idx = (IndexTemplate(_set, None, context.next_id()),) + else: + idx = tuple( + IndexTemplate(_set, i, context.next_id()) for i in range(d) + ) + context.cache.append(idx) + if len(idx) == 1: + return idx[0] + else: + return idx + + next = __next__ + +class _template_iter_context(object): + """Manage the iteration context when generating templatized rules + + This class manages the context tracking when generating templatized + rules. It has two methods (`sum_template` and `get_iter`) that + replace standard functions / methods (`sum` and + :py:meth:`_FiniteSetMixin.__iter__`, respectively). It also tracks + unique identifiers for IndexTemplate objects and their groupings + within `sum()` generators. 
+ """ + def __init__(self): + self.cache = [] + self._id = 0 + + def get_iter(self, _set): + return _set_iterator_template_generator(_set, self) + + def npop_cache(self, n): + result = self.cache[-n:] + self.cache[-n:] = [] + return result + + def next_id(self): + self._id += 1 + return self._id + + def sum_template(self, generator): + init_cache = len(self.cache) + expr = next(generator) + final_cache = len(self.cache) + return TemplateSumExpression( + (expr,), self.npop_cache(final_cache-init_cache) + ) + + +def templatize_rule(block, rule, index_set): + import pyomo.core.base.set + context = _template_iter_context() + internal_error = None + _old_iters = ( + pyomo.core.base.set._FiniteSetMixin.__iter__, + GetItemExpression.__iter__, + GetAttrExpression.__iter__, + ) + _old_sum = builtins.sum + try: + # Override Set iteration to return IndexTemplates + pyomo.core.base.set._FiniteSetMixin.__iter__ \ + = GetItemExpression.__iter__ \ + = GetAttrExpression.__iter__ \ + = lambda x: context.get_iter(x).__iter__() + # Override sum with our sum + builtins.sum = context.sum_template + # Get the index templates needed for calling the rule + if index_set is not None: + if not index_set.isfinite(): + raise TemplateExpressionError( + None, + "Cannot templatize rule with non-finite indexing set") + indices = next(iter(index_set)) + try: + context.cache.pop() + except IndexError: + assert indices is None + indices = () + else: + indices = () + if type(indices) is not tuple: + indices = (indices,) + # Call the rule, returning the template expression and the + # top-level IndexTemplate(s) generated when calling the rule. + # + # TBD: Should this just return a "FORALL()" expression node that + # behaves similarly to the GetItemExpression node? 
+ return rule(block, *indices), indices + except: + internal_error = sys.exc_info() + raise + finally: + pyomo.core.base.set._FiniteSetMixin.__iter__, \ + GetItemExpression.__iter__, \ + GetAttrExpression.__iter__ = _old_iters + builtins.sum = _old_sum + if len(context.cache): + if internal_error is not None: + logger.error("The following exception was raised when " + "templatizing the rule '%s':\n\t%s" + % (rule.__name__, internal_error[1])) + raise TemplateExpressionError( + None, + "Explicit iteration (for loops) over Sets is not supported " + "by template expressions. Encountered loop over %s" + % (context.cache[-1][0]._set,)) + return None, indices + + +def templatize_constraint(con): + return templatize_rule(con.parent_block(), con.rule, con.index_set()) diff --git a/pyomo/core/expr/visitor.py b/pyomo/core/expr/visitor.py index e939c506aa8..a1f0bc2b913 100644 --- a/pyomo/core/expr/visitor.py +++ b/pyomo/core/expr/visitor.py @@ -10,10 +10,19 @@ from __future__ import division +import inspect import logging +import six from copy import deepcopy from collections import deque +if six.PY2: + getargspec = inspect.getargspec +else: + # For our needs, getfullargspec is a drop-in replacement for + # getargspec (which was removed in Python 3.x) + getargspec = inspect.getfullargspec + logger = logging.getLogger('pyomo.core') from pyutilib.misc.visitor import SimpleVisitor, ValueVisitor @@ -22,6 +31,7 @@ from .symbol_map import SymbolMap from . 
import expr_common as common from .expr_errors import TemplateExpressionError +from pyomo.common.deprecation import deprecation_warning from pyomo.core.expr.numvalue import ( nonpyomo_leaf_types, native_numeric_types, @@ -49,6 +59,7 @@ class StreamBasedExpressionVisitor(object): through callback functions as the traversal enters and leaves nodes in the tree: + initializeWalker(expr) -> walk, result enterNode(N1) -> args, data {for N2 in args:} beforeChild(N1, N2) -> descend, child_result @@ -58,10 +69,20 @@ class StreamBasedExpressionVisitor(object): acceptChildResult(N1, data, child_result) -> data afterChild(N1, N2) -> None exitNode(N1, data) -> N1_result + finalizeWalker(result) -> result Individual event callbacks match the following signatures: - args, data = enterNode(self, node): + walk, result = initializeWalker(self, expr): + + initializeWalker() is called to set the walker up and perform + any preliminary processing on the root node. The method returns + a flag indicating if the tree should be walked and a result. If + `walk` is True, then result is ignored. If `walk` is False, + then `result` is returned as the final result from the walker, + bypassing all other callbacks (including finalizeResult). + + args, data = enterNode(self, node): enterNode() is called when the walker first enters a node (from above), and is passed the node being entered. It is expected to @@ -83,10 +104,11 @@ class StreamBasedExpressionVisitor(object): this node. If not specified, the default action is to return the data object from enterNode(). - descend, child_result = beforeChild(self, node, child): + descend, child_result = beforeChild(self, node, child, child_idx): beforeChild() is called by a node for every child before - entering the child node. The node and child nodes are passed as + entering the child node. The node, child node, and child index + (position in the args list from enterNode()) are passed as arguments. 
beforeChild should return a tuple (descend, child_result). If descend is False, the child node will not be entered and the value returned to child_result will be passed to @@ -94,24 +116,25 @@ class StreamBasedExpressionVisitor(object): equivalent to (True, None). The default behavior if not specified is equivalent to (True, None). - data = acceptChildResult(self, node, data, child_result): + data = acceptChildResult(self, node, data, child_result, child_idx): acceptChildResult() is called for each child result being returned to a node. This callback is responsible for recording the result for later processing or passing up the tree. It is - passed the node, the result data structure (see enterNode()), - and the child result. The data structure (possibly modified or - replaced) must be returned. If acceptChildResult is not - specified, it does nothing if data is None, otherwise it calls - data.append(result). + passed the node, result data structure (see enterNode()), child + result, and the child index (position in args from enterNode()). + The data structure (possibly modified or replaced) must be + returned. If acceptChildResult is not specified, it does + nothing if data is None, otherwise it calls data.append(result). - afterChild(self, node, child): + afterChild(self, node, child, child_idx): afterChild() is called by a node for every child node immediately after processing the node is complete before control - moves to the next child or up to the parent node. The node and - child node are passed, and nothing is returned. If afterChild - is not specified, no action takes place. + moves to the next child or up to the parent node. The node, + child node, an child index (position in args from enterNode()) + are passed, and nothing is returned. If afterChild is not + specified, no action takes place. 
finalizeResult(self, result): @@ -132,7 +155,7 @@ class StreamBasedExpressionVisitor(object): # derived classes or specified as callback functions to the class # constructor: client_methods = ('enterNode','exitNode','beforeChild','afterChild', - 'acceptChildResult','finalizeResult') + 'acceptChildResult','initializeWalker','finalizeResult') def __init__(self, **kwds): # This is slightly tricky: We want derived classes to be able to # override the "None" defaults here, and for keyword arguments @@ -147,6 +170,26 @@ def __init__(self, **kwds): if kwds: raise RuntimeError("Unrecognized keyword arguments: %s" % (kwds,)) + # Handle deprecated APIs + _fcns = (('beforeChild',2), ('acceptChildResult',3), ('afterChild',2)) + for name, nargs in _fcns: + fcn = getattr(self, name) + if fcn is None: + continue + _args = getargspec(fcn) + _self_arg = 1 if inspect.ismethod(fcn) else 0 + if len(_args.args) == nargs + _self_arg and _args.varargs is None: + deprecation_warning( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the %s() " + "method. Please update your walker callbacks." % (name,)) + def wrap(fcn, nargs): + def wrapper(*args): + return fcn(*args[:nargs]) + return wrapper + setattr(self, name, wrap(fcn, nargs)) + + def walk_expression(self, expr): """Walk an expression, calling registered callbacks. """ @@ -159,12 +202,16 @@ def walk_expression(self, expr): # tuple/list of child nodes (arguments), # number of child nodes (arguments), # data object to aggregate results from child nodes, - # current child node ) + # current child node index ) # # The walker only needs a single pointer to the end of the list # (ptr). The beginning of the list is indicated by a None # parent pointer. 
# + if self.initializeWalker is not None: + walk, result = self.initializeWalker(expr) + if not walk: + return result if self.enterNode is not None: tmp = self.enterNode(expr) if tmp is None: @@ -180,115 +227,130 @@ def walk_expression(self, expr): args = () else: args = expr.args + if hasattr(args, '__enter__'): + args.__enter__() node = expr - child_idx = 0 - ptr = (None, node, args, len(args), data, child_idx) - - while 1: - if child_idx < ptr[3]: - # This node still has children to process - child = ptr[2][child_idx] - # Increment the child index pointer here for - # consistency. Note that this means that for the bulk - # of the time, 'child_idx' is actually the index of the - # *next* child to be processed, and will not match the - # value of ptr[5]. This provides a modest performance - # improvement, as we only have to recreate the ptr tuple - # just before we descend further into the tree (i.e., we - # avoid recreating the tuples for the special case where - # beforeChild indicates that we should not descend - # further). - child_idx += 1 - - # Notify this node that we are about to descend into a - # child. - if self.beforeChild is not None: - tmp = self.beforeChild(node, child) - if tmp is None: - descend = True - child_result = None - else: - descend, child_result = tmp - if not descend: - # We are aborting processing of this child node. - # Tell this node to accept the child result and - # we will move along - if self.acceptChildResult is not None: - data = self.acceptChildResult( - node, data, child_result) - elif data is not None: - data.append(child_result) - # And let the node know that we are done with a - # child node - if self.afterChild is not None: - self.afterChild(node, child) - # Jump to the top to continue processing the - # next child node - continue - - # Update the child argument counter in the stack. 
- # Because we are using tuples, we need to recreate the - # "ptr" object (linked list node) - ptr = ptr[:4] + (data, child_idx,) - - # We are now going to actually enter this node. The - # node will tell us the list of its child nodes that we - # need to process - if self.enterNode is not None: - tmp = self.enterNode(child) - if tmp is None: - args = data = None + # Note that because we increment child_idx just before fetching + # the child node, it must be initialized to -1, and ptr[3] must + # always be *one less than* the number of arguments + child_idx = -1 + ptr = (None, node, args, len(args)-1, data, child_idx) + + try: + while 1: + if child_idx < ptr[3]: + # Increment the child index pointer here for + # consistency. Note that this means that for the bulk + # of the time, 'child_idx' will not match the value of + # ptr[5]. This provides a modest performance + # improvement, as we only have to recreate the ptr tuple + # just before we descend further into the tree (i.e., we + # avoid recreating the tuples for the special case where + # beforeChild indicates that we should not descend + # further). + child_idx += 1 + # This node still has children to process + child = ptr[2][child_idx] + + # Notify this node that we are about to descend into a + # child. + if self.beforeChild is not None: + tmp = self.beforeChild(node, child, child_idx) + if tmp is None: + descend = True + child_result = None + else: + descend, child_result = tmp + if not descend: + # We are aborting processing of this child node. 
+ # Tell this node to accept the child result and + # we will move along + if self.acceptChildResult is not None: + data = self.acceptChildResult( + node, data, child_result, child_idx) + elif data is not None: + data.append(child_result) + # And let the node know that we are done with a + # child node + if self.afterChild is not None: + self.afterChild(node, child, child_idx) + # Jump to the top to continue processing the + # next child node + continue + + # Update the child argument counter in the stack. + # Because we are using tuples, we need to recreate the + # "ptr" object (linked list node) + ptr = ptr[:4] + (data, child_idx,) + + # We are now going to actually enter this node. The + # node will tell us the list of its child nodes that we + # need to process + if self.enterNode is not None: + tmp = self.enterNode(child) + if tmp is None: + args = data = None + else: + args, data = tmp else: - args, data = tmp - else: - args = None - data = [] - if args is None: - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): - # Leaves (either non-pyomo types or - # non-Expressions) have no child arguments, so - # are just put on the stack - args = () + args = None + data = [] + if args is None: + if type(child) in nonpyomo_leaf_types \ + or not child.is_expression_type(): + # Leaves (either non-pyomo types or + # non-Expressions) have no child arguments, so + # are just put on the stack + args = () + else: + args = child.args + if hasattr(args, '__enter__'): + args.__enter__() + node = child + child_idx = -1 + ptr = (ptr, node, args, len(args)-1, data, child_idx) + + else: # child_idx == ptr[3]: + # We are done with this node. 
Call exitNode to compute + # any result + if hasattr(ptr[2], '__exit__'): + ptr[2].__exit__(None, None, None) + if self.exitNode is not None: + node_result = self.exitNode(node, data) else: - args = child.args - node = child - child_idx = 0 - ptr = (ptr, node, args, len(args), data, child_idx) - - else: - # We are done with this node. Call exitNode to compute - # any result - if self.exitNode is not None: - node_result = self.exitNode(node, data) - else: - node_result = data - - # Pop the node off the linked list + node_result = data + + # Pop the node off the linked list + ptr = ptr[0] + # If we have returned to the beginning, return the final + # answer + if ptr is None: + if self.finalizeResult is not None: + return self.finalizeResult(node_result) + else: + return node_result + # Not done yet, update node to point to the new active + # node + node, child = ptr[1], node + data = ptr[4] + child_idx = ptr[5] + + # We need to alert the node to accept the child's result: + if self.acceptChildResult is not None: + data = self.acceptChildResult( + node, data, node_result, child_idx) + elif data is not None: + data.append(node_result) + + # And let the node know that we are done with a child node + if self.afterChild is not None: + self.afterChild(node, child, child_idx) + + finally: + while ptr is not None: + if hasattr(ptr[2], '__exit__'): + ptr[2].__exit__(None, None, None) ptr = ptr[0] - # If we have returned to the beginning, return the final - # answer - if ptr is None: - if self.finalizeResult is not None: - return self.finalizeResult(node_result) - else: - return node_result - # Not done yet, update node to point to the new active - # node - node, child = ptr[1], node - data = ptr[4] - child_idx = ptr[5] - - # We need to alert the node to accept the child's result: - if self.acceptChildResult is not None: - data = self.acceptChildResult(node, data, node_result) - elif data is not None: - data.append(node_result) - - # And let the node know that we are done with 
a child node - if self.afterChild is not None: - self.afterChild(node, child) - class SimpleExpressionVisitor(object): @@ -864,7 +926,7 @@ def sizeof_expression(expr): """ def enter(node): return None, 1 - def accept(node, data, child_result): + def accept(node, data, child_result, child_idx): return data + child_result return StreamBasedExpressionVisitor( enterNode=enter, @@ -890,13 +952,15 @@ def visiting_potential_leaf(self, node): if node.__class__ in nonpyomo_leaf_types: return True, node - if node.is_variable_type(): - return True, value(node) + if node.is_expression_type(): + return False, None - if not node.is_expression_type(): + if node.is_numeric_type(): return True, value(node) + else: + return True, node + - return False, None class FixedExpressionError(Exception): @@ -926,22 +990,33 @@ def visiting_potential_leaf(self, node): if node.__class__ in nonpyomo_leaf_types: return True, node - if node.is_parameter_type(): - if node._component()._mutable: - raise FixedExpressionError() - return True, value(node) - + if node.is_expression_type(): + return False, None - if node.is_variable_type(): - if node.fixed: - raise FixedExpressionError() - else: + if node.is_numeric_type(): + # Get the object value. 
This will also cause templates to + # raise TemplateExpressionErrors + try: + val = value(node) + except TemplateExpressionError: + raise + except: + # Uninitialized Var/Param objects should be given the + # opportunity to map the error to a NonConstant / Fixed + # expression error + if not node.is_fixed(): + raise NonConstantExpressionError() + if not node.is_constant(): + raise FixedExpressionError() + raise + + if not node.is_fixed(): raise NonConstantExpressionError() + if not node.is_constant(): + raise FixedExpressionError() + return True, val - if not node.is_expression_type(): - return True, value(node) - - return False, None + return True, node def evaluate_expression(exp, exception=True, constant=False): @@ -973,29 +1048,18 @@ def evaluate_expression(exp, exception=True, constant=False): try: return visitor.dfs_postorder_stack(exp) - except NonConstantExpressionError: #pragma: no cover - if exception: - raise - return None - - except FixedExpressionError: #pragma: no cover - if exception: - raise - return None - - except TemplateExpressionError: #pragma: no cover - if exception: - raise - return None - - except ValueError: - if exception: - raise - return None - - except TypeError: - # This can be raised in Python3 when evaluating a operation - # returns a complex number (e.g., sqrt(-1)) + except ( TemplateExpressionError, ValueError, TypeError, + NonConstantExpressionError, FixedExpressionError ): + # Errors that we want to be able to suppress: + # + # TemplateExpressionError: raised when generating expression + # templates + # FixedExpressionError, NonConstantExpressionError: raised + # when processing expressions that are expected to be fixed + # (e.g., indices) + # ValueError: "standard" expression value errors + # TypeError: This can be raised in Python3 when evaluating a + # operation returns a complex number (e.g., sqrt(-1)) if exception: raise return None @@ -1164,13 +1228,16 @@ def visiting_potential_leaf(self, node): Return True if the node is 
not expanded. """ - if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable(): + if node.__class__ in nonpyomo_leaf_types: return True, 0 - if not node.is_expression_type(): - return True, 0 if node.is_fixed() else 1 + if node.is_expression_type(): + return False, None - return False, None + if node.is_numeric_type(): + return True, 0 if node.is_fixed() else 1 + else: + return True, node def polynomial_degree(node): @@ -1209,13 +1276,16 @@ def visiting_potential_leaf(self, node): Return True if the node is not expanded. """ - if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable(): + if node.__class__ in nonpyomo_leaf_types: return True, True - elif not node.is_expression_type(): + elif node.is_expression_type(): + return False, None + + elif node.is_numeric_type(): return True, node.is_fixed() - return False, None + return True, node def _expression_is_fixed(node): @@ -1288,15 +1358,18 @@ def visiting_potential_leaf(self, node): if node.__class__ in nonpyomo_leaf_types: return True, str(node) + if node.is_expression_type(): + return False, None + if node.is_variable_type(): if not node.fixed: return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=False) return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values) - if not node.is_expression_type(): + if hasattr(node, 'to_string'): return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values) - - return False, None + else: + return True, str(node) def expression_to_string(expr, verbose=None, labeler=None, smap=None, compute_values=False): diff --git a/pyomo/core/plugins/transform/discrete_vars.py b/pyomo/core/plugins/transform/discrete_vars.py index 80dc76dea9f..65e480a7674 100644 --- a/pyomo/core/plugins/transform/discrete_vars.py +++ b/pyomo/core/plugins/transform/discrete_vars.py @@ -44,11 +44,15 @@ def _apply_to(self, model, **kwds): v.setub(bounds[1]) 
model.del_component("_relaxed_integer_vars") return + # True by default, you can specify False if you want + descend = kwds.get('transform_deactivated_blocks', + options.get('transform_deactivated_blocks', True)) + active = None if descend else True # Relax the model relaxed_vars = {} _base_model_vars = model.component_data_objects( - Var, active=True, descend_into=True ) + Var, active=active, descend_into=True ) for var in _base_model_vars: if not var.is_integer(): continue diff --git a/pyomo/core/pyomoobject.py b/pyomo/core/pyomoobject.py new file mode 100644 index 00000000000..40854d7aa7a --- /dev/null +++ b/pyomo/core/pyomoobject.py @@ -0,0 +1,37 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + + +class PyomoObject(object): + __slots__ = () + + def is_component_type(self): + """Return True if this class is a Pyomo component""" + return False + + def is_numeric_type(self): + """Return True if this class is a Pyomo numeric object""" + return False + + def is_parameter_type(self): + """Return False unless this class is a parameter object""" + return False + + def is_variable_type(self): + """Return False unless this class is a variable object""" + return False + + def is_expression_type(self): + """Return True if this numeric value is an expression""" + return False + + def is_named_expression_type(self): + """Return True if this numeric value is a named expression""" + return False diff --git a/pyomo/core/tests/transform/test_transform.py b/pyomo/core/tests/transform/test_transform.py index c47bf568090..15eb6ff97ec 100644 --- a/pyomo/core/tests/transform/test_transform.py +++ b/pyomo/core/tests/transform/test_transform.py @@ -101,7 +101,7 @@ def test_relax_integrality1(self): self.model.e = Var(within=Boolean) self.model.f = Var(domain=Boolean) instance=self.model.create_instance() - xfrm = TransformationFactory('core.relax_integrality') + xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance) self.assertEqual(type(rinst.a.domain), RealSet) self.assertEqual(type(rinst.b.domain), RealSet) @@ -126,7 +126,7 @@ def test_relax_integrality2(self): self.model.e = Var([1,2,3], within=Boolean, dense=True) self.model.f = Var([1,2,3], domain=Boolean, dense=True) instance=self.model.create_instance() - xfrm = TransformationFactory('core.relax_integrality') + xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance) self.assertEqual(type(rinst.a[1].domain), RealSet) self.assertEqual(type(rinst.b[1].domain), RealSet) @@ -152,7 +152,7 @@ def test_relax_integrality_cloned(self): self.model.f = Var(domain=Boolean) 
instance=self.model.create_instance() instance_cloned = instance.clone() - xfrm = TransformationFactory('core.relax_integrality') + xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance_cloned) self.assertEqual(type(rinst.a.domain), RealSet) self.assertEqual(type(rinst.b.domain), RealSet) @@ -172,7 +172,7 @@ def test_relax_integrality(self): self.model.d = Var(within=Integers, bounds=(-2,3)) instance=self.model.create_instance() instance_cloned = instance.clone() - xfrm = TransformationFactory('core.relax_integrality') + xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance_cloned) self.assertEqual(type(rinst.d.domain), RealSet) self.assertEqual(rinst.d.bounds, (-2,3)) @@ -190,6 +190,40 @@ def test_relax_integrality_simple_cloned(self): self.assertIs(instance.x.domain, Integers) self.assertIs(instance_cloned.x.domain, Integers) + def test_relax_integrality_on_deactivated_blocks(self): + self.model.x = Var(domain=NonNegativeIntegers) + self.model.b = Block() + self.model.b.x = Var(domain=Binary) + self.model.b.y = Var(domain=Integers, bounds=(-3,2)) + instance = self.model.create_instance() + instance.b.deactivate() + relax_integrality = TransformationFactory('core.relax_integer_vars') + relax_integrality.apply_to(instance) + self.assertIs(instance.b.x.domain, Reals) + self.assertEqual(instance.b.x.lb, 0) + self.assertEqual(instance.b.x.ub, 1) + self.assertIs(instance.b.y.domain, Reals) + self.assertEqual(instance.b.y.lb, -3) + self.assertEqual(instance.b.y.ub, 2) + self.assertIs(instance.x.domain, Reals) + self.assertEqual(instance.x.lb, 0) + self.assertIsNone(instance.x.ub) + + def test_relax_integrality_only_active_blocks(self): + self.model.x = Var(domain=NonNegativeIntegers) + self.model.b = Block() + self.model.b.x = Var(domain=Binary) + self.model.b.y = Var(domain=Integers, bounds=(-3,2)) + instance = self.model.create_instance() + instance.b.deactivate() + relax_integrality = 
TransformationFactory('core.relax_integer_vars') + relax_integrality.apply_to(instance, transform_deactivated_blocks=False) + self.assertIs(instance.b.x.domain, Binary) + self.assertIs(instance.b.y.domain, Integers) + self.assertIs(instance.x.domain, Reals) + self.assertEqual(instance.x.lb, 0) + self.assertIsNone(instance.x.ub) + def test_nonnegativity_transformation_1(self): self.model.a = Var() self.model.b = Var(within=NonNegativeIntegers) diff --git a/pyomo/core/tests/unit/test_block.py b/pyomo/core/tests/unit/test_block.py index bc31fa2409b..a5223e59d82 100644 --- a/pyomo/core/tests/unit/test_block.py +++ b/pyomo/core/tests/unit/test_block.py @@ -61,6 +61,7 @@ def generate_model(self): model = ConcreteModel() model.q = Set(initialize=[1,2]) model.Q = Set(model.q,initialize=[1,2]) + model.qq = NonNegativeIntegers*model.q model.x = Var(initialize=-1) model.X = Var(model.q,initialize=-1) model.e = Expression(initialize=-1) @@ -152,8 +153,8 @@ def B_rule(block,i): model.component_lists = {} model.component_data_lists = {} - model.component_lists[Set] = [model.q, model.Q] - model.component_data_lists[Set] = [model.q, model.Q[1], model.Q[2]] + model.component_lists[Set] = [model.q, model.Q, model.qq] + model.component_data_lists[Set] = [model.q, model.Q[1], model.Q[2], model.qq] model.component_lists[Var] = [model.x, model.X] model.component_data_lists[Var] = [model.x, model.X[1], model.X[2]] model.component_lists[Expression] = [model.e, model.E] @@ -186,7 +187,8 @@ def generator_test(self, ctype): generator = list(block.component_objects(ctype, active=True, descend_into=False)) except: if issubclass(ctype, Component): - self.fail("component_objects(active=True) failed with ctype %s" % ctype) + print("component_objects(active=True) failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("component_objects(active=True) should have failed with ctype %s" % ctype) @@ -205,7 +207,8 @@ def generator_test(self, ctype): generator = 
list(block.component_objects(ctype, descend_into=False)) except: if issubclass(ctype, Component): - self.fail("components failed with ctype %s" % ctype) + print("components failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("components should have failed with ctype %s" % ctype) @@ -224,7 +227,8 @@ def generator_test(self, ctype): generator = list(block.component_data_iterindex(ctype, active=True, sort=False, descend_into=False)) except: if issubclass(ctype, Component): - self.fail("component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" % ctype) + print("component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("component_data_objects(active=True, sort_by_keys=False) should have failed with ctype %s" % ctype) @@ -243,7 +247,8 @@ def generator_test(self, ctype): generator = list(block.component_data_iterindex(ctype, active=True, sort=True, descend_into=False)) except: if issubclass(ctype, Component): - self.fail("component_data_objects(active=True, sort=True) failed with ctype %s" % ctype) + print("component_data_objects(active=True, sort=True) failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("component_data_objects(active=True, sort=True) should have failed with ctype %s" % ctype) @@ -262,7 +267,8 @@ def generator_test(self, ctype): generator = list(block.component_data_iterindex(ctype, sort=False, descend_into=False)) except: if issubclass(ctype, Component): - self.fail("components_data(sort_by_keys=True) failed with ctype %s" % ctype) + print("components_data(sort_by_keys=True) failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("components_data(sort_by_keys=True) should have failed with ctype %s" % ctype) @@ -281,7 +287,8 @@ def generator_test(self, ctype): generator = list(block.component_data_iterindex(ctype, sort=True, 
descend_into=False)) except: if issubclass(ctype, Component): - self.fail("components_data(sort_by_keys=False) failed with ctype %s" % ctype) + print("components_data(sort_by_keys=False) failed with ctype %s" % ctype) + raise else: if not issubclass(ctype, Component): self.fail("components_data(sort_by_keys=False) should have failed with ctype %s" % ctype) @@ -2427,7 +2434,80 @@ def pprint(self, ostream=None, verbose=False, prefix=""): b.pprint(ostream=stream) self.assertEqual(correct_s, stream.getvalue()) + def test_block_rules(self): + m = ConcreteModel() + m.I = Set() + _rule_ = [] + def _block_rule(b,i): + _rule_.append(i) + b.x = Var(range(i)) + m.b = Block(m.I, rule=_block_rule) + # I is empty: no rules called + self.assertEqual(_rule_, []) + m.I.update([1,3,5]) + # Fetching a new block will call the rule + _b = m.b[3] + self.assertEqual(len(m.b), 1) + self.assertEqual(_rule_, [3]) + self.assertIn('x', _b.component_map()) + self.assertIn('x', m.b[3].component_map()) + + # If you transfer the attributes directly, the rule will still + # be called. + _tmp = Block() + _tmp.y = Var(range(3)) + m.b[5].transfer_attributes_from(_tmp) + self.assertEqual(len(m.b), 2) + self.assertEqual(_rule_, [3,5]) + self.assertIn('x', m.b[5].component_map()) + self.assertIn('y', m.b[5].component_map()) + + # We do not support block assignment (and the rule will NOT be + # called) + _tmp = Block() + _tmp.y = Var(range(3)) + with self.assertRaisesRegex( + RuntimeError, "Block components do not support " + "assignment or set_value"): + m.b[1] = _tmp + self.assertEqual(len(m.b), 2) + self.assertEqual(_rule_, [3,5]) + + # Blocks with non-finite indexing sets cannot be automatically + # populated (even if they have a rule!) 
+ def _bb_rule(b, i, j): + _rule_.append((i,j)) + b.x = Var(RangeSet(i)) + b.y = Var(RangeSet(j)) + m.bb = Block(m.I, NonNegativeIntegers, rule=_bb_rule) + self.assertEqual(_rule_, [3,5]) + _b = m.bb[3,5] + self.assertEqual(_rule_, [3,5,(3,5)]) + self.assertEqual(len(m.bb), 1) + self.assertEqual(len(_b.x), 3) + self.assertEqual(len(_b.y), 5) + + def test_derived_block_construction(self): + # This tests a case where a derived block doesn't follow the + # assumption that unconstructed scalar blocks initialize + # `_data[None] = self` (therefore doesn't fully support abstract + # models). At one point, that was causing the block rule to + # fire twice during construction. + class ConcreteBlock(Block): + pass + + class ScalarConcreteBlock(_BlockData, ConcreteBlock): + def __init__(self, *args, **kwds): + _BlockData.__init__(self, component=self) + ConcreteBlock.__init__(self, *args, **kwds) + + _buf = [] + def _rule(b): + _buf.append(1) + m = ConcreteModel() + m.b = ScalarConcreteBlock(rule=_rule) + self.assertEqual(_buf, [1]) if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py index 47c4ba998a4..812e50555eb 100644 --- a/pyomo/core/tests/unit/test_derivs.py +++ b/pyomo/core/tests/unit/test_derivs.py @@ -1,6 +1,7 @@ import pyutilib.th as unittest import pyomo.environ as pe from pyomo.core.expr.calculus.diff_with_pyomo import reverse_ad, reverse_sd +from pyomo.common.getGSL import find_GSL tol = 6 @@ -190,3 +191,32 @@ def e2(m, i): derivs = reverse_ad(m.o.expr) symbolic = reverse_sd(m.o.expr) self.assertAlmostEqual(derivs[m.x], pe.value(symbolic[m.x]), tol) + + def test_multiple_named_expressions(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.y = pe.Var() + m.x.value = 1 + m.y.value = 1 + m.E = pe.Expression(expr=m.x*m.y) + e = m.E - m.E + derivs = reverse_ad(e) + self.assertAlmostEqual(derivs[m.x], 0) + self.assertAlmostEqual(derivs[m.y], 0) + symbolic = reverse_sd(e) + 
self.assertAlmostEqual(pe.value(symbolic[m.x]), 0) + self.assertAlmostEqual(pe.value(symbolic[m.y]), 0) + + def test_external(self): + DLL = find_GSL() + if not DLL: + self.skipTest('Could not find the amplgsl.dll library') + + m = pe.ConcreteModel() + m.hypot = pe.ExternalFunction(library=DLL, function='gsl_hypot') + m.x = pe.Var(initialize=0.5) + m.y = pe.Var(initialize=1.5) + e = 2 * m.hypot(m.x, m.x*m.y) + derivs = reverse_ad(e) + self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) + self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) diff --git a/pyomo/core/tests/unit/test_indexed_slice.py b/pyomo/core/tests/unit/test_indexed_slice.py index a7c468f6e88..225092a7e44 100644 --- a/pyomo/core/tests/unit/test_indexed_slice.py +++ b/pyomo/core/tests/unit/test_indexed_slice.py @@ -18,7 +18,7 @@ from pyomo.environ import * from pyomo.core.base.block import _BlockData -from pyomo.core.base.indexed_component import _IndexedComponent_slice +from pyomo.core.base.indexed_component_slice import IndexedComponent_slice def _x_init(m, k): return k @@ -60,25 +60,25 @@ def test_simple_getitem(self): def test_simple_getslice(self): _slicer = self.m.b[:,4] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4]', 'b[2,4]', 'b[3,4]'] ) _slicer = self.m.b[1,4].c[:,4] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4].c[1,4]', 'b[1,4].c[2,4]', 'b[1,4].c[3,4]'] ) def test_wildcard_slice(self): _slicer = self.m.b[:] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[...] 
- self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]', @@ -87,14 +87,14 @@ def test_wildcard_slice(self): ] ) _slicer = self.m.b[1,...] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]', ] ) _slicer = self.m.b[...,5] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'b[1,5]', @@ -103,14 +103,14 @@ def test_wildcard_slice(self): ] ) _slicer = self.m.bb[2,...,8] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]', ] ) _slicer = self.m.bb[:,...,8] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', @@ -119,7 +119,7 @@ def test_wildcard_slice(self): ] ) _slicer = self.m.bb[:,:,...,8] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', @@ -128,7 +128,7 @@ def test_wildcard_slice(self): ] ) _slicer = self.m.bb[:,...,:,8] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', @@ -137,19 +137,19 @@ def test_wildcard_slice(self): ] ) _slicer = self.m.b[1,4,...] 
- self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [ 'b[1,4]', ] ) _slicer = self.m.b[1,2,3,...] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[1,:,2] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) @@ -160,20 +160,20 @@ def test_wildcard_slice(self): def test_nonterminal_slice(self): _slicer = self.m.b[:,4].x - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4].x', 'b[2,4].x', 'b[3,4].x'] ) _slicer = self.m.b[:,4].x[7] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4].x[7]', 'b[2,4].x[7]', 'b[3,4].x[7]'] ) def test_nested_slices(self): _slicer = self.m.b[1,:].c[:,4].x - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', @@ -182,7 +182,7 @@ def test_nested_slices(self): ] ) _slicer = self.m.b[1,:].c[:,4].x[8] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, @@ -193,7 +193,7 @@ def test_nested_slices(self): def test_component_function_slices(self): _slicer = self.m.component('b')[1,:].component('c')[:,4].component('x') - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, 
IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', @@ -233,7 +233,7 @@ def test_setattr_slices(self): _slice = self.m.b[...].c[...].x[:] with self.assertRaisesRegexp( AttributeError, ".*VarData' object has no attribute 'bogus'"): - _slice.duplicate().bogus = 0 + _slice.bogus = 0 # but disabling the exception flag will run without error _slice.attribute_errors_generate_exceptions = False # This doesn't do anything ... simply not raising an exception @@ -250,15 +250,15 @@ def test_delattr_slices(self): _slice = self.m.b[1,:].c[:,4].x.foo _slice._call_stack[-1] = ( - _IndexedComponent_slice.del_attribute, + IndexedComponent_slice.del_attribute, _slice._call_stack[-1][1] ) # call the iterator to delete the attributes - list(_slice.duplicate()) + list(_slice) self.assertEqual(sum(list(1 if hasattr(x,'foo') else 0 for x in self.m.b[:,:].c[:,:].x)), 0) # calling the iterator again will raise an exception with self.assertRaisesRegexp(AttributeError, 'foo'): - list(_slice.duplicate()) + list(_slice) # but disabling the exception flag will run without error _slice.attribute_errors_generate_exceptions = False # This doesn't do anything ... simply not raising an exception @@ -284,7 +284,7 @@ def test_setitem_slices(self): with self.assertRaisesRegexp( KeyError, "Index 'bogus' is not valid for indexed " "component 'b\[1,4\]\.c\[1,4\]\.x'"): - _slice.duplicate()['bogus'] = 0 + _slice['bogus'] = 0 # but disabling the exception flag will run without error _slice.key_errors_generate_exceptions = False # This doesn't do anything ... 
simply not raising an exception @@ -337,7 +337,7 @@ def test_delitem_slices(self): with self.assertRaisesRegexp( KeyError, "Index 'bogus' is not valid for indexed " "component 'b\[2,4\]\.c\[1,4\]\.x'"): - del _slice.duplicate()['bogus'] + del _slice['bogus'] # but disabling the exception flag will run without error _slice.key_errors_generate_exceptions = False # This doesn't do anything ... simply not raising an exception @@ -366,45 +366,45 @@ def test_delitem_component(self): def test_empty_slices(self): _slicer = self.m.b[1,:].c[:,1].x - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[1,:].c[:,4].x[1] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.key_errors_generate_exceptions = False ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[1,:].c[:,4].y - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.attribute_errors_generate_exceptions = False ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[1,:].c[:,4].component('y', False) - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = False ans = [ str(x) for x in _slicer ] self.assertEqual( ans, [] ) _slicer = self.m.b[1,:].c[:,4].x[1] - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.key_errors_generate_exceptions = True self.assertRaises( KeyError, _slicer.next ) _slicer = self.m.b[1,:].c[:,4].y - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.attribute_errors_generate_exceptions = True self.assertRaises( AttributeError, 
_slicer.next ) _slicer = self.m.b[1,:].c[:,4].component('y', False) - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = True self.assertRaises( TypeError,_slicer.next ) _slicer = self.m.b[1,:].c[:,4].component() - self.assertIsInstance(_slicer, _IndexedComponent_slice) + self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = True self.assertRaises( TypeError, _slicer.next ) @@ -514,5 +514,39 @@ def test_clone_on_model(self): self.assertIs(x.model(), m) self.assertIs(y.model(), n) + def test_hash_eqality(self): + m = self.m + a = m.b[1,:].c[:,...,4].x + b = m.b[1,:].c[1,...,:].x + self.assertNotEqual(a, b) + self.assertNotEqual(a, m) + + self.assertEqual(a, a) + self.assertEqual(a, m.b[1,:].c[:,...,4].x) + + _set = set([a,b]) + self.assertEqual(len(_set), 2) + _set.add(m.b[1,:].c[:,...,4].x) + self.assertEqual(len(_set), 2) + _set.add(m.b[1,:].c[:,4].x) + self.assertEqual(len(_set), 3) + + def test_duplicate(self): + m = self.m + a = m.b[1,:].c[:,...,4] + + b = a.x + self.assertIs(a._call_stack, b._call_stack) + self.assertEqual(a._len+1, b._len) + + c = a.y + self.assertEqual(a._len+1, c._len) + self.assertIsNot(a._call_stack, c._call_stack) + + b1 = b.duplicate() + self.assertIsNot(a._call_stack, b1._call_stack) + self.assertEqual(a._len+1, b1._len) + self.assertEqual(hash(b), hash(b1)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_numeric_expr.py b/pyomo/core/tests/unit/test_numeric_expr.py index a36b95bfe14..7026515bf15 100644 --- a/pyomo/core/tests/unit/test_numeric_expr.py +++ b/pyomo/core/tests/unit/test_numeric_expr.py @@ -16,6 +16,8 @@ import math import os import re +from collections import defaultdict + import six import sys from os.path import abspath, dirname @@ -55,7 +57,7 @@ from pyomo.core.base.var import SimpleVar from pyomo.core.base.param import _ParamData, 
SimpleParam from pyomo.core.base.label import * -from pyomo.core.base.template_expr import IndexTemplate +from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr.expr_errors import TemplateExpressionError from pyomo.repn import generate_standard_repn @@ -2089,7 +2091,7 @@ def test_getitem(self): t = IndexTemplate(m.I) e = m.x[t+m.P[t+1]] + 3 - self.assertEqual("sum(x(sum({I}, P(sum({I}, 1)))), 3)", str(e)) + self.assertEqual("sum(getitem(x, sum({I}, getitem(P, sum({I}, 1)))), 3)", str(e)) def test_small_expression(self): # @@ -2326,7 +2328,7 @@ def test_getitem(self): t = IndexTemplate(m.I) e = m.x[t+m.P[t+1]] + 3 - self.assertEqual("x({I} + P({I} + 1)) + 3", str(e)) + self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e)) def test_associativity_rules(self): m = ConcreteModel() @@ -3429,10 +3431,19 @@ def test_Expr_if(self): expr = Expr_if(m.e,1,0) self.assertEqual(expr.polynomial_degree(), 0) # + # A nonconstant expression has degree if both arguments have the + # same degree, as long as the IF is fixed (even if it is not + # defined) + # + expr = Expr_if(m.e,m.a,0) + self.assertEqual(expr.polynomial_degree(), 0) + expr = Expr_if(m.e,5*m.b,1+m.b) + self.assertEqual(expr.polynomial_degree(), 1) + # # A nonconstant expression has degree None because # m.e is an uninitialized parameter # - expr = Expr_if(m.e,m.a,0) + expr = Expr_if(m.e,m.b,0) self.assertEqual(expr.polynomial_degree(), None) @@ -4002,7 +4013,7 @@ def test_getitem(self): e = m.x[t+m.P[t+1]] + 3 e_ = e.clone() - self.assertEqual("x({I} + P({I} + 1)) + 3", str(e_)) + self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e_)) # total = counter.count - start self.assertEqual(total, 1) @@ -5012,7 +5023,7 @@ def test_getitem(self): e = m.x[t+m.P[t+1]] + 3 s = pickle.dumps(e) e_ = pickle.loads(s) - self.assertEqual("x({I} + P({I} + 1)) + 3", str(e)) + self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e)) def test_abs(self): M = ConcreteModel() @@ -5212,5 +5223,43 @@ def 
test_LinearExpression_expression(self): self.assertTrue(len(repn.linear_coefs) == N) self.assertTrue(len(repn.linear_vars) == N) + def test_LinearExpression_polynomial_degree(self): + m = ConcreteModel() + m.S = RangeSet(2) + m.var_1 = Var(initialize=0) + m.var_2 = Var(initialize=0) + m.var_3 = Var(m.S, initialize=0) + + def con_rule(model): + return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0 + + m.c1 = Constraint(rule=con_rule) + + m.var_1.fix(1) + m.var_2.fix(1) + m.var_3.fix(1) + + self.assertTrue(is_fixed(m.c1.body)) + self.assertEqual(polynomial_degree(m.c1.body), 0) + + def test_LinearExpression_is_fixed(self): + m = ConcreteModel() + m.S = RangeSet(2) + m.var_1 = Var(initialize=0) + m.var_2 = Var(initialize=0) + m.var_3 = Var(m.S, initialize=0) + + def con_rule(model): + return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0 + + m.c1 = Constraint(rule=con_rule) + + m.var_1.fix(1) + m.var_2.fix(1) + + self.assertFalse(is_fixed(m.c1.body)) + self.assertEqual(polynomial_degree(m.c1.body), 1) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_reference.py b/pyomo/core/tests/unit/test_reference.py index 3f490106f67..883b2442f5a 100644 --- a/pyomo/core/tests/unit/test_reference.py +++ b/pyomo/core/tests/unit/test_reference.py @@ -706,16 +706,52 @@ def b(b, i): self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 3) - # While (2,1) appears to be a valid member of the slice, because 2 - # was not in the Set when the Block rule fired, there is no - # m.b[2] block data. Attempting to add m.xx[2,1] will correctly - # instantiate the block and then promptly fail because we don't - # automatically fire rules after construction. 
- with self.assertRaisesRegexp( - AttributeError, "'_BlockData' object has no attribute 'x'"): - m.xx.add((2,1)) + # While (2,2) appears to be a valid member of the slice, because + # 2 was not in the Set when the Block rule fired, there is no + # m.b[2] block data. Accessing m.xx[2,1] will construct the + # b[2] block data, fire the rule, and then add the new value to + # the Var x. + self.assertEqual(len(m.xx), 3) + m.xx[2,2] = 10 + self.assertEqual(len(m.b), 2) + self.assertEqual(len(list(m.b[2].component_objects())), 1) + self.assertEqual(len(m.xx), 4) + self.assertIs(m.xx[2,2], m.b[2].x[2]) + self.assertEqual(value(m.b[2].x[2]), 10) + + def test_insert_var(self): + m = ConcreteModel() + m.T = Set(initialize=[1,5]) + m.x = Var(m.T, initialize=lambda m,i: i) + @m.Block(m.T) + def b(b, i): + b.y = Var(initialize=lambda b: 10*b.index()) + ref_x = Reference(m.x[:]) + ref_y = Reference(m.b[:].y) + + self.assertEqual(len(m.x), 2) + self.assertEqual(len(ref_x), 2) self.assertEqual(len(m.b), 2) - self.assertEqual(len(list(m.b[2].component_objects())), 0) + self.assertEqual(len(ref_y), 2) + self.assertEqual(value(ref_x[1]), 1) + self.assertEqual(value(ref_x[5]), 5) + self.assertEqual(value(ref_y[1]), 10) + self.assertEqual(value(ref_y[5]), 50) + + m.T.add(2) + _x = ref_x[2] + self.assertEqual(len(m.x), 3) + self.assertIs(_x, m.x[2]) + self.assertEqual(value(_x), 2) + self.assertEqual(value(m.x[2]), 2) + self.assertEqual(value(ref_x[2]), 2) + + _y = ref_y[2] + self.assertEqual(len(m.b), 3) + self.assertIs(_y, m.b[2].y) + self.assertEqual(value(_y), 20) + self.assertEqual(value(ref_y[2]), 20) + self.assertEqual(value(m.b[2].y), 20) if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_template_expr.py b/pyomo/core/tests/unit/test_template_expr.py index b31939bf95e..9d6d818ec5e 100644 --- a/pyomo/core/tests/unit/test_template_expr.py +++ b/pyomo/core/tests/unit/test_template_expr.py @@ -2,8 +2,8 @@ # # Pyomo: Python Optimization Modeling 
Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -11,19 +11,24 @@ import pyutilib.th as unittest -from pyomo.environ import ConcreteModel, RangeSet, Param, Var, Set, value +from pyomo.environ import ( + ConcreteModel, AbstractModel, RangeSet, Param, Var, Set, value, +) import pyomo.core.expr.current as EXPR -from pyomo.core.base.template_expr import ( - IndexTemplate, +from pyomo.core.expr.template_expr import ( + IndexTemplate, + TemplateExpressionError, _GetItemIndexer, - substitute_template_expression, + resolve_template, + templatize_constraint, + substitute_template_expression, substitute_getitem_with_param, substitute_template_with_value, ) import six -class ExpressionObjectTester(object): +class TestTemplateExpressions(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() m.I = RangeSet(1,9) @@ -33,150 +38,211 @@ def setUp(self): m.p = Param(m.I, m.J, initialize=lambda m,i,j: 100*i+j) m.s = Set(m.I, initialize=lambda m,i:range(i)) + def test_nonTemplates(self): + m = self.m + self.assertIs(resolve_template(m.x[1]), m.x[1]) + e = m.x[1] + m.x[2] + self.assertIs(resolve_template(e), e) + + def test_IndexTemplate(self): + m = self.m + i = IndexTemplate(m.I) + with self.assertRaisesRegex( + TemplateExpressionError, + "Evaluating uninitialized IndexTemplate"): + value(i) + + self.assertEqual(str(i), "{I}") + + i.set_value(5) + self.assertEqual(value(i), 5) + self.assertIs(resolve_template(i), 5) + def test_template_scalar(self): m = self.m t = 
IndexTemplate(m.I) e = m.x[t] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(tuple(e.args), (t,)) + self.assertEqual(e.args, (m.x, t)) self.assertFalse(e.is_constant()) self.assertFalse(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(str(e), "x[{I}]") t.set_value(5) - self.assertEqual(e(), 6) - self.assertIs(e.resolve_template(), m.x[5]) - t.set_value(None) + v = e() + self.assertIn(type(v), (int, float)) + self.assertEqual(v, 6) + self.assertIs(resolve_template(e), m.x[5]) + t.set_value() e = m.p[t,10] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.p) - self.assertEqual(tuple(e.args), (t,10)) + self.assertEqual(e.args, (m.p,t,10)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(str(e), "p[{I},10]") t.set_value(5) - self.assertEqual(e(), 510) - self.assertIs(e.resolve_template(), m.p[5,10]) - t.set_value(None) + v = e() + self.assertIn(type(v), (int, float)) + self.assertEqual(v, 510) + self.assertIs(resolve_template(e), m.p[5,10]) + t.set_value() e = m.p[5,t] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.p) - self.assertEqual(tuple(e.args), (5,t)) + self.assertEqual(e.args, (m.p,5,t)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(str(e), "p[5,{I}]") t.set_value(10) - self.assertEqual(e(), 510) - self.assertIs(e.resolve_template(), m.p[5,10]) - t.set_value(None) + v = e() + self.assertIn(type(v), (int, float)) + self.assertEqual(v, 510) + self.assertIs(resolve_template(e), m.p[5,10]) + t.set_value() - # TODO: Fixing this test requires fixing Set - def _test_template_scalar_with_set(self): + def test_template_scalar_with_set(self): m = self.m t = IndexTemplate(m.I) e = m.s[t] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.s) - 
self.assertEqual(tuple(e.args), (t,)) + self.assertEqual(e.args, (m.s,t)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(str(e), "s[{I}]") t.set_value(5) - self.assertRaises(TypeError, e) - self.assertIs(e.resolve_template(), m.s[5]) - t.set_value(None) + v = e() + self.assertIs(v, m.s[5]) + self.assertIs(resolve_template(e), m.s[5]) + t.set_value() def test_template_operation(self): m = self.m t = IndexTemplate(m.I) e = m.x[t+m.P[5]] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(e.arg(0).arg(1), m.P[5]) - + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(e.arg(1).arg(1), m.P[5]) + self.assertEqual(str(e), "x[{I} + P[5]]") def test_nested_template_operation(self): m = self.m t = IndexTemplate(m.I) e = m.x[t+m.P[t+1]] self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) - + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) + self.assertEqual(str(e), "x[{I} + P[{I} + 1]]") + + def test_block_templates(self): + m = ConcreteModel() + m.T = RangeSet(3) + @m.Block(m.T) 
+ def b(b, i): + b.x = Var(initialize=i) + + @b.Block(m.T) + def bb(bb, j): + bb.I =RangeSet(i*j) + bb.y = Var(bb.I, initialize=lambda m,i:i) + t = IndexTemplate(m.T) + e = m.b[t].x + self.assertIs(type(e), EXPR.GetAttrExpression) + self.assertEqual(e.nargs(), 2) + self.assertIs(type(e.arg(0)), EXPR.GetItemExpression) + self.assertIs(e.arg(0).arg(0), m.b) + self.assertEqual(e.arg(0).nargs(), 2) + self.assertIs(e.arg(0).arg(1), t) + self.assertEqual(str(e), "b[{T}].x") + t.set_value(2) + v = e() + self.assertIn(type(v), (int, float)) + self.assertEqual(v, 2) + self.assertIs(resolve_template(e), m.b[2].x) + t.set_value() + + e = m.b[t].bb[t].y[1] + self.assertIs(type(e), EXPR.GetItemExpression) + self.assertEqual(e.nargs(), 2) + self.assertEqual(str(e), "b[{T}].bb[{T}].y[1]") + t.set_value(2) + v = e() + self.assertIn(type(v), (int, float)) + self.assertEqual(v, 1) + self.assertIs(resolve_template(e), m.b[2].bb[2].y[1]) def test_template_name(self): m = self.m t = IndexTemplate(m.I) E = m.x[t+m.P[1+t]] + m.P[1] - self.assertEqual( str(E), "x({I} + P(1 + {I})) + P[1]") + self.assertEqual( str(E), "x[{I} + P[1 + {I}]] + P[1]") E = m.x[t+m.P[1+t]**2.]**2. 
+ m.P[1] - self.assertEqual( str(E), "x({I} + P(1 + {I})**2.0)**2.0 + P[1]") - + self.assertEqual( str(E), "x[{I} + P[1 + {I}]**2.0]**2.0 + P[1]") def test_template_in_expression(self): m = self.m t = IndexTemplate(m.I) E = m.x[t+m.P[t+1]] + m.P[1] - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E = m.P[1] + m.x[t+m.P[t+1]] - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(1) self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + 
self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E = m.x[t+m.P[t+1]] + 1 - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E = 1 + m.x[t+m.P[t+1]] - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(E.nargs()-1) self.assertIs(type(e), EXPR.GetItemExpression) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) - + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) def test_clone(self): m = self.m @@ -184,21 +250,21 @@ def test_clone(self): E_base = m.x[t+m.P[t+1]] + m.P[1] E = E_base.clone() - self.assertTrue(isinstance(E, 
EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) self.assertIs(type(e), EXPR.GetItemExpression) self.assertIsNot(e, E_base.arg(0)) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(0).arg(1)), - type(E_base.arg(0).arg(0).arg(1))) - self.assertIsNot(e.arg(0).arg(1), - E_base.arg(0).arg(0).arg(1)) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), + type(E_base.arg(0).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), + E_base.arg(0).arg(1).arg(1)) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E_base = m.P[1] + m.x[t+m.P[t+1]] E = E_base.clone() @@ -206,65 +272,282 @@ def test_clone(self): e = E.arg(1) self.assertIs(type(e), EXPR.GetItemExpression) self.assertIsNot(e, E_base.arg(0)) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(0).arg(1)), - type(E_base.arg(1).arg(0).arg(1))) - self.assertIsNot(e.arg(0).arg(1), - E_base.arg(1).arg(0).arg(1)) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), 
t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), + type(E_base.arg(1).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), + E_base.arg(1).arg(1).arg(1)) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E_base = m.x[t+m.P[t+1]] + 1 E = E_base.clone() - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) self.assertIs(type(e), EXPR.GetItemExpression) self.assertIsNot(e, E_base.arg(0)) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(0).arg(1)), - type(E_base.arg(0).arg(0).arg(1))) - self.assertIsNot(e.arg(0).arg(1), - E_base.arg(0).arg(0).arg(1)) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), + type(E_base.arg(0).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), + E_base.arg(0).arg(1).arg(1)) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) E_base = 1 + m.x[t+m.P[t+1]] E = E_base.clone() - self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) + self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(-1) self.assertIs(type(e), EXPR.GetItemExpression) self.assertIsNot(e, E_base.arg(0)) - self.assertIs(e._base, m.x) - self.assertEqual(e.nargs(), 1) - self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(0), t) - 
self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(0).arg(1)), - type(E_base.arg(-1).arg(0).arg(1))) - self.assertIsNot(e.arg(0).arg(1), - E_base.arg(-1).arg(0).arg(1)) - self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase)) - self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t) - - -class TestTemplate_expressionObjects\ - ( ExpressionObjectTester, unittest.TestCase ): - - def setUp(self): - # This class tests the Pyomo 4.x expression trees - ExpressionObjectTester.setUp(self) - - @unittest.expectedFailure - def test_template_scalar_with_set(self): - self._test_template_scalar_with_set() + self.assertEqual(e.nargs(), 2) + self.assertIs(e.arg(0), m.x) + self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(0), t) + self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), + type(E_base.arg(-1).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), + E_base.arg(-1).arg(1).arg(1)) + self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) + self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) + + +class TestTemplatizeRule(unittest.TestCase): + def test_simple_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.x = Var(m.I) + @m.Constraint(m.I) + def c(m, i): + return m.x[i] <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 1) + self.assertIs(indices[0]._set, m.I) + self.assertEqual(str(template), "x[_1] <= 0.0") + # Test that the RangeSet iterator was put back + self.assertEqual(list(m.I), list(range(1,4))) + # Evaluate the template + indices[0].set_value(2) + self.assertEqual(str(resolve_template(template)), 'x[2] <= 0.0') + + def test_simple_abstract_rule(self): + m = AbstractModel() + m.I = RangeSet(3) + m.x = Var(m.I) + @m.Constraint(m.I) + def c(m, i): + return m.x[i] <= 0 + + # Note: the constraint can be abstract, but the Set/Var must + # have been constructed (otherwise accessing the 
Set raises an + # exception) + + with self.assertRaisesRegex( + ValueError, ".*has not been constructed"): + template, indices = templatize_constraint(m.c) + + m.I.construct() + m.x.construct() + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 1) + self.assertIs(indices[0]._set, m.I) + self.assertEqual(str(template), "x[_1] <= 0.0") + + def test_simple_sum_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.J = RangeSet(3) + m.x = Var(m.I,m.J) + @m.Constraint(m.I) + def c(m, i): + return sum(m.x[i,j] for j in m.J) <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 1) + self.assertIs(indices[0]._set, m.I) + self.assertEqual( + template.to_string(verbose=True), + "templatesum(getitem(x, _1, _2), iter(_2, J)) <= 0.0" + ) + self.assertEqual( + str(template), + "SUM(x[_1,_2] for _2 in J) <= 0.0" + ) + # Evaluate the template + indices[0].set_value(2) + self.assertEqual( + str(resolve_template(template)), + 'x[2,1] + x[2,2] + x[2,3] <= 0.0' + ) + + def test_nested_sum_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.J = RangeSet(3) + m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) + m.x = Var(m.I,m.J,[10,20,30]) + @m.Constraint() + def c(m): + return sum( sum(m.x[i,j,k] for k in m.K[i]) + for j in m.J for i in m.I) <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 0) + self.assertEqual( + template.to_string(verbose=True), + "templatesum(" + "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " + "iter(_1, J), iter(_2, I)) <= 0.0" + ) + self.assertEqual( + str(template), + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " + "for _1 in J for _2 in I) <= 0.0" + ) + # Evaluate the template + self.assertEqual( + str(resolve_template(template)), + 'x[1,1,10] + ' + '(x[2,1,10] + x[2,1,20]) + ' + '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + ' + '(x[1,2,10]) + ' + '(x[2,2,10] + x[2,2,20]) + ' + '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' + '(x[1,3,10]) 
+ ' + '(x[2,3,10] + x[2,3,20]) + ' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0' + ) + + def test_multidim_nested_sum_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.J = RangeSet(3) + m.JI = m.J*m.I + m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) + m.x = Var(m.I,m.J,[10,20,30]) + @m.Constraint() + def c(m): + return sum( sum(m.x[i,j,k] for k in m.K[i]) + for j,i in m.JI) <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 0) + self.assertEqual( + template.to_string(verbose=True), + "templatesum(" + "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " + "iter(_1, _2, JI)) <= 0.0" + ) + self.assertEqual( + str(template), + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " + "for _1, _2 in JI) <= 0.0" + ) + # Evaluate the template + self.assertEqual( + str(resolve_template(template)), + 'x[1,1,10] + ' + '(x[2,1,10] + x[2,1,20]) + ' + '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + ' + '(x[1,2,10]) + ' + '(x[2,2,10] + x[2,2,20]) + ' + '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' + '(x[1,3,10]) + ' + '(x[2,3,10] + x[2,3,20]) + ' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0' + ) + + def test_multidim_nested_sum_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.J = RangeSet(3) + m.JI = m.J*m.I + m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) + m.x = Var(m.I,m.J,[10,20,30]) + @m.Constraint() + def c(m): + return sum( sum(m.x[i,j,k] for k in m.K[i]) + for j,i in m.JI) <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 0) + self.assertEqual( + template.to_string(verbose=True), + "templatesum(" + "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " + "iter(_1, _2, JI)) <= 0.0" + ) + self.assertEqual( + str(template), + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " + "for _1, _2 in JI) <= 0.0" + ) + # Evaluate the template + self.assertEqual( + str(resolve_template(template)), + 'x[1,1,10] + ' + '(x[2,1,10] + x[2,1,20]) + ' + '(x[3,1,10] + x[3,1,20] + 
x[3,1,30]) + ' + '(x[1,2,10]) + ' + '(x[2,2,10] + x[2,2,20]) + ' + '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' + '(x[1,3,10]) + ' + '(x[2,3,10] + x[2,3,20]) + ' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0' + ) + + def test_multidim_nested_getattr_sum_rule(self): + m = ConcreteModel() + m.I = RangeSet(3) + m.J = RangeSet(3) + m.JI = m.J*m.I + m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) + m.x = Var(m.I,m.J,[10,20,30]) + @m.Block(m.I) + def b(b, i): + b.K = RangeSet(10, 10*i, 10) + @m.Constraint() + def c(m): + return sum( sum(m.x[i,j,k] for k in m.b[i].K) + for j,i in m.JI) <= 0 + + template, indices = templatize_constraint(m.c) + self.assertEqual(len(indices), 0) + self.assertEqual( + template.to_string(verbose=True), + "templatesum(" + "templatesum(getitem(x, _2, _1, _3), " + "iter(_3, getattr(getitem(b, _2), 'K'))), " + "iter(_1, _2, JI)) <= 0.0" + ) + self.assertEqual( + str(template), + "SUM(SUM(x[_2,_1,_3] for _3 in b[_2].K) " + "for _1, _2 in JI) <= 0.0" + ) + # Evaluate the template + self.assertEqual( + str(resolve_template(template)), + 'x[1,1,10] + ' + '(x[2,1,10] + x[2,1,20]) + ' + '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + ' + '(x[1,2,10]) + ' + '(x[2,2,10] + x[2,2,20]) + ' + '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' + '(x[1,3,10]) + ' + '(x[2,3,10] + x[2,3,20]) + ' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0' + ) class TestTemplateSubstitution(unittest.TestCase): @@ -296,22 +579,22 @@ def diffeq(m,t, i): self.assertEqual( len(_map), 3 ) idx1 = _GetItemIndexer( m.x[t,1] ) - self.assertIs( idx1._base, m.x ) self.assertEqual( idx1.nargs(), 2 ) + self.assertIs( idx1.base, m.x ) self.assertIs( idx1.arg(0), t ) self.assertEqual( idx1.arg(1), 1 ) self.assertIn( idx1, _map ) idx2 = _GetItemIndexer( m.dxdt[t,2] ) - self.assertIs( idx2._base, m.dxdt ) self.assertEqual( idx2.nargs(), 2 ) + self.assertIs( idx2.base, m.dxdt ) self.assertIs( idx2.arg(0), t ) self.assertEqual( idx2.arg(1), 2 ) self.assertIn( idx2, _map ) idx3 = _GetItemIndexer( m.x[t,3] ) 
- self.assertIs( idx3._base, m.x ) self.assertEqual( idx3.nargs(), 2 ) + self.assertIs( idx3.base, m.x ) self.assertIs( idx3.arg(0), t ) self.assertEqual( idx3.arg(1), 3 ) self.assertIn( idx3, _map ) diff --git a/pyomo/core/tests/unit/test_units.py b/pyomo/core/tests/unit/test_units.py index 7cebe303c6b..8a2e647b9f1 100644 --- a/pyomo/core/tests/unit/test_units.py +++ b/pyomo/core/tests/unit/test_units.py @@ -13,6 +13,7 @@ import pyutilib.th as unittest from pyomo.environ import * +from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent from pyomo.core.base.template_expr import IndexTemplate from pyomo.core.expr import inequality import pyomo.core.expr.current as EXPR @@ -58,25 +59,25 @@ def test_PyomoUnit_NumericValueMethods(self): with self.assertRaises(TypeError): x = int(kg) - uc.assert_units_consistent(kg < m.kg) - uc.assert_units_consistent(kg > m.kg) - uc.assert_units_consistent(kg <= m.kg) - uc.assert_units_consistent(kg >= m.kg) - uc.assert_units_consistent(kg == m.kg) - uc.assert_units_consistent(kg + m.kg) - uc.assert_units_consistent(kg - m.kg) + assert_units_consistent(kg < m.kg) + assert_units_consistent(kg > m.kg) + assert_units_consistent(kg <= m.kg) + assert_units_consistent(kg >= m.kg) + assert_units_consistent(kg == m.kg) + assert_units_consistent(kg + m.kg) + assert_units_consistent(kg - m.kg) with self.assertRaises(InconsistentUnitsError): - uc.assert_units_consistent(kg + 3) + assert_units_consistent(kg + 3) with self.assertRaises(InconsistentUnitsError): - uc.assert_units_consistent(kg - 3) + assert_units_consistent(kg - 3) with self.assertRaises(InconsistentUnitsError): - uc.assert_units_consistent(3 + kg) + assert_units_consistent(3 + kg) with self.assertRaises(InconsistentUnitsError): - uc.assert_units_consistent(3 - kg) + assert_units_consistent(3 - kg) # should not assert # check __mul__ @@ -93,7 +94,7 @@ def test_PyomoUnit_NumericValueMethods(self): # check rpow x = 2 ** kg # creation is allowed, only fails 
when units are "checked" with self.assertRaises(UnitsError): - uc.assert_units_consistent(x) + assert_units_consistent(x) x = kg x += kg @@ -143,7 +144,7 @@ def _get_check_units_ok(self, x, pyomo_units_container, str_check=None, expected if expected_type is not None: self.assertEqual(expected_type, type(x)) - pyomo_units_container.assert_units_consistent(x) + assert_units_consistent(x) if str_check is not None: self.assertEqual(str_check, str(pyomo_units_container.get_units(x))) else: @@ -155,7 +156,7 @@ def _get_check_units_fail(self, x, pyomo_units_container, expected_type=None, ex self.assertEqual(expected_type, type(x)) with self.assertRaises(expected_error): - pyomo_units_container.assert_units_consistent(x) + assert_units_consistent(x) # we also expect get_units to fail with self.assertRaises(expected_error): @@ -176,6 +177,8 @@ def test_get_check_units_on_all_expressions(self): model.y = Var() model.z = Var() model.p = Param(initialize=42.0, mutable=True) + model.xkg = Var(units=kg) + model.ym = Var(units=m) # test equality self._get_check_units_ok(3.0*kg == 1.0*kg, uc, 'kg', EXPR.EqualityExpression) @@ -354,11 +357,11 @@ def test_get_check_units_on_all_expressions(self): self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=m, ELSE=kg), uc, EXPR.Expr_ifExpression) - # test IndexTemplate and GetItemExpression + # test EXPR.IndexTemplate and GetItemExpression model.S = Set() - i = IndexTemplate(model.S) - j = IndexTemplate(model.S) - self._get_check_units_ok(i, uc, None, IndexTemplate) + i = EXPR.IndexTemplate(model.S) + j = EXPR.IndexTemplate(model.S) + self._get_check_units_ok(i, uc, None, EXPR.IndexTemplate) model.mat = Var(model.S, model.S) self._get_check_units_ok(model.mat[i,j+1], uc, None, EXPR.GetItemExpression) @@ -377,6 +380,16 @@ def test_get_check_units_on_all_expressions(self): self._get_check_units_fail(model.ef2(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError) self._get_check_units_fail(model.ef2(2.0*kg, 1.0), 
uc, EXPR.NPV_ExternalFunctionExpression, UnitsError) + # test ExternalFunctionExpression, NPV_ExternalFunctionExpression + model.ef3 = ExternalFunction(python_callback_function, units=uc.kg, arg_units=[uc.kg, uc.m]) + self._get_check_units_fail(model.ef3(model.x, model.y), uc, EXPR.ExternalFunctionExpression) + self._get_check_units_fail(model.ef3(1.0, 2.0), uc, EXPR.NPV_ExternalFunctionExpression) + self._get_check_units_fail(model.ef3(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError) + self._get_check_units_fail(model.ef3(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError) + self._get_check_units_ok(model.ef3(2.0*kg, 1.0*uc.m), uc, 'kg', EXPR.NPV_ExternalFunctionExpression) + self._get_check_units_ok(model.ef3(model.x*kg, model.y*m), uc, 'kg', EXPR.ExternalFunctionExpression) + self._get_check_units_ok(model.ef3(model.xkg, model.ym), uc, 'kg', EXPR.ExternalFunctionExpression) + self._get_check_units_fail(model.ef3(model.ym, model.xkg), uc, EXPR.ExternalFunctionExpression, InconsistentUnitsError) # @unittest.skip('Skipped testing LinearExpression since StreamBasedExpressionVisitor does not handle LinearExpressions') def test_linear_expression(self): @@ -440,11 +453,11 @@ def test_temperatures(self): ex = 2.0*delta_degC + 3.0*delta_degC + 1.0*delta_degC self.assertEqual(type(ex), EXPR.NPV_SumExpression) - uc.assert_units_consistent(ex) + assert_units_consistent(ex) ex = 2.0*delta_degF + 3.0*delta_degF self.assertEqual(type(ex), EXPR.NPV_SumExpression) - uc.assert_units_consistent(ex) + assert_units_consistent(ex) self._get_check_units_fail(2.0*K + 3.0*R, uc, EXPR.NPV_SumExpression) self._get_check_units_fail(2.0*delta_degC + 3.0*delta_degF, uc, EXPR.NPV_SumExpression) @@ -510,30 +523,22 @@ def test_convert(self): self.assertAlmostEqual(value(m.dy_con.body), 0.0, places=5) self.assertAlmostEqual(value(m.ground.body), 0.0, places=5) - def test_assert_units_consistent(self): + def test_convert_dimensionless(self): u = units m = 
ConcreteModel() - m.dx = Var(units=u.m, initialize=0.10188943773836046) - m.dy = Var(units=u.m, initialize=0.0) - m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.t = Var(units=u.min, bounds=(1e-5,10.0), initialize=0.0024015570927624456) - m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians) - m.a = Param(initialize=-32.2, units=u.ft/u.s**2) - - m.obj = Objective(expr = m.dx, sense=maximize) - m.vx_con = Constraint(expr = m.vx == 1.0*u.m/u.s*cos(m.theta)) - m.vy_con = Constraint(expr = m.vy == 1.0*u.m/u.s*sin(m.theta)) - m.dx_con = Constraint(expr = m.dx == m.vx*u.convert(m.t, to_units=u.s)) - m.dy_con = Constraint(expr = m.dy == m.vy*u.convert(m.t, to_units=u.s) - + 0.5*(u.convert(m.a, to_units=u.m/u.s**2))*(u.convert(m.t, to_units=u.s))**2) - m.ground = Constraint(expr = m.dy == 0) - - print(isinstance(m, Block)) - u.assert_units_consistent(m) - m.broken = Constraint(expr = m.dy == 42.0*u.kg) - with self.assertRaises(UnitsError): - u.assert_units_consistent(m) + m.x = Var() + foo = u.convert(m.x, to_units=u.dimensionless) + foo = u.convert(m.x, to_units=None) + foo = u.convert(m.x, to_units=1.0) + with self.assertRaises(InconsistentUnitsError): + foo = u.convert(m.x, to_units=u.kg) + m.y = Var(units=u.kg) + with self.assertRaises(InconsistentUnitsError): + foo = u.convert(m.y, to_units=u.dimensionless) + with self.assertRaises(InconsistentUnitsError): + foo = u.convert(m.y, to_units=None) + with self.assertRaises(InconsistentUnitsError): + foo = u.convert(m.y, to_units=1.0) def test_usd(self): u = units diff --git a/pyomo/core/tests/unit/test_visitor.py b/pyomo/core/tests/unit/test_visitor.py index 734f1ede225..9c3227f88be 100644 --- a/pyomo/core/tests/unit/test_visitor.py +++ b/pyomo/core/tests/unit/test_visitor.py @@ -26,6 +26,7 @@ from pyomo.environ import * import pyomo.kernel +from pyomo.common.log import LoggingIntercept from pyomo.core.expr.numvalue 
import ( native_types, nonpyomo_leaf_types, NumericConstant, as_numeric, is_potentially_variable, @@ -55,7 +56,7 @@ from pyomo.core.base.var import SimpleVar from pyomo.core.base.param import _ParamData, SimpleParam from pyomo.core.base.label import * -from pyomo.core.base.template_expr import IndexTemplate +from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr.expr_errors import TemplateExpressionError @@ -730,7 +731,7 @@ def test_default(self): self.assertEqual(ans, ref) def test_beforeChild(self): - def before(node, child): + def before(node, child, child_idx): if type(child) in nonpyomo_leaf_types \ or not child.is_expression_type(): return False, [child] @@ -752,10 +753,40 @@ def before(node, child): ref = [] self.assertEqual(str(ans), str(ref)) + def test_old_beforeChild(self): + def before(node, child): + if type(child) in nonpyomo_leaf_types \ + or not child.is_expression_type(): + return False, [child] + os = six.StringIO() + with LoggingIntercept(os, 'pyomo'): + walker = StreamBasedExpressionVisitor(beforeChild=before) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the beforeChild() " + "method", os.getvalue().replace('\n',' ')) + + ans = walker.walk_expression(self.e) + m = self.m + ref = [ + [[m.x], [2]], + [m.y], + [[m.z], [[m.x], [m.y]]] + ] + self.assertEqual(str(ans), str(ref)) + + ans = walker.walk_expression(m.x) + ref = [] + self.assertEqual(str(ans), str(ref)) + + ans = walker.walk_expression(2) + ref = [] + self.assertEqual(str(ans), str(ref)) + def test_reduce_in_accept(self): def enter(node): return None, 1 - def accept(node, data, child_result): + def accept(node, data, child_result, child_idx): return data + child_result walker = StreamBasedExpressionVisitor( enterNode=enter, acceptChildResult=accept) @@ -878,6 +909,24 @@ def exit(node, data): self.assertEqual(str(ans), str(ref)) def test_beforeChild_acceptChildResult_afterChild(self): + 
counts = [0,0,0] + def before(node, child, child_idx): + counts[0] += 1 + if type(child) in nonpyomo_leaf_types \ + or not child.is_expression_type(): + return False, None + def accept(node, data, child_result, child_idx): + counts[1] += 1 + def after(node, child, child_idx): + counts[2] += 1 + walker = StreamBasedExpressionVisitor( + beforeChild=before, acceptChildResult=accept, afterChild=after) + ans = walker.walk_expression(self.e) + m = self.m + self.assertEqual(ans, None) + self.assertEquals(counts, [9,9,9]) + + def test_OLD_beforeChild_acceptChildResult_afterChild(self): counts = [0,0,0] def before(node, child): counts[0] += 1 @@ -888,8 +937,24 @@ def accept(node, data, child_result): counts[1] += 1 def after(node, child): counts[2] += 1 - walker = StreamBasedExpressionVisitor( - beforeChild=before, acceptChildResult=accept, afterChild=after) + + os = six.StringIO() + with LoggingIntercept(os, 'pyomo'): + walker = StreamBasedExpressionVisitor( + beforeChild=before, acceptChildResult=accept, afterChild=after) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "beforeChild() method", os.getvalue().replace('\n',' ')) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "acceptChildResult() method", os.getvalue().replace('\n',' ')) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "afterChild() method", os.getvalue().replace('\n',' ')) + ans = walker.walk_expression(self.e) m = self.m self.assertEqual(ans, None) @@ -897,11 +962,11 @@ def after(node, child): def test_enterNode_acceptChildResult_beforeChild(self): ans = [] - def before(node, child): + def before(node, child, child_idx): if type(child) in nonpyomo_leaf_types \ or not child.is_expression_type(): return False, child - def accept(node, data, child_result): + def 
accept(node, data, child_result, child_idx): if data is not child_result: data.append(child_result) return data @@ -916,11 +981,11 @@ def enter(node): def test_finalize(self): ans = [] - def before(node, child): + def before(node, child, child_idx): if type(child) in nonpyomo_leaf_types \ or not child.is_expression_type(): return False, child - def accept(node, data, child_result): + def accept(node, data, child_result, child_idx): if data is not child_result: data.append(child_result) return data @@ -945,11 +1010,11 @@ def enter(node): ans.append("Enter %s" % (name(node))) def exit(node, data): ans.append("Exit %s" % (name(node))) - def before(node, child): + def before(node, child, child_idx): ans.append("Before %s (from %s)" % (name(child), name(node))) - def accept(node, data, child_result): + def accept(node, data, child_result, child_idx): ans.append("Accept into %s" % (name(node))) - def after(node, child): + def after(node, child, child_idx): ans.append("After %s (from %s)" % (name(child), name(node))) def finalize(result): ans.append("Finalize") @@ -1007,6 +1072,81 @@ def finalize(result): Finalize""") def test_all_derived_class(self): + def name(x): + if type(x) in nonpyomo_leaf_types: + return str(x) + else: + return x.name + class all_callbacks(StreamBasedExpressionVisitor): + def __init__(self): + self.ans = [] + super(all_callbacks, self).__init__() + def enterNode(self, node): + self.ans.append("Enter %s" % (name(node))) + def exitNode(self, node, data): + self.ans.append("Exit %s" % (name(node))) + def beforeChild(self, node, child, child_idx): + self.ans.append("Before %s (from %s)" + % (name(child), name(node))) + def acceptChildResult(self, node, data, child_result, child_idx): + self.ans.append("Accept into %s" % (name(node))) + def afterChild(self, node, child, child_idx): + self.ans.append("After %s (from %s)" + % (name(child), name(node))) + def finalizeResult(self, result): + self.ans.append("Finalize") + walker = all_callbacks() + 
self.assertIsNone( walker.walk_expression(self.e) ) + self.assertEqual("\n".join(walker.ans),"""Enter sum +Before pow (from sum) +Enter pow +Before x (from pow) +Enter x +Exit x +Accept into pow +After x (from pow) +Before 2 (from pow) +Enter 2 +Exit 2 +Accept into pow +After 2 (from pow) +Exit pow +Accept into sum +After pow (from sum) +Before y (from sum) +Enter y +Exit y +Accept into sum +After y (from sum) +Before prod (from sum) +Enter prod +Before z (from prod) +Enter z +Exit z +Accept into prod +After z (from prod) +Before sum (from prod) +Enter sum +Before x (from sum) +Enter x +Exit x +Accept into sum +After x (from sum) +Before y (from sum) +Enter y +Exit y +Accept into sum +After y (from sum) +Exit sum +Accept into prod +After sum (from prod) +Exit prod +Accept into sum +After prod (from sum) +Exit sum +Finalize""") + + def test_all_derived_class_oldAPI(self): def name(x): if type(x) in nonpyomo_leaf_types: return str(x) @@ -1030,7 +1170,22 @@ def afterChild(self, node, child): % (name(child), name(node))) def finalizeResult(self, result): self.ans.append("Finalize") - walker = all_callbacks() + os = six.StringIO() + with LoggingIntercept(os, 'pyomo'): + walker = all_callbacks() + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "beforeChild() method", os.getvalue().replace('\n',' ')) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "acceptChildResult() method", os.getvalue().replace('\n',' ')) + self.assertIn( + "Note that the API for the StreamBasedExpressionVisitor " + "has changed to include the child index for the " + "afterChild() method", os.getvalue().replace('\n',' ')) + self.assertIsNone( walker.walk_expression(self.e) ) self.assertEqual("\n".join(walker.ans),"""Enter sum Before pow (from sum) diff --git a/pyomo/dae/flatten.py b/pyomo/dae/flatten.py index 43b34eaa736..ea6e392fe40 
100644 --- a/pyomo/dae/flatten.py +++ b/pyomo/dae/flatten.py @@ -9,7 +9,7 @@ # ___________________________________________________________________________ from pyomo.core.base import Block, Var, Reference from pyomo.core.base.block import SubclassOf -from pyomo.core.base.indexed_component_slice import _IndexedComponent_slice +from pyomo.core.base.indexed_component_slice import IndexedComponent_slice def generate_time_only_slices(obj, time): @@ -49,7 +49,7 @@ def generate_time_only_slices(obj, time): tmp_sliced = {i: slice(None) for i in regular_idx} tmp_fixed = {time_idx: time.first()} tmp_ellipsis = ellipsis_idx - _slice = _IndexedComponent_slice( + _slice = IndexedComponent_slice( obj, tmp_fixed, tmp_sliced, tmp_ellipsis ) # For each combination of regular indices, we can generate a single @@ -62,7 +62,7 @@ def generate_time_only_slices(obj, time): (i, val) if i tol or + value(condata.lower) - value(condata.body) > tol): + inconsistent.add(condata) + + for blk in model.component_objects(Block, active=True): + # What if there are time-indexed blocks at multiple levels + # of a hierarchy? + # My preferred convention is to only check the first (highest- + # level) time index, but distinguishing between different-level + # time indices is an expensive operation. + if not is_explicitly_indexed_by(blk, time): + continue + if is_in_block_indexed_by(blk, time): + continue + info = get_index_set_except(blk, time) + non_time_set = info['set_except'] + index_getter = info['index_getter'] + for non_time_index in non_time_set: + index = index_getter(non_time_index, t0) + blkdata = blk[index] + for condata in blkdata.component_data_objects(Constraint, + active=True): + if (value(condata.body) - value(condata.upper) > tol or + value(condata.lower) - value(condata.body) > tol): + if condata in inconsistent: + raise ValueError( + '%s has already been visited. 
The only way this ' + 'should happen is if the model has nested time-' + 'indexed blocks, which is not supported.') + inconsistent.add(condata) + + return list(inconsistent) + + +def solve_consistent_initial_conditions(model, time, solver): + """ + Solves a model with all Constraints and Blocks deactivated except + at the initial value of the Set time. Reactivates Constraints and + Blocks that got deactivated. + + Args: + model: Model that will be solved + time: Set whose initial conditions will remain active for solve + solver: Something that implements a solve method that accepts + a model as an argument + + Returns: + The object returned by the solver's solve method + """ + # Need to deactivate discretization equations, wrt time, at t == 0 + # This is challenging as the only way (to my knowledge) to do this + # is to identify_variables in the expression, find the (assume only one?) + # DerivativeVar, and access its get_continuousset_list + # I would like a get_continuousset_list for discretization equations. 
+ # Possibly as a ComponentMap, possibly as an attribute of some new + # DiscEquation subclass of Constraint + # Until I have this, this function will only work for backward + # discretization schemes + + # Also, would like to be able to check for zero degrees of freedom here + + scheme = time.get_discretization_info()['scheme'] + if scheme != 'LAGRANGE-RADAU' and scheme != 'BACKWARD Difference': + raise NotImplementedError( + '%s discretization scheme is not supported' % scheme) + + t0 = time.first() + timelist = list(time)[1:] + deactivated_dict = deactivate_model_at(model, time, timelist) + + result = solver.solve(model) + + for t in timelist: + for comp in deactivated_dict[t]: + comp.activate() + + return result + diff --git a/pyomo/dae/misc.py b/pyomo/dae/misc.py index 682c31c8709..32a75073fb2 100644 --- a/pyomo/dae/misc.py +++ b/pyomo/dae/misc.py @@ -333,23 +333,10 @@ def _update_block(blk): 'function on Block-derived components that override ' 'construct()' % blk.name) - # Code taken from the construct() method of Block missing_idx = getattr(blk, '_dae_missing_idx', set([])) for idx in list(missing_idx): - _block = blk[idx] - obj = apply_indexed_rule( - blk, blk._rule, _block, idx, blk._options) - - if isinstance(obj, _BlockData) and obj is not _block: - # If the user returns a block, use their block instead - # of the empty one we just created. 
- for c in list(obj.component_objects(descend_into=False)): - obj.del_component(c) - _block.add_component(c.local_name, c) - # transfer over any other attributes that are not components - for name, val in iteritems(obj.__dict__): - if not hasattr(_block, name) and not hasattr(blk, name): - super(_BlockData, _block).__setattr__(name, val) + # Trigger block creation (including calling the Block's rule) + blk[idx] # Remove book-keeping data after Block is discretized if hasattr(blk, '_dae_missing_idx'): diff --git a/pyomo/dae/set_utils.py b/pyomo/dae/set_utils.py index cae836796fb..c1a979c9cf5 100644 --- a/pyomo/dae/set_utils.py +++ b/pyomo/dae/set_utils.py @@ -9,10 +9,15 @@ # ___________________________________________________________________________ from collections import Counter -from pyomo.kernel import ComponentSet +from pyomo.core.base import Constraint, Block +from pyomo.core.kernel.component_set import ComponentSet from pyomo.core.base.set import SetProduct +def index_warning(name, index): + return 'WARNING: %s has no index %s' % (name, index) + + def is_explicitly_indexed_by(comp, *sets, **kwargs): """ Function for determining whether a pyomo component is indexed by a @@ -137,8 +142,10 @@ def get_index_set_except(comp, *sets): raise ValueError(msg) # Need to know the location of each set within comp's index_set # location will map: - # location_in_comp_index_set -> location_in_sets + # location in comp's subsets() -> location in input sets location = {} + # location should be well defined even for higher dimension sets + # because this maps between lists of sets, not lists of indices other_ind_sets = [] for ind_loc, ind_set in enumerate(projection_sets): found_set = False @@ -150,8 +157,8 @@ def get_index_set_except(comp, *sets): if not found_set: other_ind_sets.append(ind_set) else: - # If index_set has no set_tuple, it must be a SimpleSet, and - # len(sets) == 1 (because comp is indexed by every set in sets). 
+ # If index_set is not a SetProduct, only one set must have been + # provided, so len(sets) == 1 # Location in sets and in comp's indexing set are the same. location = {0: 0} other_ind_sets = [] @@ -219,3 +226,59 @@ def _complete_index(loc, index, *newvals): newval = (newval,) index = index[0:i] + newval + index[i:] return index + + +def deactivate_model_at(b, cset, pts, allow_skip=True, + suppress_warnings=False): + """ + Finds any block or constraint in block b, indexed explicitly (and not + implicitly) by cset, and deactivates it at points specified. + Implicitly indexed components are excluded because one of their parent + blocks will be deactivated, so deactivating them too would be redundant. + + Args: + b : Block to search + cset : ContinuousSet of interest + pts : Value or list of values, in ContinuousSet, to deactivate at + + Returns: + A dictionary mapping points in pts to lists of + component data that have been deactivated there + """ + if type(pts) is not list: + pts = [pts] + for pt in pts: + if pt not in cset: + msg = str(pt) + ' is not in ContinuousSet ' + cset.name + raise ValueError(msg) + deactivated = {pt: [] for pt in pts} + + visited = set() + for comp in b.component_objects([Block, Constraint], active=True): + # Record components that have been visited in case component_objects + # contains duplicates (due to references) + if id(comp) in visited: + continue + visited.add(id(comp)) + + if (is_explicitly_indexed_by(comp, cset) and + not is_in_block_indexed_by(comp, cset)): + info = get_index_set_except(comp, cset) + non_cset_set = info['set_except'] + index_getter = info['index_getter'] + + for non_cset_index in non_cset_set: + for pt in pts: + index = index_getter(non_cset_index, pt) + try: + comp[index].deactivate() + deactivated[pt].append(comp[index]) + except KeyError: + # except KeyError to allow Constraint/Block.Skip + if not suppress_warnings: + print(index_warning(comp.name, index)) + if not allow_skip: + raise + continue + + return 
deactivated diff --git a/pyomo/dae/simulator.py b/pyomo/dae/simulator.py index dcc59d4d061..b021ec13de3 100644 --- a/pyomo/dae/simulator.py +++ b/pyomo/dae/simulator.py @@ -6,14 +6,18 @@ # the U.S. Government retains certain rights in this software. # This software is distributed under the BSD License. # _________________________________________________________________________ -from pyomo.core.base import Constraint, Param, value, Suffix, Block +from pyomo.core.base import Constraint, Param, Var, value, Suffix, Block from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.diffvar import DAE_Error from pyomo.core.expr import current as EXPR -from pyomo.core.expr.numvalue import NumericValue, native_numeric_types -from pyomo.core.base.template_expr import IndexTemplate, _GetItemIndexer +from pyomo.core.expr.numvalue import ( + NumericValue, native_numeric_types, nonpyomo_leaf_types, +) +from pyomo.core.expr.template_expr import IndexTemplate, _GetItemIndexer +from pyomo.core.base.indexed_component_slice import IndexedComponent_slice +from pyomo.core.base.reference import Reference from six import iterkeys, itervalues @@ -75,7 +79,7 @@ def _check_getitemexpression(expr, i): GetItemExpression for the :py:class:`DerivativeVar` and the RHS. If not, return None. 
""" - if type(expr.arg(i)._base) is DerivativeVar: + if type(expr.arg(i).arg(0)) is DerivativeVar: return [expr.arg(i), expr.arg(1 - i)] else: return None @@ -106,7 +110,7 @@ def _check_productexpression(expr, i): elif curr.__class__ is EXPR.ReciprocalExpression: stack.append((curr.arg(0), - e_)) elif type(curr) is EXPR.GetItemExpression and \ - type(curr._base) is DerivativeVar: + type(curr.arg(0)) is DerivativeVar: dv = (curr, e_) else: pterms.append((curr, e_)) @@ -140,7 +144,7 @@ def _check_negationexpression(expr, i): arg = expr.arg(i).arg(0) if type(arg) is EXPR.GetItemExpression and \ - type(arg._base) is DerivativeVar: + type(arg.arg(0)) is DerivativeVar: return [arg, - expr.arg(1 - i)] if type(arg) is EXPR.ProductExpression: @@ -151,7 +155,7 @@ def _check_negationexpression(expr, i): not lhs.is_potentially_variable()): return None if not (type(rhs) is EXPR.GetItemExpression and - type(rhs._base) is DerivativeVar): + type(rhs.arg(0)) is DerivativeVar): return None return [rhs, - expr.arg(1 - i) / lhs] @@ -178,7 +182,7 @@ def _check_viewsumexpression(expr, i): if dv is not None: items.append(item) elif type(item) is EXPR.GetItemExpression and \ - type(item._base) is DerivativeVar: + type(item.arg(0)) is DerivativeVar: dv = item elif type(item) is EXPR.ProductExpression: # This will contain the constant coefficient if there is one @@ -188,7 +192,7 @@ def _check_viewsumexpression(expr, i): if (type(lhs) in native_numeric_types or not lhs.is_potentially_variable()) \ and (type(rhs) is EXPR.GetItemExpression and - type(rhs._base) is DerivativeVar): + type(rhs.arg(0)) is DerivativeVar): dv = rhs dvcoef = lhs else: @@ -224,9 +228,8 @@ def visiting_potential_leaf(self, node): if _id not in self.templatemap: self.templatemap[_id] = Param(mutable=True) self.templatemap[_id].construct() - _args = [] self.templatemap[_id]._name = "%s[%s]" % ( - node._base.name, ','.join(str(x) for x in _id._args)) + _id.base.name, ','.join(str(x) for x in _id.args)) return True, 
self.templatemap[_id] return super( @@ -283,7 +286,7 @@ def visiting_potential_leaf(self, node): _id = _GetItemIndexer(node) if _id not in self.templatemap: name = "%s[%s]" % ( - node._base.name, ','.join(str(x) for x in _id._args)) + _id.base.name, ','.join(str(x) for x in _id.args)) self.templatemap[_id] = casadi.SX.sym(name) return True, self.templatemap[_id] @@ -615,7 +618,7 @@ def __init__(self, m, package='scipy'): diffvars = [] for deriv in derivlist: - sv = deriv._base.get_state_var() + sv = deriv.base.get_state_var() diffvars.append(_GetItemIndexer(sv[deriv._args])) # Create ordered list of algebraic variables and time-varying @@ -623,7 +626,7 @@ def __init__(self, m, package='scipy'): algvars = [] for item in iterkeys(templatemap): - if item._base.name in derivs: + if item.base.name in derivs: # Make sure there are no DerivativeVars in the # template map raise DAE_Error( @@ -653,7 +656,7 @@ def _rhsfun(t, x): for _id in diffvars: if _id not in templatemap: name = "%s[%s]" % ( - _id._base.name, ','.join(str(x) for x in _id._args)) + _id.base.name, ','.join(str(x) for x in _id.args)) templatemap[_id] = casadi.SX.sym(name) self._contset = contset diff --git a/pyomo/dae/tests/test_flatten.py b/pyomo/dae/tests/test_flatten.py index 1bb0fe340e3..04dbc76f269 100644 --- a/pyomo/dae/tests/test_flatten.py +++ b/pyomo/dae/tests/test_flatten.py @@ -9,7 +9,7 @@ # ___________________________________________________________________________ import pyutilib.th as unittest -from pyomo.environ import ConcreteModel, Block, Var, Reference +from pyomo.environ import ConcreteModel, Block, Var, Reference, Set from pyomo.dae import ContinuousSet # This inport will have to change when we decide where this should go... 
from pyomo.dae.flatten import flatten_dae_variables @@ -126,6 +126,30 @@ def test_2dim_set(self): for ref in dae: self.assertIn(self._hashRef(ref), ref_data) + + def test_indexed_block(self): + m = ConcreteModel() + m.time = ContinuousSet(bounds=(0,1)) + m.comp = Set(initialize=['a', 'b']) + + def bb_rule(bb, t): + bb.dae_var = Var() + + def b_rule(b, c): + b.bb = Block(m.time, rule=bb_rule) + + m.b = Block(m.comp, rule=b_rule) + + scalar, dae = flatten_dae_variables(m, m.time) + self.assertEqual(len(scalar), 0) + ref_data = { + self._hashRef(Reference(m.b['a'].bb[:].dae_var)), + self._hashRef(Reference(m.b['b'].bb[:].dae_var)), + } + self.assertEqual(len(dae), len(ref_data)) + for ref in dae: + self.assertIn(self._hashRef(ref), ref_data) + # TODO: Add tests for Sets with dimen==None diff --git a/pyomo/dae/tests/test_initialization.py b/pyomo/dae/tests/test_initialization.py new file mode 100644 index 00000000000..08425902b38 --- /dev/null +++ b/pyomo/dae/tests/test_initialization.py @@ -0,0 +1,115 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +""" +Unit Tests for pyomo.dae.init_cond +""" +import os +from os.path import abspath, dirname + +from six import StringIO + +import pyutilib.th as unittest + +from pyomo.core.base import * +from pyomo.environ import SolverFactory +from pyomo.common.log import LoggingIntercept +from pyomo.dae import * +from pyomo.dae.initialization import * +from pyomo.core.kernel.component_map import ComponentMap + +currdir = dirname(abspath(__file__)) + os.sep + +ipopt_available = SolverFactory('ipopt').available() + + +def make_model(): + m = ConcreteModel() + m.time = ContinuousSet(bounds=(0, 10)) + m.space = ContinuousSet(bounds=(0, 5)) + m.set1 = Set(initialize=['a', 'b', 'c']) + m.set2 = Set(initialize=['d', 'e', 'f']) + m.fs = Block() + + m.fs.v0 = Var(m.space, initialize=1) + + @m.fs.Block() + def b1(b): + b.v = Var(m.time, m.space, initialize=1) + b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0) + + b.con = Constraint(m.time, m.space, + rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x]) + # Inconsistent + + @b.Block(m.time) + def b2(b, t): + b.v = Var(initialize=2) + + @m.fs.Block(m.time, m.space) + def b2(b, t, x): + b.v = Var(m.set1, initialize=2) + + @b.Block(m.set1) + def b3(b, c): + b.v = Var(m.set2, initialize=3) + + @b.Constraint(m.set2) + def con(b, s): + return (5*b.v[s] == + m.fs.b2[m.time.first(), m.space.first()].v[c]) + # inconsistent + + @m.fs.Constraint(m.time) + def con1(fs, t): + return fs.b1.v[t, m.space.last()] == 5 + # Will be inconsistent + + @m.fs.Constraint(m.space) + def con2(fs, x): + return fs.b1.v[m.time.first(), x] == fs.v0[x] + # will be consistent + + disc = TransformationFactory('dae.collocation') + disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU') + disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU') + + return m + + +class TestDaeInitCond(unittest.TestCase): + + def test_get_inconsistent_initial_conditions(self): + m = 
make_model() + inconsistent = get_inconsistent_initial_conditions(m, m.time) + + self.assertIn(m.fs.b1.con[m.time[1], m.space[1]], inconsistent) + self.assertIn(m.fs.b2[m.time[1], m.space[1]].b3['a'].con['d'], + inconsistent) + self.assertIn(m.fs.con1[m.time[1]], inconsistent) + self.assertNotIn(m.fs.con2[m.space[1]], inconsistent) + + + @unittest.skipIf(not ipopt_available, 'ipopt is not available') + def test_solve_consistent_initial_conditions(self): + m = make_model() + solver = SolverFactory('ipopt') + solve_consistent_initial_conditions(m, m.time, solver) + inconsistent = get_inconsistent_initial_conditions(m, m.time) + self.assertFalse(inconsistent) + + self.assertTrue(m.fs.con1[m.time[1]].active) + self.assertTrue(m.fs.con1[m.time[3]].active) + self.assertTrue(m.fs.b1.con[m.time[1], m.space[1]].active) + self.assertTrue(m.fs.b1.con[m.time[3], m.space[1]].active) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/dae/tests/test_set_utils.py b/pyomo/dae/tests/test_set_utils.py index 213b86d469b..182192f7ee8 100644 --- a/pyomo/dae/tests/test_set_utils.py +++ b/pyomo/dae/tests/test_set_utils.py @@ -18,7 +18,8 @@ import pyutilib.th as unittest -from pyomo.environ import * +from pyomo.core.base import (Block, Constraint, ConcreteModel, Var, Set, + TransformationFactory) from pyomo.common.log import LoggingIntercept from pyomo.dae import * from pyomo.dae.set_utils import * @@ -27,6 +28,60 @@ currdir = dirname(abspath(__file__)) + os.sep +def make_model(): + m = ConcreteModel() + m.time = ContinuousSet(bounds=(0, 10)) + m.space = ContinuousSet(bounds=(0, 5)) + m.set1 = Set(initialize=['a', 'b', 'c']) + m.set2 = Set(initialize=['d', 'e', 'f']) + m.fs = Block() + + m.fs.v0 = Var(m.space, initialize=1) + + @m.fs.Block() + def b1(b): + b.v = Var(m.time, m.space, initialize=1) + b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0) + + b.con = Constraint(m.time, m.space, + rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x]) + # Inconsistent + + 
@b.Block(m.time) + def b2(b, t): + b.v = Var(initialize=2) + + @m.fs.Block(m.time, m.space) + def b2(b, t, x): + b.v = Var(m.set1, initialize=2) + + @b.Block(m.set1) + def b3(b, c): + b.v = Var(m.set2, initialize=3) + + @b.Constraint(m.set2) + def con(b, s): + return (5*b.v[s] == + m.fs.b2[m.time.first(), m.space.first()].v[c]) + # inconsistent + + @m.fs.Constraint(m.time) + def con1(fs, t): + return fs.b1.v[t, m.space.last()] == 5 + # Will be inconsistent + + @m.fs.Constraint(m.space) + def con2(fs, x): + return fs.b1.v[m.time.first(), x] == fs.v0[x] + # will be consistent + + disc = TransformationFactory('dae.collocation') + disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU') + disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU') + + return m + + class TestDaeSetUtils(unittest.TestCase): # Test explicit/implicit index detection functions @@ -256,6 +311,28 @@ def test_get_index_set_except(self): with self.assertRaises(ValueError): info = get_index_set_except(m.v8, m.space) + def test_deactivate_model_at(self): + m = make_model() + + deactivate_model_at(m, m.time, m.time[2]) + self.assertTrue(m.fs.con1[m.time[1]].active) + self.assertFalse(m.fs.con1[m.time[2]].active) + self.assertTrue(m.fs.con2[m.space[1]].active) + self.assertFalse(m.fs.b1.con[m.time[2], m.space[1]].active) + self.assertFalse(m.fs.b2[m.time[2], m.space.last()].active) + self.assertTrue(m.fs.b2[m.time[2], m.space.last()].b3['a'].con['e'].active) + + deactivate_model_at(m, m.time, [m.time[1], m.time[3]]) + # disc equations at time.first() + self.assertFalse(m.fs.con1[m.time[1]].active) + self.assertFalse(m.fs.con1[m.time[3]].active) + self.assertFalse(m.fs.b1.con[m.time[1], m.space[1]].active) + self.assertFalse(m.fs.b1.con[m.time[3], m.space[1]].active) + + with self.assertRaises(KeyError): + deactivate_model_at(m, m.time, m.time[1], allow_skip=False, + suppress_warnings=True) + if __name__ == "__main__": unittest.main() diff --git 
a/pyomo/dae/tests/test_simulator.py b/pyomo/dae/tests/test_simulator.py index 2d18838769f..9d4a9906443 100644 --- a/pyomo/dae/tests/test_simulator.py +++ b/pyomo/dae/tests/test_simulator.py @@ -29,7 +29,7 @@ _check_viewsumexpression, substitute_pyomo2casadi, ) -from pyomo.core.base.template_expr import ( +from pyomo.core.expr.template_expr import ( IndexTemplate, _GetItemIndexer, ) @@ -922,8 +922,8 @@ def test_check_getitemexpression(self): temp = _check_getitemexpression(e, 0) self.assertIs(e.arg(0), temp[0]) self.assertIs(e.arg(1), temp[1]) - self.assertIs(m.dv, temp[0]._base) - self.assertIs(m.v, temp[1]._base) + self.assertIs(m.dv, temp[0].arg(0)) + self.assertIs(m.v, temp[1].arg(0)) temp = _check_getitemexpression(e, 1) self.assertIsNone(temp) @@ -931,8 +931,8 @@ def test_check_getitemexpression(self): temp = _check_getitemexpression(e, 1) self.assertIs(e.arg(0), temp[1]) self.assertIs(e.arg(1), temp[0]) - self.assertIs(m.dv, temp[0]._base) - self.assertIs(m.v, temp[1]._base) + self.assertIs(m.dv, temp[0].arg(0)) + self.assertIs(m.v, temp[1].arg(0)) temp = _check_getitemexpression(e, 0) self.assertIsNone(temp) @@ -954,36 +954,36 @@ def test_check_productexpression(self): # Check multiplication by constant e = 5 * m.dv[t] == m.v[t] temp = _check_productexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) e = m.v[t] == 5 * m.dv[t] temp = _check_productexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) # Check multiplication by fixed param e = m.p * m.dv[t] == m.v[t] temp = _check_productexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) e = m.v[t] == m.p * m.dv[t] temp = _check_productexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) 
self.assertIs(type(temp[1]), EXPR.DivisionExpression) # Check multiplication by mutable param e = m.mp * m.dv[t] == m.v[t] temp = _check_productexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal self.assertIs(e.arg(1), temp[1].arg(0)) e = m.v[t] == m.mp * m.dv[t] temp = _check_productexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal self.assertIs(e.arg(0), temp[1].arg(0)) @@ -991,14 +991,14 @@ def test_check_productexpression(self): # Check multiplication by var e = m.y * m.dv[t] / m.z == m.v[t] temp = _check_productexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(e.arg(1), temp[1].arg(0).arg(0)) self.assertIs(m.z, temp[1].arg(0).arg(1)) e = m.v[t] == m.y * m.dv[t] / m.z temp = _check_productexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(e.arg(0), temp[1].arg(0).arg(0)) self.assertIs(m.z, temp[1].arg(0).arg(1)) @@ -1006,14 +1006,14 @@ def test_check_productexpression(self): # Check having the DerivativeVar in the denominator e = m.y / (m.dv[t] * m.z) == m.mp temp = _check_productexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(m.y, temp[1].arg(0)) self.assertIs(e.arg(1), temp[1].arg(1).arg(0)) e = m.mp == m.y / (m.dv[t] * m.z) temp = _check_productexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(m.y, temp[1].arg(0)) self.assertIs(e.arg(0), 
temp[1].arg(1).arg(0)) @@ -1035,8 +1035,8 @@ def test_check_negationexpression(self): temp = _check_negationexpression(e, 0) self.assertIs(e.arg(0).arg(0), temp[0]) self.assertIs(e.arg(1), temp[1].arg(0)) - self.assertIs(m.dv, temp[0]._base) - self.assertIs(m.v, temp[1].arg(0)._base) + self.assertIs(m.dv, temp[0].arg(0)) + self.assertIs(m.v, temp[1].arg(0).arg(0)) temp = _check_negationexpression(e, 1) self.assertIsNone(temp) @@ -1044,8 +1044,8 @@ def test_check_negationexpression(self): temp = _check_negationexpression(e, 1) self.assertIs(e.arg(0), temp[1].arg(0)) self.assertIs(e.arg(1).arg(0), temp[0]) - self.assertIs(m.dv, temp[0]._base) - self.assertIs(m.v, temp[1].arg(0)._base) + self.assertIs(m.dv, temp[0].arg(0)) + self.assertIs(m.v, temp[1].arg(0).arg(0)) temp = _check_negationexpression(e, 0) self.assertIsNone(temp) @@ -1068,7 +1068,7 @@ def test_check_viewsumexpression(self): e = m.dv[t] + m.y + m.z == m.v[t] temp = _check_viewsumexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.SumExpression) self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression) self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression) @@ -1080,7 +1080,7 @@ def test_check_viewsumexpression(self): e = m.v[t] == m.y + m.dv[t] + m.z temp = _check_viewsumexpression(e, 1) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.SumExpression) self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression) self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression) @@ -1090,7 +1090,7 @@ def test_check_viewsumexpression(self): e = 5 * m.dv[t] + 5 * m.y - m.z == m.v[t] temp = _check_viewsumexpression(e, 0) - self.assertIs(m.dv, temp[0]._base) + self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(type(temp[1].arg(0).arg(0)), EXPR.GetItemExpression) diff --git a/pyomo/gdp/__init__.py b/pyomo/gdp/__init__.py 
index 62c7dd66fc8..7667064aa20 100644 --- a/pyomo/gdp/__init__.py +++ b/pyomo/gdp/__init__.py @@ -13,5 +13,5 @@ # Do not import these files: importing them registers the transformation # plugins with the pyomo script so that they get automatically invoked. #import pyomo.gdp.bigm -#import pyomo.gdp.chull +#import pyomo.gdp.hull diff --git a/pyomo/gdp/chull.py b/pyomo/gdp/chull.py index 662fba7f09c..ecdf76bee29 100644 --- a/pyomo/gdp/chull.py +++ b/pyomo/gdp/chull.py @@ -12,15 +12,17 @@ from pyomo.common.plugin import Plugin, implements from pyomo.core import IPyomoScriptModifyInstance, TransformationFactory -# This import ensures that gdp.chull is registered, even if pyomo.environ +# This is now deprecated in so many ways... + +# This import ensures that gdp.hull is registered, even if pyomo.environ # was never imported. -import pyomo.gdp.plugins.chull +import pyomo.gdp.plugins.hull @deprecated('The GDP Pyomo script plugins are deprecated. ' 'Use BuildActions or the --transform option.', version='5.4') class ConvexHull_Transformation_PyomoScript_Plugin(Plugin): - """Plugin to automatically call the GDP Convex Hull relaxation within + """Plugin to automatically call the GDP Hull Reformulation within the Pyomo script. """ @@ -32,7 +34,7 @@ def apply(self, **kwds): # Not sure why the ModifyInstance callback started passing the # model along with the instance. We will ignore it. 
model = kwds.pop('model', None) - xform = TransformationFactory('gdp.chull') + xform = TransformationFactory('gdp.hull') return xform.apply_to(instance, **kwds) diff --git a/pyomo/gdp/plugins/__init__.py b/pyomo/gdp/plugins/__init__.py index e4b30840bf6..778b0b2e456 100644 --- a/pyomo/gdp/plugins/__init__.py +++ b/pyomo/gdp/plugins/__init__.py @@ -10,7 +10,7 @@ def load(): import pyomo.gdp.plugins.bigm - import pyomo.gdp.plugins.chull + import pyomo.gdp.plugins.hull import pyomo.gdp.plugins.bilinear import pyomo.gdp.plugins.gdp_var_mover import pyomo.gdp.plugins.cuttingplane diff --git a/pyomo/gdp/plugins/bigm.py b/pyomo/gdp/plugins/bigm.py index 9855591bf9f..e63897829bf 100644 --- a/pyomo/gdp/plugins/bigm.py +++ b/pyomo/gdp/plugins/bigm.py @@ -17,25 +17,30 @@ from pyomo.contrib.fbbt.interval import inf from pyomo.core import ( Block, Connector, Constraint, Param, Set, Suffix, Var, - Expression, SortComponents, TraversalStrategy, Any, value, - RangeSet) + Expression, SortComponents, TraversalStrategy, value, + RangeSet, NonNegativeIntegers) +from pyomo.core.base.external import ExternalFunction from pyomo.core.base import Transformation, TransformationFactory from pyomo.core.base.component import ComponentUID, ActiveComponent from pyomo.core.base.PyomoModel import ConcreteModel, AbstractModel from pyomo.core.kernel.component_map import ComponentMap from pyomo.core.kernel.component_set import ComponentSet +import pyomo.core.expr.current as EXPR from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.disjunct import _DisjunctData -from pyomo.gdp.util import target_list, is_child_of +from pyomo.gdp.util import (target_list, is_child_of, get_src_disjunction, + get_src_constraint, get_transformed_constraints, + _get_constraint_transBlock, get_src_disjunct, + _warn_for_active_disjunction, + _warn_for_active_disjunct) from pyomo.gdp.plugins.gdp_var_mover import HACK_GDP_Disjunct_Reclassifier from pyomo.repn import generate_standard_repn from 
pyomo.common.config import ConfigBlock, ConfigValue from pyomo.common.modeling import unique_component_name from pyomo.common.deprecation import deprecation_warning + +from functools import wraps from six import iterkeys, iteritems from weakref import ref as weakref_ref -import sys - logger = logging.getLogger('pyomo.gdp.bigm') @@ -63,7 +68,7 @@ class BigM_Transformation(Transformation): 1) if the constraint appears in the bigM argument dict 2) if the constraint parent_component appears in the bigM argument dict - 3) if any block which is an ancestor to the constraint appears in + 3) if any block which is an ancestor to the constraint appears in the bigM argument dict 3) if 'None' is in the bigM argument dict 4) if the constraint or the constraint parent_component appear in @@ -79,19 +84,19 @@ class BigM_Transformation(Transformation): Specifying "bigM=N" is automatically mapped to "bigM={None: N}". The transformation will create a new Block with a unique - name beginning "_pyomo_gdp_bigm_relaxation". That Block will + name beginning "_pyomo_gdp_bigm_reformulation". That Block will contain an indexed Block named "relaxedDisjuncts", which will hold the relaxed disjuncts. This block is indexed by an integer indicating the order in which the disjuncts were relaxed. Each block has a dictionary "_constraintMap": - + 'srcConstraints': ComponentMap(: ) 'transformedConstraints': ComponentMap(: ) All transformed Disjuncts will have a pointer to the block their transformed - constraints are on, and all transformed Disjunctions will have a + constraints are on, and all transformed Disjunctions will have a pointer to the corresponding OR or XOR constraint. 
""" @@ -119,6 +124,23 @@ class BigM_Transformation(Transformation): M-values found through model Suffixes or that would otherwise be calculated using variable domains.""" )) + CONFIG.declare('assume_fixed_vars_permanent', ConfigValue( + default=False, + domain=bool, + description="Boolean indicating whether or not to transform so that the " + "the transformed model will still be valid when fixed Vars are unfixed.", + doc=""" + This is only relevant when the transformation will be estimating values + for M. If True, the transformation will calculate M values assuming that + fixed variables will always be fixed to their current values. This means + that if a fixed variable is unfixed after transformation, the + transformed model is potentially no longer valid. By default, the + transformation will assume fixed variables could be unfixed in the + future and will use their bounds to calculate the M value rather than + their value. Note that this could make for a weaker LP relaxation + while the variables remain fixed. + """ + )) def __init__(self): """Initialize transformation object.""" @@ -140,18 +162,26 @@ def __init__(self): Disjunction: self._warn_for_active_disjunction, Disjunct: self._warn_for_active_disjunct, Block: self._transform_block_on_disjunct, + ExternalFunction: False, } - def _get_bigm_suffix_list(self, block): + def _get_bigm_suffix_list(self, block, stopping_block=None): # Note that you can only specify suffixes on BlockData objects or # SimpleBlocks. Though it is possible at this point to stick them # on whatever components you want, we won't pick them up. suffix_list = [] - while block is not None: + orig_block = block + + # go searching above block in the tree, stop when we hit stopping_block + # (This is so that we can search on each Disjunct once, but get any + # information between a constraint and its Disjunct while transforming + # the constraint). 
+ while block is not stopping_block: bigm = block.component('BigM') if type(bigm) is Suffix: suffix_list.append(bigm) block = block.parent_block() + return suffix_list def _get_bigm_arg_list(self, bigm_args, block): @@ -182,7 +212,7 @@ def _apply_to(self, instance, **kwds): NAME_BUFFER.clear() # same for our bookkeeping about what we used from bigM arg dict self.used_args.clear() - + def _apply_to_impl(self, instance, **kwds): config = self.CONFIG(kwds.pop('options', {})) @@ -197,6 +227,7 @@ def _apply_to_impl(self, instance, **kwds): config.set_value(kwds) bigM = config.bigM + self.assume_fixed_vars_permanent = config.assume_fixed_vars_permanent targets = config.targets if targets is None: @@ -212,21 +243,22 @@ def _apply_to_impl(self, instance, **kwds): # check that t is in fact a child of instance if not is_child_of(parent=instance, child=t, knownBlocks=knownBlocks): - raise GDP_Error("Target %s is not a component on instance %s!" - % (t.name, instance.name)) + raise GDP_Error( + "Target '%s' is not a component on instance '%s'!" + % (t.name, instance.name)) elif t.ctype is Disjunction: - if t.parent_component() is t: + if t.is_indexed(): self._transform_disjunction(t, bigM) else: self._transform_disjunctionData( t, bigM, t.index()) elif t.ctype in (Block, Disjunct): - if t.parent_component() is t: + if t.is_indexed(): self._transform_block(t, bigM) else: self._transform_blockData(t, bigM) else: raise GDP_Error( - "Target %s was not a Block, Disjunct, or Disjunction. " + "Target '%s' was not a Block, Disjunct, or Disjunction. " "It was of type %s and can't be transformed." 
% (t.name, type(t))) @@ -260,10 +292,10 @@ def _add_transformation_block(self, instance): # on transBlockName = unique_component_name( instance, - '_pyomo_gdp_bigm_relaxation') + '_pyomo_gdp_bigm_reformulation') transBlock = Block() instance.add_component(transBlockName, transBlock) - transBlock.relaxedDisjuncts = Block(Any) + transBlock.relaxedDisjuncts = Block(NonNegativeIntegers) transBlock.lbub = Set(initialize=['lb', 'ub']) return transBlock @@ -293,7 +325,7 @@ def _add_xor_constraint(self, disjunction, transBlock): assert isinstance(disjunction, Disjunction) # first check if the constraint already exists - if not disjunction._algebraic_constraint is None: + if disjunction._algebraic_constraint is not None: return disjunction._algebraic_constraint() # add the XOR (or OR) constraints to parent block (with unique name) @@ -324,23 +356,16 @@ def _transform_disjunction(self, obj, bigM): else: transBlock = self._add_transformation_block(obj.parent_block()) - # If this is an IndexedDisjunction, we have to create the XOR constraint - # here because we want its index to match the disjunction. In any case, - # we might as well. - xorConstraint = self._add_xor_constraint(obj, transBlock) - # relax each of the disjunctionDatas for i in sorted(iterkeys(obj)): - self._transform_disjunctionData(obj[i], bigM, i, xorConstraint, - transBlock) + self._transform_disjunctionData(obj[i], bigM, i, transBlock) # deactivate so the writers don't scream obj.deactivate() - def _transform_disjunctionData(self, obj, bigM, index, xorConstraint=None, - transBlock=None): + def _transform_disjunctionData(self, obj, bigM, index, transBlock=None): if not obj.active: - return # Do not process a deactivated disjunction + return # Do not process a deactivated disjunction # We won't have these arguments if this got called straight from # targets. But else, we created them earlier, and have just been passing # them through. 
@@ -350,20 +375,20 @@ def _transform_disjunctionData(self, obj, bigM, index, xorConstraint=None, # the case, let's use the same transformation block. (Else it will # be really confusing that the XOR constraint goes to that old block # but we create a new one here.) - if not obj.parent_component()._algebraic_constraint is None: + if obj.parent_component()._algebraic_constraint is not None: transBlock = obj.parent_component()._algebraic_constraint().\ parent_block() else: transBlock = self._add_transformation_block(obj.parent_block()) - if xorConstraint is None: - xorConstraint = self._add_xor_constraint(obj.parent_component(), - transBlock) + # create or fetch the xor constraint + xorConstraint = self._add_xor_constraint(obj.parent_component(), + transBlock) xor = obj.xor or_expr = 0 - # Just because it's unlikely this is what someone meant to do... + # Just because it's unlikely this is what someone meant to do... if len(obj.disjuncts) == 0: - raise GDP_Error("Disjunction %s is empty. This is " + raise GDP_Error("Disjunction '%s' is empty. This is " "likely indicative of a modeling error." % obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)) @@ -387,7 +412,7 @@ def _transform_disjunctionData(self, obj, bigM, index, xorConstraint=None, # Mark the DisjunctionData as transformed by mapping it to its XOR # constraint. obj._algebraic_constraint = weakref_ref(xorConstraint[index]) - + # and deactivate for the writers obj.deactivate() @@ -401,23 +426,23 @@ def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list): return else: raise GDP_Error( - "The disjunct %s is deactivated, but the " + "The disjunct '%s' is deactivated, but the " "indicator_var is fixed to %s. This makes no sense." 
% ( obj.name, value(obj.indicator_var) )) if obj._transformation_block is None: raise GDP_Error( - "The disjunct %s is deactivated, but the " + "The disjunct '%s' is deactivated, but the " "indicator_var is not fixed and the disjunct does not " "appear to have been relaxed. This makes no sense. " "(If the intent is to deactivate the disjunct, fix its " "indicator_var to 0.)" % ( obj.name, )) - - if not obj._transformation_block is None: + + if obj._transformation_block is not None: # we've transformed it, which means this is the second time it's # appearing in a Disjunction raise GDP_Error( - "The disjunct %s has been transformed, but a disjunction " + "The disjunct '%s' has been transformed, but a disjunction " "it appears in has not. Putting the same disjunct in " "multiple disjunctions is not supported." % obj.name) @@ -436,7 +461,7 @@ def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list): # This is crazy, but if the disjunction has been previously # relaxed, the disjunct *could* be deactivated. This is a big - # deal for CHull, as it uses the component_objects / + # deal for Hull, as it uses the component_objects / # component_data_objects generators. For BigM, that is OK, # because we never use those generators with active=True. I am # only noting it here for the future when someone (me?) 
is @@ -452,11 +477,11 @@ def _transform_block_components(self, block, disjunct, bigM, arg_list, suffix_list): # We first need to find any transformed disjunctions that might be here # because we need to move their transformation blocks up onto the parent - # block before we transform anything else on this block + # block before we transform anything else on this block destinationBlock = disjunct._transformation_block().parent_block() for obj in block.component_data_objects( - Disjunction, - sort=SortComponents.deterministic, + Disjunction, + sort=SortComponents.deterministic, descend_into=(Block)): if obj.algebraic_constraint is None: # This could be bad if it's active since that means its @@ -464,7 +489,7 @@ def _transform_block_components(self, block, disjunct, bigM, arg_list, continue # get this disjunction's relaxation block. transBlock = obj.algebraic_constraint().parent_block() - + # move transBlock up to parent component self._transfer_transBlock_data(transBlock, destinationBlock) # we leave the transformation block because it still has the XOR @@ -480,7 +505,7 @@ def _transform_block_components(self, block, disjunct, bigM, arg_list, if handler is None: raise GDP_Error( "No BigM transformation handler registered " - "for modeling components of type %s. If your " + "for modeling components of type %s. If your " "disjuncts contain non-GDP Pyomo components that " "require transformation, please transform them first." % obj.ctype) @@ -495,6 +520,7 @@ def _transfer_transBlock_data(self, fromBlock, toBlock): # to move those over. We know the XOR constraints are on the block, and # we need to leave those on the disjunct. 
disjunctList = toBlock.relaxedDisjuncts + to_delete = [] for idx, disjunctBlock in iteritems(fromBlock.relaxedDisjuncts): newblock = disjunctList[len(disjunctList)] newblock.transfer_attributes_from(disjunctBlock) @@ -504,8 +530,12 @@ def _transfer_transBlock_data(self, fromBlock, toBlock): original._transformation_block = weakref_ref(newblock) newblock._srcDisjunct = weakref_ref(original) - # we delete this container because we just moved everything out - del fromBlock.relaxedDisjuncts + # save index of what we just moved so that we can delete it + to_delete.append(idx) + + # delete everything we moved. + for idx in to_delete: + del fromBlock.relaxedDisjuncts[idx] # Note that we could handle other components here if we ever needed # to, but we control what is on the transformation block and @@ -513,47 +543,11 @@ def _transfer_transBlock_data(self, fromBlock, toBlock): def _warn_for_active_disjunction(self, disjunction, disjunct, bigMargs, arg_list, suffix_list): - # this should only have gotten called if the disjunction is active - assert disjunction.active - problemdisj = disjunction - if disjunction.is_indexed(): - for i in sorted(iterkeys(disjunction)): - if disjunction[i].active: - # a _DisjunctionData is active, we will yell about - # it specifically. - problemdisj = disjunction[i] - break - - parentblock = problemdisj.parent_block() - # the disjunction should only have been active if it wasn't transformed - assert problemdisj.algebraic_constraint is None - _probDisjName = problemdisj.getname( - fully_qualified=True, name_buffer=NAME_BUFFER) - raise GDP_Error("Found untransformed disjunction %s in disjunct %s! " - "The disjunction must be transformed before the " - "disjunct. If you are using targets, put the " - "disjunction before the disjunct in the list." 
- % (_probDisjName, disjunct.name)) + _warn_for_active_disjunction(disjunction, disjunct, NAME_BUFFER) def _warn_for_active_disjunct(self, innerdisjunct, outerdisjunct, bigMargs, arg_list, suffix_list): - assert innerdisjunct.active - problemdisj = innerdisjunct - if innerdisjunct.is_indexed(): - for i in sorted(iterkeys(innerdisjunct)): - if innerdisjunct[i].active: - # This is shouldn't be true, we will complain about it. - problemdisj = innerdisjunct[i] - break - - raise GDP_Error("Found active disjunct {0} in disjunct {1}! " - "Either {0} " - "is not in a disjunction or the disjunction it is in " - "has not been transformed. " - "{0} needs to be deactivated " - "or its disjunction transformed before {1} can be " - "transformed.".format(problemdisj.name, - outerdisjunct.name)) + _warn_for_active_disjunct(innerdisjunct, outerdisjunct, NAME_BUFFER) def _transform_block_on_disjunct(self, block, disjunct, bigMargs, arg_list, suffix_list): @@ -574,12 +568,12 @@ def _get_constraint_map_dict(self, transBlock): return transBlock._constraintMap def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, - suffix_list): + disjunct_suffix_list): # add constraint to the transformation block, we'll transform it there. transBlock = disjunct._transformation_block() bigm_src = transBlock.bigm_src constraintMap = self._get_constraint_map_dict(transBlock) - + disjunctionRelaxationBlock = transBlock.parent_block() # Though rare, it is possible to get naming conflicts here # since constraints from all blocks are getting moved onto the @@ -588,22 +582,18 @@ def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, name = unique_component_name(transBlock, cons_name) if obj.is_indexed(): - try: - newConstraint = Constraint(obj.index_set(), - disjunctionRelaxationBlock.lbub) - # HACK: We get burned by #191 here... When #1319 is merged we - # can revist this and I think stop catching the AttributeError. 
- except (TypeError, AttributeError): - # The original constraint may have been indexed by a - # non-concrete set (like an Any). We will give up on - # strict index verification and just blindly proceed. - newConstraint = Constraint(Any) + newConstraint = Constraint(obj.index_set(), + disjunctionRelaxationBlock.lbub) + # we map the container of the original to the container of the + # transformed constraint. Don't do this if obj is a SimpleConstraint + # because we will treat that like a _ConstraintData and map to a + # list of transformed _ConstraintDatas + constraintMap['transformedConstraints'][obj] = newConstraint else: newConstraint = Constraint(disjunctionRelaxationBlock.lbub) transBlock.add_component(name, newConstraint) - # add mapping of original constraint to transformed constraint + # add mapping of transformed constraint to original constraint constraintMap['srcConstraints'][newConstraint] = obj - constraintMap['transformedConstraints'][obj] = newConstraint for i in sorted(iterkeys(obj)): c = obj[i] @@ -617,18 +607,23 @@ def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, if __debug__ and logger.isEnabledFor(logging.DEBUG): _name = obj.getname( fully_qualified=True, name_buffer=NAME_BUFFER) - logger.debug("GDP(BigM): The value for M for constraint %s " + logger.debug("GDP(BigM): The value for M for constraint '%s' " "from the BigM argument is %s." % (cons_name, str(M))) # if we didn't get something from args, try suffixes: if M is None: + # first get anything parent to c but below disjunct + suffix_list = self._get_bigm_suffix_list(c.parent_block(), + stopping_block=disjunct) + # prepend that to what we already collected for the disjunct. 
+ suffix_list.extend(disjunct_suffix_list) M = self._get_M_from_suffixes(c, suffix_list, bigm_src) if __debug__ and logger.isEnabledFor(logging.DEBUG): _name = obj.getname( fully_qualified=True, name_buffer=NAME_BUFFER) - logger.debug("GDP(BigM): The value for M for constraint %s " + logger.debug("GDP(BigM): The value for M for constraint '%s' " "after checking suffixes is %s." % (cons_name, str(M))) @@ -660,7 +655,7 @@ def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, if __debug__ and logger.isEnabledFor(logging.DEBUG): _name = obj.getname( fully_qualified=True, name_buffer=NAME_BUFFER) - logger.debug("GDP(BigM): The value for M for constraint %s " + logger.debug("GDP(BigM): The value for M for constraint '%s' " "after estimating (if needed) is %s." % (cons_name, str(M))) @@ -677,16 +672,28 @@ def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, if c.lower is not None: if M[0] is None: - raise GDP_Error("Cannot relax disjunctive constraint %s " + raise GDP_Error("Cannot relax disjunctive constraint '%s' " "because M is not defined." % name) M_expr = M[0] * (1 - disjunct.indicator_var) newConstraint.add(i_lb, c.lower <= c. body - M_expr) + constraintMap[ + 'transformedConstraints'][c] = [newConstraint[i_lb]] + constraintMap['srcConstraints'][newConstraint[i_lb]] = c if c.upper is not None: if M[1] is None: - raise GDP_Error("Cannot relax disjunctive constraint %s " + raise GDP_Error("Cannot relax disjunctive constraint '%s' " "because M is not defined." 
% name) M_expr = M[1] * (1 - disjunct.indicator_var) newConstraint.add(i_ub, c.body - M_expr <= c.upper) + transformed = constraintMap['transformedConstraints'].get(c) + if transformed is not None: + constraintMap['transformedConstraints'][ + c].append(newConstraint[i_ub]) + else: + constraintMap[ + 'transformedConstraints'][c] = [newConstraint[i_ub]] + constraintMap['srcConstraints'][newConstraint[i_ub]] = c + # deactivate because we relaxed c.deactivate() @@ -716,7 +723,7 @@ def _get_M_from_args(self, constraint, bigMargs, arg_list, bigm_src): self.used_args[block] = val bigm_src[constraint] = (bigMargs, block) return val - + # last check for value for None! if None in bigMargs: m = bigMargs[None] @@ -752,6 +759,15 @@ def _get_M_from_suffixes(self, constraint, suffix_list, bigm_src): return M def _estimate_M(self, expr, name): + # If there are fixed variables here, unfix them for this calculation, + # and we'll restore them at the end. + fixed_vars = ComponentMap() + if not self.assume_fixed_vars_permanent: + for v in EXPR.identify_variables(expr, include_fixed=True): + if v.fixed: + fixed_vars[v] = value(v) + v.fixed = False + # Calculate a best guess at M repn = generate_standard_repn(expr, quadratic=False) M = [0, 0] @@ -778,137 +794,55 @@ def _estimate_M(self, expr, name): raise GDP_Error( "Cannot estimate M for " "expressions with unbounded variables." - "\n\t(found unbounded var %s while processing " - "constraint %s)" % (var.name, name)) + "\n\t(found unbounded var '%s' while processing " + "constraint '%s')" % (var.name, name)) else: # expression is nonlinear. Try using `contrib.fbbt` to estimate. 
expr_lb, expr_ub = compute_bounds_on_expr(expr) if expr_lb is None or expr_ub is None: raise GDP_Error("Cannot estimate M for unbounded nonlinear " "expressions.\n\t(found while processing " - "constraint %s)" % name) + "constraint '%s')" % name) else: M = (expr_lb, expr_ub) + # clean up if we unfixed things (fixed_vars is empty if we were assuming + # fixed vars are fixed for life) + for v, val in iteritems(fixed_vars): + v.fix(val) + return tuple(M) - # These are all functions to retrieve transformed components from original - # ones and vice versa. + # These are all functions to retrieve transformed components from + # original ones and vice versa. + + @wraps(get_src_disjunct) def get_src_disjunct(self, transBlock): - """Return the Disjunct object whose transformed components are on - transBlock. + return get_src_disjunct(transBlock) - Parameters - ---------- - transBlock: _BlockData which is in the relaxedDisjuncts IndexedBlock - on a transformation block. - """ - try: - return transBlock._srcDisjunct() - except: - raise GDP_Error("Block %s doesn't appear to be a transformation " - "block for a disjunct. No source disjunct found." - "\n\t(original error: %s)" - % (transBlock.name, sys.exc_info()[1])) + @wraps(get_src_disjunction) + def get_src_disjunction(self, xor_constraint): + return get_src_disjunction(xor_constraint) + @wraps(get_src_constraint) def get_src_constraint(self, transformedConstraint): - """Return the original Constraint whose transformed counterpart is - transformedConstraint - - Parameters - ---------- - transformedConstraint: Constraint, which must be a component on one of - the BlockDatas in the relaxedDisjuncts Block of - a transformation block - """ - transBlock = transformedConstraint.parent_block() - # This should be our block, so if it's not, the user messed up and gave - # us the wrong thing. If they happen to also have a _constraintMap then - # the world is really against us. 
- if not hasattr(transBlock, "_constraintMap"): - raise GDP_Error("Constraint %s is not a transformed constraint" - % transformedConstraint.name) - # if something goes wrong here, it's a bug in the mappings. - return transBlock._constraintMap['srcConstraints'][transformedConstraint] - - def _find_parent_disjunct(self, constraint): - # traverse up until we find the disjunct this constraint lives on - parent_disjunct = constraint.parent_block() - while not isinstance(parent_disjunct, _DisjunctData): - if parent_disjunct is None: - raise GDP_Error( - "Constraint %s is not on a disjunct and so was not " - "transformed" % constraint.name) - parent_disjunct = parent_disjunct.parent_block() - - return parent_disjunct + return get_src_constraint(transformedConstraint) - def _get_constraint_transBlock(self, constraint): - parent_disjunct = self._find_parent_disjunct(constraint) - # we know from _find_parent_disjunct that parent_disjunct is a Disjunct, - # so the below is OK - transBlock = parent_disjunct._transformation_block - if transBlock is None: - raise GDP_Error("Constraint %s is on a disjunct which has not been " - "transformed" % constraint.name) - # if it's not None, it's the weakref we wanted. - transBlock = transBlock() - - return transBlock - - def get_transformed_constraint(self, srcConstraint): - """Return the transformed version of srcConstraint - - Parameters - ---------- - srcConstraint: Constraint, which must be in the subtree of a - transformed Disjunct - """ - transBlock = self._get_constraint_transBlock(srcConstraint) - - if hasattr(transBlock, "_constraintMap") and transBlock._constraintMap[ - 'transformedConstraints'].get(srcConstraint): - return transBlock._constraintMap['transformedConstraints'][ - srcConstraint] - raise GDP_Error("Constraint %s has not been transformed." 
- % srcConstraint.name) - - def get_src_disjunction(self, xor_constraint): - """Return the Disjunction corresponding to xor_constraint - - Parameters - ---------- - xor_constraint: Constraint, which must be the logical constraint - (located on the transformation block) of some - Disjunction - """ - # NOTE: This is indeed a linear search through the Disjunctions on the - # model. I am leaving it this way on the assumption that asking XOR - # constraints for their Disjunction is not going to be a common - # question. If we ever need efficiency then we should store a reverse - # map from the XOR constraint to the Disjunction on the transformation - # block while we do the transformation. And then this method could query - # that map. - m = xor_constraint.model() - for disjunction in m.component_data_objects(Disjunction): - if disjunction._algebraic_constraint: - if disjunction._algebraic_constraint() is xor_constraint: - return disjunction - raise GDP_Error("It appears that %s is not an XOR or OR constraint " - "resulting from transforming a Disjunction." - % xor_constraint.name) + @wraps(get_transformed_constraints) + def get_transformed_constraints(self, srcConstraint): + return get_transformed_constraints(srcConstraint) def get_m_value_src(self, constraint): - """Return a tuple indicating how the M value used to transform - constraint was specified. (In particular, this can be used to - verify which BigM Suffixes were actually necessary to the + """Return a tuple indicating how the M value used to transform + constraint was specified. (In particular, this can be used to + verify which BigM Suffixes were actually necessary to the transformation.) - If the M value came from an arg, returns (bigm_arg_dict, key), where - bigm_arg_dict is the dictionary itself and key is the key in that + If the M value came from an arg, returns (bigm_arg_dict, key), where + bigm_arg_dict is the dictionary itself and key is the key in that dictionary which gave us the M value. 
- If the M value came from a Suffix, returns (suffix, key) where suffix + If the M value came from a Suffix, returns (suffix, key) where suffix is the BigM suffix used and key is the key in that Suffix. If the transformation calculated the value, returns (M_lower, M_upper), @@ -917,10 +851,10 @@ def get_m_value_src(self, constraint): Parameters ---------- - constraint: Constraint, which must be in the subtree of a transformed + constraint: Constraint, which must be in the subtree of a transformed Disjunct """ - transBlock = self._get_constraint_transBlock(constraint) + transBlock = _get_constraint_transBlock(constraint) # This is a KeyError if it fails, but it is also my fault if it # fails... (That is, it's a bug in the mapping.) return transBlock.bigm_src[constraint] diff --git a/pyomo/gdp/plugins/chull.py b/pyomo/gdp/plugins/chull.py index a15c1c45f3b..c99d3feff25 100644 --- a/pyomo/gdp/plugins/chull.py +++ b/pyomo/gdp/plugins/chull.py @@ -1,844 +1,7 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ +from pyomo.common.deprecation import deprecation_warning +deprecation_warning( + 'The pyomo.gdp.plugins.chull module is deprecated. 
' + 'Import the Hull reformulation objects from pyomo.gdp.plugins.hull.', + version='TBD') -import logging - -import pyomo.common.config as cfg -from pyomo.common.modeling import unique_component_name -from pyomo.core.expr.numvalue import ZeroConstant -from pyomo.core.base.component import ActiveComponent -from pyomo.core.kernel.component_map import ComponentMap -from pyomo.core.kernel.component_set import ComponentSet -import pyomo.core.expr.current as EXPR -from pyomo.core.base import Transformation, TransformationFactory -from pyomo.core import ( - Block, Connector, Constraint, Param, Set, Suffix, Var, - Expression, SortComponents, TraversalStrategy, - Any, RangeSet, Reals, value -) -from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.util import clone_without_expression_components, target_list, \ - is_child_of -from pyomo.gdp.plugins.gdp_var_mover import HACK_GDP_Disjunct_Reclassifier - -from six import iteritems, iterkeys - - -logger = logging.getLogger('pyomo.gdp.chull') - -NAME_BUFFER = {} - -@TransformationFactory.register('gdp.chull', doc="Relax disjunctive model by forming the convex hull.") -class ConvexHull_Transformation(Transformation): - """Relax disjunctive model by forming the convex hull. - - Relaxes a disjunctive model into an algebraic model by forming the - convex hull of each disjunction. - - This transformation accepts the following keyword arguments: - - Parameters - ---------- - perspective_function : str - The perspective function used for the disaggregated variables. - Must be one of 'FurmanSawayaGrossmann' (default), - 'LeeGrossmann', or 'GrossmannLee' - EPS : float - The value to use for epsilon [default: 1e-4] - targets : (block, disjunction, or list of those types) - The targets to transform. 
This can be a block, disjunction, or a - list of blocks and Disjunctions [default: the instance] - - After transformation, every transformed disjunct will have a - "_gdp_transformation_info" dict containing 2 entries: - - 'relaxed': True, - 'chull': { - 'relaxationBlock': , - 'relaxedConstraints': ComponentMap(constraint: relaxed_constraint) - 'disaggregatedVars': ComponentMap(var: list of disaggregated vars), - 'bigmConstraints': ComponentMap(disaggregated var: bigM constraint), - } - - In addition, any block or disjunct containing a relaxed disjunction - will have a "_gdp_transformation_info" dict with the following - entry: - - 'disjunction_or_constraint': - - Finally, the transformation will create a new Block with a unique - name beginning "_pyomo_gdp_chull_relaxation". That Block will - contain an indexed Block named "relaxedDisjuncts", which will hold - the relaxed disjuncts. This block is indexed by an integer - indicating the order in which the disjuncts were relaxed. Each - block will have a "_gdp_transformation_info" dict with the following - entries: - - 'src': - 'srcVars': ComponentMap(disaggregated var: original var), - 'srcConstraints': ComponentMap(relaxed_constraint: constraint) - 'boundConstraintToSrcVar': ComponentMap(bigm_constraint: orig_var), - - """ - - - CONFIG = cfg.ConfigBlock('gdp.chull') - CONFIG.declare('targets', cfg.ConfigValue( - default=None, - domain=target_list, - description="target or list of targets that will be relaxed", - doc=""" - - This specifies the target or list of targets to relax as either a - component or a list of components. If None (default), the entire model - is transformed. 
Note that if the transformation is done out of place, - the list of targets should be attached to the model before it is cloned, - and the list will specify the targets on the cloned instance.""" - )) - CONFIG.declare('perspective function', cfg.ConfigValue( - default='FurmanSawayaGrossmann', - domain=cfg.In(['FurmanSawayaGrossmann','LeeGrossmann','GrossmannLee']), - description='perspective function used for variable disaggregation', - doc=""" - The perspective function used for variable disaggregation - - "LeeGrossmann" is the original NL convex hull from Lee & - Grossmann (2000) [1]_, which substitutes nonlinear constraints - - h_ik(x) <= 0 - - with - - x_k = sum( nu_ik ) - y_ik * h_ik( nu_ik/y_ik ) <= 0 - - "GrossmannLee" is an updated formulation from Grossmann & - Lee (2003) [2]_, which avoids divide-by-0 errors by using: - - x_k = sum( nu_ik ) - (y_ik + eps) * h_ik( nu_ik/(y_ik + eps) ) <= 0 - - "FurmanSawayaGrossmann" (default) is an improved relaxation [3]_ - that is exact at 0 and 1 while avoiding numerical issues from - the Lee & Grossmann formulation by using: - - x_k = sum( nu_ik ) - ((1-eps)*y_ik + eps) * h_ik( nu_ik/((1-eps)*y_ik + eps) ) \ - - eps * h_ki(0) * ( 1-y_ik ) <= 0 - - References - ---------- - .. [1] Lee, S., & Grossmann, I. E. (2000). New algorithms for - nonlinear generalized disjunctive programming. Computers and - Chemical Engineering, 24, 2125-2141 - - .. [2] Grossmann, I. E., & Lee, S. (2003). Generalized disjunctive - programming: Nonlinear convex hull relaxation and algorithms. - Computational Optimization and Applications, 26, 83-100. - - .. [3] Furman, K., Sawaya, N., and Grossmann, I. A computationally - useful algebraic representation of nonlinear disjunctive convex - sets using the perspective function. Optimization Online - (2016). http://www.optimization-online.org/DB_HTML/2016/07/5544.html. 
- """ - )) - CONFIG.declare('EPS', cfg.ConfigValue( - default=1e-4, - domain=cfg.PositiveFloat, - description="Epsilon value to use in perspective function", - )) - - def __init__(self): - super(ConvexHull_Transformation, self).__init__() - self.handlers = { - Constraint : self._xform_constraint, - Var : False, - Connector : False, - Expression : False, - Param : False, - Set : False, - RangeSet: False, - Suffix : False, - Disjunction: self._warn_for_active_disjunction, - Disjunct: self._warn_for_active_disjunct, - Block: self._transform_block_on_disjunct, - } - - - def _apply_to(self, instance, **kwds): - assert not NAME_BUFFER - try: - self._apply_to_impl(instance, **kwds) - finally: - # Clear the global name buffer now that we are done - NAME_BUFFER.clear() - - - def _apply_to_impl(self, instance, **kwds): - self._config = self.CONFIG(kwds.pop('options', {})) - self._config.set_value(kwds) - - # make a transformation block - transBlockName = unique_component_name( - instance, - '_pyomo_gdp_chull_relaxation') - transBlock = Block() - instance.add_component(transBlockName, transBlock) - transBlock.relaxedDisjuncts = Block(Any) - transBlock.lbub = Set(initialize = ['lb','ub','eq']) - transBlock.disjContainers = ComponentSet() - - targets = self._config.targets - if targets is None: - targets = ( instance, ) - _HACK_transform_whole_instance = True - else: - _HACK_transform_whole_instance = False - knownBlocks = {} - for t in targets: - # check that t is in fact a child of instance - if not is_child_of(parent=instance, child=t, - knownBlocks=knownBlocks): - raise GDP_Error("Target %s is not a component on instance %s!" 
- % (t.name, instance.name)) - elif t.ctype is Disjunction: - if t.parent_component() is t: - self._transformDisjunction(t, transBlock) - else: - self._transformDisjunctionData(t, transBlock, t.index()) - elif t.ctype in (Block, Disjunct): - if t.parent_component() is t: - self._transformBlock(t, transBlock) - else: - self._transformBlockData(t, transBlock) - else: - raise GDP_Error( - "Target %s was not a Block, Disjunct, or Disjunction. " - "It was of type %s and can't be transformed" - % (t.name, type(t)) ) - - # Go through our dictionary of indexed things and deactivate - # the containers that don't have any active guys inside of - # them. So the invalid component logic will tell us if we - # missed something getting transformed. - for obj in transBlock.disjContainers: - if not obj.active: - continue - for i in obj: - if obj[i].active: - break - else: - # HACK due to active flag implementation. - # - # Ideally we would not have to do any of this (an - # ActiveIndexedComponent would get its active status by - # querring the active status of all the contained Data - # objects). As a fallback, we would like to call: - # - # obj._deactivate_without_fixing_indicator() - # - # However, the sreaightforward implementation of that - # method would have unintended side effects (fixing the - # contained _DisjunctData's indicator_vars!) due to our - # class hierarchy. Instead, we will directly call the - # relevant base class (safe-ish since we are verifying - # that all the contained _DisjunctionData are - # deactivated directly above). - ActiveComponent.deactivate(obj) - - # HACK for backwards compatibility with the older GDP transformations - # - # Until the writers are updated to find variables on things - # other than active blocks, we need to reclassify the Disjuncts - # as Blocks after transformation so that the writer will pick up - # all the variables that it needs (in this case, indicator_vars). 
- if _HACK_transform_whole_instance: - HACK_GDP_Disjunct_Reclassifier().apply_to(instance) - - - def _contained_in(self, var, block): - "Return True if a var is in the subtree rooted at block" - while var is not None: - if var.parent_component() is block: - return True - var = var.parent_block() - if var is block: - return True - return False - - def _transformBlock(self, obj, transBlock): - for i in sorted(iterkeys(obj)): - self._transformBlockData(obj[i], transBlock) - - - def _transformBlockData(self, obj, transBlock): - # Transform every (active) disjunction in the block - for disjunction in obj.component_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=(Block,Disjunct), - descent_order=TraversalStrategy.PostfixDFS): - self._transformDisjunction(disjunction, transBlock) - - - def _getDisjunctionConstraints(self, disjunction): - # Put the disjunction constraint on its parent block - - # We never do this for just a DisjunctionData because we need - # to know about the index set of its parent component. So if - # we called this on a DisjunctionData, we did something wrong. - assert isinstance(disjunction, Disjunction) - parent = disjunction.parent_block() - if hasattr(parent, "_gdp_transformation_info"): - infodict = parent._gdp_transformation_info - if type(infodict) is not dict: - raise GDP_Error( - "Component %s contains an attribute named " - "_gdp_transformation_info. The transformation requires " - "that it can create this attribute!" 
% parent.name) - try: - # On the off-chance that another GDP transformation went - # first, the infodict may exist, but the specific map we - # want will not be present - orConstraintMap = infodict['disjunction_or_constraint'] - except KeyError: - orConstraintMap = infodict['disjunction_or_constraint'] \ - = ComponentMap() - try: - disaggregationConstraintMap = infodict[ - 'disjunction_disaggregation_constraints'] - except KeyError: - disaggregationConstraintMap = infodict[ - 'disjunction_disaggregation_constraints'] \ - = ComponentMap() - else: - infodict = parent._gdp_transformation_info = {} - orConstraintMap = infodict['disjunction_or_constraint'] \ - = ComponentMap() - disaggregationConstraintMap = infodict[ - 'disjunction_disaggregation_constraints'] \ - = ComponentMap() - - if disjunction in disaggregationConstraintMap: - disaggregationConstraint = disaggregationConstraintMap[disjunction] - else: - # add the disaggregation constraint - disaggregationConstraint \ - = disaggregationConstraintMap[disjunction] = Constraint(Any) - parent.add_component( - unique_component_name( - parent, '_gdp_chull_relaxation_' + disjunction.getname( - fully_qualified=True, name_buffer=NAME_BUFFER - ) + '_disaggregation'), - disaggregationConstraint) - - # If the Constraint already exists, return it - if disjunction in orConstraintMap: - orC = orConstraintMap[disjunction] - else: - # add the XOR (or OR) constraints to parent block (with - # unique name) It's indexed if this is an - # IndexedDisjunction, not otherwise - orC = Constraint(disjunction.index_set()) if \ - disjunction.is_indexed() else Constraint() - parent.add_component( - unique_component_name( - parent, '_gdp_chull_relaxation_' + disjunction.getname( - fully_qualified=True, name_buffer=NAME_BUFFER - ) + '_xor'), - orC) - orConstraintMap[disjunction] = orC - - return orC, disaggregationConstraint - - - def _transformDisjunction(self, obj, transBlock): - # create the disjunction constraint and disaggregation - # 
constraints and then relax each of the disjunctionDatas - for i in sorted(iterkeys(obj)): - self._transformDisjunctionData(obj[i], transBlock, i) - - # deactivate so we know we relaxed - obj.deactivate() - - - def _transformDisjunctionData(self, obj, transBlock, index): - # Convex hull doesn't work if this is an or constraint. So if - # xor is false, give up - if not obj.xor: - raise GDP_Error("Cannot do convex hull transformation for " - "disjunction %s with or constraint. Must be an xor!" - % obj.name) - - parent_component = obj.parent_component() - transBlock.disjContainers.add(parent_component) - orConstraint, disaggregationConstraint \ - = self._getDisjunctionConstraints(parent_component) - - # We first go through and collect all the variables that we - # are going to disaggregate. - varOrder_set = ComponentSet() - varOrder = [] - varsByDisjunct = ComponentMap() - for disjunct in obj.disjuncts: - # This is crazy, but if the disjunct has been previously - # relaxed, the disjunct *could* be deactivated. - not_active = not disjunct.active - if not_active: - disjunct._activate_without_unfixing_indicator() - try: - disjunctVars = varsByDisjunct[disjunct] = ComponentSet() - for cons in disjunct.component_data_objects( - Constraint, - active = True, - sort=SortComponents.deterministic, - descend_into=Block): - # we aren't going to disaggregate fixed - # variables. This means there is trouble if they are - # unfixed later... 
- for var in EXPR.identify_variables( - cons.body, include_fixed=False): - # Note the use of a list so that we will - # eventually disaggregate the vars in a - # deterministic order (the order that we found - # them) - disjunctVars.add(var) - if var not in varOrder_set: - varOrder.append(var) - varOrder_set.add(var) - finally: - if not_active: - disjunct._deactivate_without_fixing_indicator() - - # We will only disaggregate variables that - # 1) appear in multiple disjuncts, or - # 2) are not contained in this disjunct, or - # 3) are not themselves disaggregated variables - varSet = [] - localVars = ComponentMap((d,[]) for d in obj.disjuncts) - for var in varOrder: - disjuncts = [d for d in varsByDisjunct if var in varsByDisjunct[d]] - if len(disjuncts) > 1: - varSet.append(var) - elif self._contained_in(var, disjuncts[0]): - localVars[disjuncts[0]].append(var) - elif self._contained_in(var, transBlock): - # There is nothing to do here: these are already - # disaggregated vars that can/will be forced to 0 when - # their disjunct is not active. - pass - else: - varSet.append(var) - - # Now that we know who we need to disaggregate, we will do it - # while we also transform the disjuncts. 
- or_expr = 0 - for disjunct in obj.disjuncts: - or_expr += disjunct.indicator_var - self._transform_disjunct(disjunct, transBlock, varSet, - localVars[disjunct]) - orConstraint.add(index, (or_expr, 1)) - - for i, var in enumerate(varSet): - disaggregatedExpr = 0 - for disjunct in obj.disjuncts: - if 'chull' not in disjunct._gdp_transformation_info: - if not disjunct.indicator_var.is_fixed() \ - or value(disjunct.indicator_var) != 0: - raise RuntimeError( - "GDP chull: disjunct was not relaxed, but " - "does not appear to be correctly deactivated.") - continue - disaggregatedVar = disjunct._gdp_transformation_info['chull'][ - 'disaggregatedVars'][var] - disaggregatedExpr += disaggregatedVar - if type(index) is tuple: - consIdx = index + (i,) - elif parent_component.is_indexed(): - consIdx = (index,) + (i,) - else: - consIdx = i - - disaggregationConstraint.add( - consIdx, - var == disaggregatedExpr) - - - def _transform_disjunct(self, obj, transBlock, varSet, localVars): - if hasattr(obj, "_gdp_transformation_info"): - infodict = obj._gdp_transformation_info - # If the user has something with our name that is not a dict, we - # scream. If they have a dict with this name then we are just going - # to use it... - if type(infodict) is not dict: - raise GDP_Error( - "Disjunct %s contains an attribute named " - "_gdp_transformation_info. The transformation requires " - "that it can create this attribute!" % obj.name) - else: - infodict = obj._gdp_transformation_info = {} - # deactivated means either we've already transformed or user deactivated - if not obj.active: - if obj.indicator_var.is_fixed(): - if value(obj.indicator_var) == 0: - # The user cleanly deactivated the disjunct: there - # is nothing for us to do here. - return - else: - raise GDP_Error( - "The disjunct %s is deactivated, but the " - "indicator_var is fixed to %s. This makes no sense." 
- % ( obj.name, value(obj.indicator_var) )) - if not infodict.get('relaxed', False): - raise GDP_Error( - "The disjunct %s is deactivated, but the " - "indicator_var is not fixed and the disjunct does not " - "appear to have been relaxed. This makes no sense." - % ( obj.name, )) - - if 'chull' in infodict: - # we've transformed it (with CHull), so don't do it again. - return - - # add reference to original disjunct to info dict on - # transformation block - relaxedDisjuncts = transBlock.relaxedDisjuncts - relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)] - relaxationBlockInfo = relaxationBlock._gdp_transformation_info = { - 'src': obj, - 'srcVars': ComponentMap(), - 'srcConstraints': ComponentMap(), - 'boundConstraintToSrcVar': ComponentMap(), - } - infodict['chull'] = chull = { - 'relaxationBlock': relaxationBlock, - 'relaxedConstraints': ComponentMap(), - 'disaggregatedVars': ComponentMap(), - 'bigmConstraints': ComponentMap(), - } - - # if this is a disjunctData from an indexed disjunct, we are - # going to want to check at the end that the container is - # deactivated if everything in it is. So we save it in our - # dictionary of things to check if it isn't there already. - disjParent = obj.parent_component() - if disjParent.is_indexed() and \ - disjParent not in transBlock.disjContainers: - transBlock.disjContainers.add(disjParent) - - # add the disaggregated variables and their bigm constraints - # to the relaxationBlock - for var in varSet: - lb = var.lb - ub = var.ub - if lb is None or ub is None: - raise GDP_Error("Variables that appear in disjuncts must be " - "bounded in order to use the chull " - "transformation! Missing bound for %s." 
- % (var.name)) - - disaggregatedVar = Var(within=Reals, - bounds=(min(0, lb), max(0, ub)), - initialize=var.value) - # naming conflicts are possible here since this is a bunch - # of variables from different blocks coming together, so we - # get a unique name - disaggregatedVarName = unique_component_name( - relaxationBlock, - var.getname(fully_qualified=False, name_buffer=NAME_BUFFER), - ) - relaxationBlock.add_component( - disaggregatedVarName, disaggregatedVar) - chull['disaggregatedVars'][var] = disaggregatedVar - relaxationBlockInfo['srcVars'][disaggregatedVar] = var - - bigmConstraint = Constraint(transBlock.lbub) - relaxationBlock.add_component( - disaggregatedVarName + "_bounds", bigmConstraint) - if lb: - bigmConstraint.add( - 'lb', obj.indicator_var*lb <= disaggregatedVar) - if ub: - bigmConstraint.add( - 'ub', disaggregatedVar <= obj.indicator_var*ub) - chull['bigmConstraints'][var] = bigmConstraint - relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var - - for var in localVars: - lb = var.lb - ub = var.ub - if lb is None or ub is None: - raise GDP_Error("Variables that appear in disjuncts must be " - "bounded in order to use the chull " - "transformation! Missing bound for %s." 
- % (var.name)) - if value(lb) > 0: - var.setlb(0) - if value(ub) < 0: - var.setub(0) - - # naming conflicts are possible here since this is a bunch - # of variables from different blocks coming together, so we - # get a unique name - conName = unique_component_name( - relaxationBlock, - var.getname(fully_qualified=False, name_buffer=NAME_BUFFER - ) + "_bounds" - ) - bigmConstraint = Constraint(transBlock.lbub) - relaxationBlock.add_component(conName, bigmConstraint) - bigmConstraint.add('lb', obj.indicator_var*lb <= var) - bigmConstraint.add('ub', var <= obj.indicator_var*ub) - chull['bigmConstraints'][var] = bigmConstraint - relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var - - var_substitute_map = dict((id(v), newV) for v, newV in - iteritems(chull['disaggregatedVars'])) - zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in - iteritems(chull['disaggregatedVars'])) - zero_substitute_map.update((id(v), ZeroConstant) - for v in localVars) - - # Transform each component within this disjunct - self._transform_block_components(obj, obj, infodict, var_substitute_map, - zero_substitute_map) - - # deactivate disjunct so we know we've relaxed it - obj._deactivate_without_fixing_indicator() - infodict['relaxed'] = True - - - def _transform_block_components( - self, block, disjunct, infodict, - var_substitute_map, zero_substitute_map): - # Look through the component map of block and transform - # everything we have a handler for. Yell if we don't know how - # to handle it. - for name, obj in list(iteritems(block.component_map())): - if hasattr(obj, 'active') and not obj.active: - continue - handler = self.handlers.get(obj.ctype, None) - if not handler: - if handler is None: - raise GDP_Error( - "No chull transformation handler registered " - "for modeling components of type %s" % obj.ctype ) - continue - # obj is what we are transforming, we pass disjunct - # through so that we will have access to the indicator - # variables down the line. 
- handler(obj, disjunct, infodict, var_substitute_map, - zero_substitute_map) - - - def _warn_for_active_disjunction( - self, disjunction, disjunct, infodict, var_substitute_map, - zero_substitute_map): - # this should only have gotten called if the disjunction is active - assert disjunction.active - problemdisj = disjunction - if disjunction.is_indexed(): - for i in sorted(iterkeys(disjunction)): - if disjunction[i].active: - # a _DisjunctionData is active, we will yell about - # it specifically. - problemdisj = disjunction[i] - break - # None of the _DisjunctionDatas were actually active. We - # are OK and we can deactivate the container. - else: - disjunction.deactivate() - return - parentblock = problemdisj.parent_block() - # the disjunction should only have been active if it wasn't transformed - _probDisjName = problemdisj.getname( - fully_qualified=True, name_buffer=NAME_BUFFER) - assert (not hasattr(parentblock, "_gdp_transformation_info")) or \ - _probDisjName not in parentblock._gdp_transformation_info - raise GDP_Error("Found untransformed disjunction %s in disjunct %s! " - "The disjunction must be transformed before the " - "disjunct. If you are using targets, put the " - "disjunction before the disjunct in the list." \ - % (_probDisjName, disjunct.name)) - - - def _warn_for_active_disjunct( - self, innerdisjunct, outerdisjunct, infodict, var_substitute_map, - zero_substitute_map): - assert innerdisjunct.active - problemdisj = innerdisjunct - if innerdisjunct.is_indexed(): - for i in sorted(iterkeys(innerdisjunct)): - if innerdisjunct[i].active: - # This is shouldn't be true, we will complain about it. - problemdisj = innerdisjunct[i] - break - # None of the _DisjunctDatas were actually active, so we - # are fine and we can deactivate the container. - else: - # HACK: See above about _deactivate_without_fixing_indicator - ActiveComponent.deactivate(innerdisjunct) - return - raise GDP_Error("Found active disjunct {0} in disjunct {1}! 
Either {0} " - "is not in a disjunction or the disjunction it is in " - "has not been transformed. {0} needs to be deactivated " - "or its disjunction transformed before {1} can be " - "transformed.".format(problemdisj.name, - outerdisjunct.name)) - - - def _transform_block_on_disjunct( - self, block, disjunct, infodict, var_substitute_map, - zero_substitute_map): - # We look through everything on the component map of the block - # and transform it just as we would if it was on the disjunct - # directly. (We are passing the disjunct through so that when - # we find constraints, _xform_constraint will have access to - # the correct indicator variable. - self._transform_block_components( - block, disjunct, infodict, var_substitute_map, zero_substitute_map) - - - def _xform_constraint(self, obj, disjunct, infodict, var_substitute_map, - zero_substitute_map): - # we will put a new transformed constraint on the relaxation block. - relaxationBlock = infodict['chull']['relaxationBlock'] - transBlock = relaxationBlock.parent_block() - varMap = infodict['chull']['disaggregatedVars'] - - # Though rare, it is possible to get naming conflicts here - # since constraints from all blocks are getting moved onto the - # same block. So we get a unique name - name = unique_component_name(relaxationBlock, obj.getname( - fully_qualified=True, name_buffer=NAME_BUFFER)) - - if obj.is_indexed(): - try: - newConstraint = Constraint(obj.index_set(), transBlock.lbub) - except: - # The original constraint may have been indexed by a - # non-concrete set (like an Any). We will give up on - # strict index verification and just blindly proceed. 
- newConstraint = Constraint(Any) - else: - newConstraint = Constraint(transBlock.lbub) - relaxationBlock.add_component(name, newConstraint) - # add mapping of original constraint to transformed constraint - # in transformation info dictionary - infodict['chull']['relaxedConstraints'][obj] = newConstraint - # add mapping of transformed constraint back to original constraint (we - # know that the info dict is already created because this only got - # called if we were transforming a disjunct...) - relaxationBlock._gdp_transformation_info['srcConstraints'][ - newConstraint] = obj - - for i in sorted(iterkeys(obj)): - c = obj[i] - if not c.active: - continue - - NL = c.body.polynomial_degree() not in (0,1) - EPS = self._config.EPS - mode = self._config.perspective_function - - # We need to evaluate the expression at the origin *before* - # we substitute the expression variables with the - # disaggregated variables - if not NL or mode == "FurmanSawayaGrossmann": - h_0 = clone_without_expression_components( - c.body, substitute=zero_substitute_map) - - y = disjunct.indicator_var - if NL: - if mode == "LeeGrossmann": - sub_expr = clone_without_expression_components( - c.body, - substitute=dict( - (var, subs/y) - for var, subs in iteritems(var_substitute_map) ) - ) - expr = sub_expr * y - elif mode == "GrossmannLee": - sub_expr = clone_without_expression_components( - c.body, - substitute=dict( - (var, subs/(y + EPS)) - for var, subs in iteritems(var_substitute_map) ) - ) - expr = (y + EPS) * sub_expr - elif mode == "FurmanSawayaGrossmann": - sub_expr = clone_without_expression_components( - c.body, - substitute=dict( - (var, subs/((1 - EPS)*y + EPS)) - for var, subs in iteritems(var_substitute_map) ) - ) - expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y) - else: - raise RuntimeError("Unknown NL CHull mode") - else: - expr = clone_without_expression_components( - c.body, substitute=var_substitute_map) - - if c.equality: - if NL: - newConsExpr = expr == c.lower*y - else: 
- v = list(EXPR.identify_variables(expr)) - if len(v) == 1 and not c.lower: - # Setting a variable to 0 in a disjunct is - # *very* common. We should recognize that in - # that structure, the disaggregated variable - # will also be fixed to 0. - v[0].fix(0) - continue - newConsExpr = expr - (1-y)*h_0 == c.lower*y - - if obj.is_indexed(): - newConstraint.add((i, 'eq'), newConsExpr) - else: - newConstraint.add('eq', newConsExpr) - continue - - if c.lower is not None: - # TODO: At the moment there is no reason for this to be in both - # lower and upper... I think there could be though if I say what - # the new constraint is going to be or something. - if __debug__ and logger.isEnabledFor(logging.DEBUG): - _name = c.getname( - fully_qualified=True, name_buffer=NAME_BUFFER) - logger.debug("GDP(cHull): Transforming constraint " + - "'%s'", _name) - if NL: - newConsExpr = expr >= c.lower*y - else: - newConsExpr = expr - (1-y)*h_0 >= c.lower*y - - if obj.is_indexed(): - newConstraint.add((i, 'lb'), newConsExpr) - else: - newConstraint.add('lb', newConsExpr) - - if c.upper is not None: - if __debug__ and logger.isEnabledFor(logging.DEBUG): - _name = c.getname( - fully_qualified=True, name_buffer=NAME_BUFFER) - logger.debug("GDP(cHull): Transforming constraint " + - "'%s'", _name) - if NL: - newConsExpr = expr <= c.upper*y - else: - newConsExpr = expr - (1-y)*h_0 <= c.upper*y - - if obj.is_indexed(): - newConstraint.add((i, 'ub'), newConsExpr) - else: - newConstraint.add('ub', newConsExpr) +from .hull import _Deprecated_Name_Hull as ConvexHull_Transformation diff --git a/pyomo/gdp/plugins/cuttingplane.py b/pyomo/gdp/plugins/cuttingplane.py index 8744e8762d0..984f1b25356 100644 --- a/pyomo/gdp/plugins/cuttingplane.py +++ b/pyomo/gdp/plugins/cuttingplane.py @@ -62,11 +62,11 @@ def _apply_to(self, instance, bigM=None, **kwds): logger.warning("GDP(CuttingPlanes): unrecognized options:\n%s" % ( '\n'.join(iterkeys(options)), )) - instance_rBigM, instance_rCHull, var_info, 
transBlockName \ + instance_rBigM, instance_rHull, var_info, transBlockName \ = self._setup_subproblems(instance, bigM) self._generate_cuttingplanes( - instance, instance_rBigM, instance_rCHull, var_info, transBlockName) + instance, instance_rBigM, instance_rHull, var_info, transBlockName) def _setup_subproblems(self, instance, bigM): @@ -85,9 +85,9 @@ def _setup_subproblems(self, instance, bigM): # we'll store all the cuts we add together transBlock.cuts = Constraint(Any) - # get bigM and chull relaxations + # get bigM and hull relaxations bigMRelaxation = TransformationFactory('gdp.bigm') - chullRelaxation = TransformationFactory('gdp.chull') + hullRelaxation = TransformationFactory('gdp.hull') relaxIntegrality = TransformationFactory('core.relax_integrality') # HACK: for the current writers, we need to also apply gdp.reclassify so @@ -97,14 +97,14 @@ def _setup_subproblems(self, instance, bigM): reclassify = TransformationFactory('gdp.reclassify') # - # Generalte the CHull relaxation (used for the separation + # Generalte the Hull relaxation (used for the separation # problem to generate cutting planes # - instance_rCHull = chullRelaxation.create_using(instance) + instance_rHull = hullRelaxation.create_using(instance) # This relies on relaxIntegrality relaxing variables on deactivated # blocks, which should be fine. - reclassify.apply_to(instance_rCHull) - relaxIntegrality.apply_to(instance_rCHull) + reclassify.apply_to(instance_rHull) + relaxIntegrality.apply_to(instance_rHull) # # Reformulate the instance using the BigM relaxation (this will @@ -119,14 +119,14 @@ def _setup_subproblems(self, instance, bigM): instance_rBigM = relaxIntegrality.create_using(instance) # - # Add the xstar parameter for the CHull problem + # Add the xstar parameter for the Hull problem # - transBlock_rCHull = instance_rCHull.component(transBlockName) + transBlock_rHull = instance_rHull.component(transBlockName) # # this will hold the solution to rbigm each time we solve it. 
We # add it to the transformation block so that we don't have to # worry about name conflicts. - transBlock_rCHull.xstar = Param( + transBlock_rHull.xstar = Param( range(len(transBlock.all_vars)), mutable=True, default=None) transBlock_rBigM = instance_rBigM.component(transBlockName) @@ -138,20 +138,20 @@ def _setup_subproblems(self, instance, bigM): var_info = tuple( (v, transBlock_rBigM.all_vars[i], - transBlock_rCHull.all_vars[i], - transBlock_rCHull.xstar[i]) + transBlock_rHull.all_vars[i], + transBlock_rHull.xstar[i]) for i,v in enumerate(transBlock.all_vars)) # - # Add the separation objective to the chull subproblem + # Add the separation objective to the hull subproblem # - self._add_separation_objective(var_info, transBlock_rCHull) + self._add_separation_objective(var_info, transBlock_rHull) - return instance_rBigM, instance_rCHull, var_info, transBlockName + return instance_rBigM, instance_rHull, var_info, transBlockName def _generate_cuttingplanes( - self, instance, instance_rBigM, instance_rCHull, + self, instance, instance_rBigM, instance_rHull, var_info, transBlockName): opt = SolverFactory(SOLVER) @@ -187,15 +187,15 @@ def _generate_cuttingplanes( % (rBigM_objVal,)) # copy over xstar - for x_bigm, x_rbigm, x_chull, x_star in var_info: + for x_bigm, x_rbigm, x_hull, x_star in var_info: x_star.value = x_rbigm.value # initialize the X values - x_chull.value = x_rbigm.value + x_hull.value = x_rbigm.value # solve separation problem to get xhat. - results = opt.solve(instance_rCHull, tee=stream_solvers) + results = opt.solve(instance_rHull, tee=stream_solvers) if verify_successful_solve(results) is not NORMAL: - logger.warning("GDP.cuttingplane: CHull separation subproblem " + logger.warning("GDP.cuttingplane: Hull separation subproblem " "did not solve normally. 
Stopping cutting " "plane generation.\n\n%s" % (results,)) return @@ -224,16 +224,16 @@ def _add_relaxation_block(self, instance, name): return transBlockName, transBlock - def _add_separation_objective(self, var_info, transBlock_rCHull): + def _add_separation_objective(self, var_info, transBlock_rHull): # Deactivate any/all other objectives - for o in transBlock_rCHull.model().component_data_objects(Objective): + for o in transBlock_rHull.model().component_data_objects(Objective): o.deactivate() obj_expr = 0 - for x_bigm, x_rbigm, x_chull, x_star in var_info: - obj_expr += (x_chull - x_star)**2 + for x_bigm, x_rbigm, x_hull, x_star in var_info: + obj_expr += (x_hull - x_star)**2 # add separation objective to transformation block - transBlock_rCHull.separation_objective = Objective(expr=obj_expr) + transBlock_rHull.separation_objective = Objective(expr=obj_expr) def _add_cut(self, var_info, transBlock, transBlock_rBigM): @@ -244,12 +244,12 @@ def _add_cut(self, var_info, transBlock, transBlock_rBigM): cutexpr_bigm = 0 cutexpr_rBigM = 0 - for x_bigm, x_rbigm, x_chull, x_star in var_info: - # xhat = x_chull.value + for x_bigm, x_rbigm, x_hull, x_star in var_info: + # xhat = x_hull.value cutexpr_bigm += ( - x_chull.value - x_star.value)*(x_bigm - x_chull.value) + x_hull.value - x_star.value)*(x_bigm - x_hull.value) cutexpr_rBigM += ( - x_chull.value - x_star.value)*(x_rbigm - x_chull.value) + x_hull.value - x_star.value)*(x_rbigm - x_hull.value) transBlock.cuts.add(cut_number, cutexpr_bigm >= 0) transBlock_rBigM.cuts.add(cut_number, cutexpr_rBigM >= 0) diff --git a/pyomo/gdp/plugins/hull.py b/pyomo/gdp/plugins/hull.py new file mode 100644 index 00000000000..daf951ea3e5 --- /dev/null +++ b/pyomo/gdp/plugins/hull.py @@ -0,0 +1,1008 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract 
DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging + +import pyomo.common.config as cfg +from pyomo.common import deprecated +from pyomo.common.modeling import unique_component_name +from pyomo.core.expr.numvalue import ZeroConstant +from pyomo.core.base.component import ActiveComponent, ComponentUID +from pyomo.core.kernel.component_map import ComponentMap +from pyomo.core.kernel.component_set import ComponentSet +import pyomo.core.expr.current as EXPR +from pyomo.core.base import Transformation, TransformationFactory +from pyomo.core import ( + Block, Connector, Constraint, Param, Set, Suffix, Var, + Expression, SortComponents, TraversalStrategy, + Any, RangeSet, Reals, value, NonNegativeIntegers +) +from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.util import (clone_without_expression_components, target_list, + is_child_of, get_src_disjunction, + get_src_constraint, get_transformed_constraints, + get_src_disjunct, _warn_for_active_disjunction, + _warn_for_active_disjunct) +from pyomo.gdp.plugins.gdp_var_mover import HACK_GDP_Disjunct_Reclassifier + +from functools import wraps +from six import iteritems, iterkeys +from weakref import ref as weakref_ref + +logger = logging.getLogger('pyomo.gdp.hull') + +NAME_BUFFER = {} + +@TransformationFactory.register( + 'gdp.hull', + doc="Relax disjunctive model by forming the hull reformulation.") +class Hull_Reformulation(Transformation): + """Relax disjunctive model by forming the hull reformulation. + + Relaxes a disjunctive model into an algebraic model by forming the + hull reformulation of each disjunction. 
+ + This transformation accepts the following keyword arguments: + + Parameters + ---------- + perspective_function : str + The perspective function used for the disaggregated variables. + Must be one of 'FurmanSawayaGrossmann' (default), + 'LeeGrossmann', or 'GrossmannLee' + EPS : float + The value to use for epsilon [default: 1e-4] + targets : (block, disjunction, or list of those types) + The targets to transform. This can be a block, disjunction, or a + list of blocks and Disjunctions [default: the instance] + + The transformation will create a new Block with a unique + name beginning "_pyomo_gdp_hull_reformulation". That Block will + contain an indexed Block named "relaxedDisjuncts", which will hold + the relaxed disjuncts. This block is indexed by an integer + indicating the order in which the disjuncts were relaxed. + Each block has a dictionary "_constraintMap": + + 'srcConstraints': ComponentMap(: + ), + 'transformedConstraints':ComponentMap(: + , + : + [] + ) + + It will have a dictionary "_disaggregatedVarMap: + 'srcVar': ComponentMap(:), + 'disaggregatedVar': ComponentMap(:) + + And, last, it will have a ComponentMap "_bigMConstraintMap": + + : + + All transformed Disjuncts will have a pointer to the block their transformed + constraints are on, and all transformed Disjunctions will have a + pointer to the corresponding OR or XOR constraint. + + The _pyomo_gdp_hull_reformulation block will have a ComponentMap + "_disaggregationConstraintMap": + :ComponentMap(: ) + + """ + + + CONFIG = cfg.ConfigBlock('gdp.hull') + CONFIG.declare('targets', cfg.ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" + + This specifies the target or list of targets to relax as either a + component or a list of components. If None (default), the entire model + is transformed. 
Note that if the transformation is done out of place, + the list of targets should be attached to the model before it is cloned, + and the list will specify the targets on the cloned instance.""" + )) + CONFIG.declare('perspective function', cfg.ConfigValue( + default='FurmanSawayaGrossmann', + domain=cfg.In(['FurmanSawayaGrossmann','LeeGrossmann','GrossmannLee']), + description='perspective function used for variable disaggregation', + doc=""" + The perspective function used for variable disaggregation + + "LeeGrossmann" is the original NL convex hull from Lee & + Grossmann (2000) [1]_, which substitutes nonlinear constraints + + h_ik(x) <= 0 + + with + + x_k = sum( nu_ik ) + y_ik * h_ik( nu_ik/y_ik ) <= 0 + + "GrossmannLee" is an updated formulation from Grossmann & + Lee (2003) [2]_, which avoids divide-by-0 errors by using: + + x_k = sum( nu_ik ) + (y_ik + eps) * h_ik( nu_ik/(y_ik + eps) ) <= 0 + + "FurmanSawayaGrossmann" (default) is an improved relaxation [3]_ + that is exact at 0 and 1 while avoiding numerical issues from + the Lee & Grossmann formulation by using: + + x_k = sum( nu_ik ) + ((1-eps)*y_ik + eps) * h_ik( nu_ik/((1-eps)*y_ik + eps) ) \ + - eps * h_ki(0) * ( 1-y_ik ) <= 0 + + References + ---------- + .. [1] Lee, S., & Grossmann, I. E. (2000). New algorithms for + nonlinear generalized disjunctive programming. Computers and + Chemical Engineering, 24, 2125-2141 + + .. [2] Grossmann, I. E., & Lee, S. (2003). Generalized disjunctive + programming: Nonlinear convex hull relaxation and algorithms. + Computational Optimization and Applications, 26, 83-100. + + .. [3] Furman, K., Sawaya, N., and Grossmann, I. A computationally + useful algebraic representation of nonlinear disjunctive convex + sets using the perspective function. Optimization Online + (2016). http://www.optimization-online.org/DB_HTML/2016/07/5544.html. 
+ """ + )) + CONFIG.declare('EPS', cfg.ConfigValue( + default=1e-4, + domain=cfg.PositiveFloat, + description="Epsilon value to use in perspective function", + )) + CONFIG.declare('assume_fixed_vars_permanent', cfg.ConfigValue( + default=False, + domain=bool, + description="Boolean indicating whether or not to transform so that the " + "the transformed model will still be valid when fixed Vars are unfixed.", + doc=""" + If True, the transformation will not disaggregate fixed variables. + This means that if a fixed variable is unfixed after transformation, + the transformed model is no longer valid. By default, the transformation + will disagregate fixed variables so that any later fixing and unfixing + will be valid in the transformed model. + """ + )) + + def __init__(self): + super(Hull_Reformulation, self).__init__() + self.handlers = { + Constraint : self._transform_constraint, + Var : False, + Connector : False, + Expression : False, + Param : False, + Set : False, + RangeSet: False, + Suffix : False, + Disjunction: self._warn_for_active_disjunction, + Disjunct: self._warn_for_active_disjunct, + Block: self._transform_block_on_disjunct, + } + + def _add_local_vars(self, block, local_var_dict): + localVars = block.component('LocalVars') + if type(localVars) is Suffix: + for disj, var_list in iteritems(localVars): + if local_var_dict.get(disj) is None: + local_var_dict[disj] = ComponentSet(var_list) + else: + local_var_dict[disj].update(var_list) + + def _get_local_var_suffixes(self, block, local_var_dict): + # You can specify suffixes on any block (disjuncts included). This method + # starts from a Disjunct (presumably) and checks for a LocalVar suffixes + # going both up and down the tree, adding them into the dictionary that + # is the second argument. 
+ + # first look beneath where we are (there could be Blocks on this + # disjunct) + for b in block.component_data_objects(Block, descend_into=(Block), + active=True, + sort=SortComponents.deterministic): + self._add_local_vars(b, local_var_dict) + # now traverse upwards and get what's above + while block is not None: + self._add_local_vars(block, local_var_dict) + block = block.parent_block() + + return local_var_dict + + def _apply_to(self, instance, **kwds): + assert not NAME_BUFFER + try: + self._apply_to_impl(instance, **kwds) + finally: + # Clear the global name buffer now that we are done + NAME_BUFFER.clear() + + def _apply_to_impl(self, instance, **kwds): + self._config = self.CONFIG(kwds.pop('options', {})) + self._config.set_value(kwds) + + targets = self._config.targets + if targets is None: + targets = ( instance, ) + _HACK_transform_whole_instance = True + else: + _HACK_transform_whole_instance = False + knownBlocks = {} + for t in targets: + # check that t is in fact a child of instance + if not is_child_of(parent=instance, child=t, + knownBlocks=knownBlocks): + raise GDP_Error( + "Target '%s' is not a component on instance '%s'!" + % (t.name, instance.name)) + elif t.ctype is Disjunction: + if t.is_indexed(): + self._transform_disjunction(t) + else: + self._transform_disjunctionData(t, t.index()) + elif t.ctype in (Block, Disjunct): + if t.is_indexed(): + self._transform_block(t) + else: + self._transform_blockData(t) + else: + raise GDP_Error( + "Target '%s' was not a Block, Disjunct, or Disjunction. " + "It was of type %s and can't be transformed." 
+ % (t.name, type(t)) ) + + # HACK for backwards compatibility with the older GDP transformations + # + # Until the writers are updated to find variables on things other than + # active blocks, we need to reclassify the Disjuncts as Blocks after + # transformation so that the writer will pick up all the variables that + # it needs (in this case, indicator_vars and also variables which are + # declared in a single Disjunct and only used on that Disjunct (as they + # will not be disaggregated)). + if _HACK_transform_whole_instance: + HACK_GDP_Disjunct_Reclassifier().apply_to(instance) + + def _add_transformation_block(self, instance): + # make a transformation block on instance where we will store + # transformed components + transBlockName = unique_component_name( + instance, + '_pyomo_gdp_hull_reformulation') + transBlock = Block() + instance.add_component(transBlockName, transBlock) + transBlock.relaxedDisjuncts = Block(NonNegativeIntegers) + transBlock.lbub = Set(initialize = ['lb','ub','eq']) + # We will store all of the disaggregation constraints for any + # Disjunctions we transform onto this block here. 
+ transBlock.disaggregationConstraints = Constraint(NonNegativeIntegers, + Any) + + # This will map from srcVar to a map of srcDisjunction to the + # disaggregation constraint corresponding to srcDisjunction + transBlock._disaggregationConstraintMap = ComponentMap() + + return transBlock + + def _transform_block(self, obj): + for i in sorted(iterkeys(obj)): + self._transform_blockData(obj[i]) + + def _transform_blockData(self, obj): + # Transform every (active) disjunction in the block + for disjunction in obj.component_objects( + Disjunction, + active=True, + sort=SortComponents.deterministic, + descend_into=(Block,Disjunct), + descent_order=TraversalStrategy.PostfixDFS): + self._transform_disjunction(disjunction) + + def _add_xor_constraint(self, disjunction, transBlock): + # Put XOR constraint on the transformation block + + # We never do this for just a DisjunctionData because we need + # to know about the index set of its parent component. So if + # we called this on a DisjunctionData, we did something wrong. + assert isinstance(disjunction, Disjunction) + + # check if the constraint already exists + if disjunction._algebraic_constraint is not None: + return disjunction._algebraic_constraint() + + # add the XOR (or OR) constraints to parent block (with + # unique name) It's indexed if this is an + # IndexedDisjunction, not otherwise + orC = Constraint(disjunction.index_set()) + transBlock.add_component( + unique_component_name(transBlock, + disjunction.getname(fully_qualified=True, + name_buffer=NAME_BUFFER) +\ + '_xor'), orC) + disjunction._algebraic_constraint = weakref_ref(orC) + + return orC + + def _transform_disjunction(self, obj): + # NOTE: this check is actually necessary because it's possible we go + # straight to this function when we use targets. 
+ if not obj.active: + return + + # put the transformation block on the parent block of the Disjunction, + # unless this is a disjunction we have seen in a prior call to hull, in + # which case we will use the same transformation block we created + # before. + if obj._algebraic_constraint is not None: + transBlock = obj._algebraic_constraint().parent_block() + else: + transBlock = self._add_transformation_block(obj.parent_block()) + # and create the xor constraint + xorConstraint = self._add_xor_constraint(obj, transBlock) + + # create the disjunction constraint and disaggregation + # constraints and then relax each of the disjunctionDatas + for i in sorted(iterkeys(obj)): + self._transform_disjunctionData(obj[i], i, transBlock) + + # deactivate so the writers will be happy + obj.deactivate() + + def _transform_disjunctionData(self, obj, index, transBlock=None): + if not obj.active: + return + # Hull reformulation doesn't work if this is an OR constraint. So if + # xor is false, give up + if not obj.xor: + raise GDP_Error("Cannot do hull reformulation for " + "Disjunction '%s' with OR constraint. " + "Must be an XOR!" % obj.name) + + if transBlock is None: + # It's possible that we have already created a transformation block + # for another disjunctionData from this same container. If that's + # the case, let's use the same transformation block. (Else it will + # be really confusing that the XOR constraint goes to that old block + # but we create a new one here.) 
+ if obj.parent_component()._algebraic_constraint is not None: + transBlock = obj.parent_component()._algebraic_constraint().\ + parent_block() + else: + transBlock = self._add_transformation_block(obj.parent_block()) + + parent_component = obj.parent_component() + + orConstraint = self._add_xor_constraint(parent_component, transBlock) + disaggregationConstraint = transBlock.disaggregationConstraints + disaggregationConstraintMap = transBlock._disaggregationConstraintMap + + # Just because it's unlikely this is what someone meant to do... + if len(obj.disjuncts) == 0: + raise GDP_Error("Disjunction '%s' is empty. This is " + "likely indicative of a modeling error." % + obj.getname(fully_qualified=True, + name_buffer=NAME_BUFFER)) + + # We first go through and collect all the variables that we + # are going to disaggregate. + varOrder_set = ComponentSet() + varOrder = [] + varsByDisjunct = ComponentMap() + localVarsByDisjunct = ComponentMap() + include_fixed_vars = not self._config.assume_fixed_vars_permanent + for disjunct in obj.disjuncts: + disjunctVars = varsByDisjunct[disjunct] = ComponentSet() + for cons in disjunct.component_data_objects( + Constraint, + active = True, + sort=SortComponents.deterministic, + descend_into=Block): + # [ESJ 02/14/2020] By default, we disaggregate fixed variables + # on the philosophy that fixing is not a promise for the future + # and we are mathematically wrong if we don't transform these + # correctly and someone later unfixes them and keeps playing + # with their transformed model. 
However, the user may have set + # assume_fixed_vars_permanent to True in which case we will skip + # them + for var in EXPR.identify_variables( + cons.body, include_fixed=include_fixed_vars): + # Note the use of a list so that we will + # eventually disaggregate the vars in a + # deterministic order (the order that we found + # them) + disjunctVars.add(var) + if not var in varOrder_set: + varOrder.append(var) + varOrder_set.add(var) + + # check for LocalVars Suffix + localVarsByDisjunct = self._get_local_var_suffixes( + disjunct, localVarsByDisjunct) + + # We will disaggregate all variables which are not explicitly declared + # as being local. Note however, that we do declare our own disaggregated + # variables as local, so they will not be re-disaggregated. + varSet = [] + # values of localVarsByDisjunct are ComponentSets, so we need this for + # determinism (we iterate through the localVars later) + localVars = [] + for var in varOrder: + disjuncts = [d for d in varsByDisjunct if var in varsByDisjunct[d]] + # clearly not local if used in more than one disjunct + if len(disjuncts) > 1: + if __debug__ and logger.isEnabledFor(logging.DEBUG): + logger.debug("Assuming '%s' is not a local var since it is" + "used in multiple disjuncts." % + var.getname(fully_qualified=True, + name_buffer=NAME_BUFFER)) + varSet.append(var) + elif localVarsByDisjunct.get(disjuncts[0]) is not None: + if var in localVarsByDisjunct[disjuncts[0]]: + localVars.append(var) + else: + varSet.append(var) + else: + varSet.append(var) + + # Now that we know who we need to disaggregate, we will do it + # while we also transform the disjuncts. 
+ or_expr = 0 + for disjunct in obj.disjuncts: + or_expr += disjunct.indicator_var + self._transform_disjunct(disjunct, transBlock, varSet, localVars) + orConstraint.add(index, (or_expr, 1)) + # map the DisjunctionData to its XOR constraint to mark it as + # transformed + obj._algebraic_constraint = weakref_ref(orConstraint[index]) + + for i, var in enumerate(varSet): + disaggregatedExpr = 0 + for disjunct in obj.disjuncts: + if disjunct._transformation_block is None: + # Because we called _transform_disjunct in the loop above, + # we know that if this isn't transformed it is because it + # was cleanly deactivated, and we can just skip it. + continue + + disaggregatedVar = disjunct._transformation_block().\ + _disaggregatedVarMap['disaggregatedVar'][var] + disaggregatedExpr += disaggregatedVar + + disaggregationConstraint.add((i, index), var == disaggregatedExpr) + # and update the map so that we can find this later. We index by + # variable and the particular disjunction because there is a + # different one for each disjunction + if disaggregationConstraintMap.get(var) is not None: + disaggregationConstraintMap[var][obj] = disaggregationConstraint[ + (i, index)] + else: + thismap = disaggregationConstraintMap[var] = ComponentMap() + thismap[obj] = disaggregationConstraint[(i, index)] + + # deactivate for the writers + obj.deactivate() + + def _transform_disjunct(self, obj, transBlock, varSet, localVars): + # deactivated should only come from the user + if not obj.active: + if obj.indicator_var.is_fixed(): + if value(obj.indicator_var) == 0: + # The user cleanly deactivated the disjunct: there + # is nothing for us to do here. + return + else: + raise GDP_Error( + "The disjunct '%s' is deactivated, but the " + "indicator_var is fixed to %s. This makes no sense." 
+ % ( obj.name, value(obj.indicator_var) )) + if obj._transformation_block is None: + raise GDP_Error( + "The disjunct '%s' is deactivated, but the " + "indicator_var is not fixed and the disjunct does not " + "appear to have been relaxed. This makes no sense. " + "(If the intent is to deactivate the disjunct, fix its " + "indicator_var to 0.)" + % ( obj.name, )) + + if obj._transformation_block is not None: + # we've transformed it, which means this is the second time it's + # appearing in a Disjunction + raise GDP_Error( + "The disjunct '%s' has been transformed, but a disjunction " + "it appears in has not. Putting the same disjunct in " + "multiple disjunctions is not supported." % obj.name) + + # create a relaxation block for this disjunct + relaxedDisjuncts = transBlock.relaxedDisjuncts + relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)] + + # add the map that will link back and forth between transformed + # constraints and their originals. + relaxationBlock._constraintMap = { + 'srcConstraints': ComponentMap(), + 'transformedConstraints': ComponentMap() + } + # Map between disaggregated variables for this disjunct and their + # originals + relaxationBlock._disaggregatedVarMap = { + 'srcVar': ComponentMap(), + 'disaggregatedVar': ComponentMap(), + } + # Map between disaggregated variables and their lb*indicator <= var <= + # ub*indicator constraints + relaxationBlock._bigMConstraintMap = ComponentMap() + + # add mappings to source disjunct (so we'll know we've relaxed) + obj._transformation_block = weakref_ref(relaxationBlock) + relaxationBlock._srcDisjunct = weakref_ref(obj) + + # add Suffix to the relaxation block that disaggregated variables are + # local (in case this is nested in another Disjunct) + local_var_set = None + parent_disjunct = obj.parent_block() + while parent_disjunct is not None: + if parent_disjunct.ctype is Disjunct: + break + parent_disjunct = parent_disjunct.parent_block() + if parent_disjunct is not None: + localVarSuffix = 
relaxationBlock.LocalVars = Suffix( + direction=Suffix.LOCAL) + local_var_set = localVarSuffix[parent_disjunct] = ComponentSet() + + # add the disaggregated variables and their bigm constraints + # to the relaxationBlock + for var in varSet: + lb = var.lb + ub = var.ub + if lb is None or ub is None: + raise GDP_Error("Variables that appear in disjuncts must be " + "bounded in order to use the hull " + "transformation! Missing bound for %s." + % (var.name)) + + disaggregatedVar = Var(within=Reals, + bounds=(min(0, lb), max(0, ub)), + initialize=var.value) + # naming conflicts are possible here since this is a bunch + # of variables from different blocks coming together, so we + # get a unique name + disaggregatedVarName = unique_component_name( + relaxationBlock, + var.getname(fully_qualified=False, name_buffer=NAME_BUFFER), + ) + relaxationBlock.add_component( disaggregatedVarName, + disaggregatedVar) + # mark this as local because we won't re-disaggregate if this is a + # nested disjunction + if local_var_set is not None: + local_var_set.add(disaggregatedVar) + # store the mappings from variables to their disaggregated selves on + # the transformation block. + relaxationBlock._disaggregatedVarMap['disaggregatedVar'][ + var] = disaggregatedVar + relaxationBlock._disaggregatedVarMap['srcVar'][ + disaggregatedVar] = var + + bigmConstraint = Constraint(transBlock.lbub) + relaxationBlock.add_component( + disaggregatedVarName + "_bounds", bigmConstraint) + if lb: + bigmConstraint.add( + 'lb', obj.indicator_var*lb <= disaggregatedVar) + if ub: + bigmConstraint.add( + 'ub', disaggregatedVar <= obj.indicator_var*ub) + + relaxationBlock._bigMConstraintMap[disaggregatedVar] = bigmConstraint + + for var in localVars: + lb = var.lb + ub = var.ub + if lb is None or ub is None: + raise GDP_Error("Variables that appear in disjuncts must be " + "bounded in order to use the hull " + "transformation! Missing bound for %s." 
+ % (var.name)) + if value(lb) > 0: + var.setlb(0) + if value(ub) < 0: + var.setub(0) + + # map it to itself + relaxationBlock._disaggregatedVarMap['disaggregatedVar'][var] = var + relaxationBlock._disaggregatedVarMap['srcVar'][var] = var + + # naming conflicts are possible here since this is a bunch + # of variables from different blocks coming together, so we + # get a unique name + conName = unique_component_name( + relaxationBlock, + var.getname(fully_qualified=False, name_buffer=NAME_BUFFER) + \ + "_bounds") + bigmConstraint = Constraint(transBlock.lbub) + relaxationBlock.add_component(conName, bigmConstraint) + if lb: + bigmConstraint.add('lb', obj.indicator_var*lb <= var) + if ub: + bigmConstraint.add('ub', var <= obj.indicator_var*ub) + relaxationBlock._bigMConstraintMap[var] = bigmConstraint + + var_substitute_map = dict((id(v), newV) for v, newV in iteritems( + relaxationBlock._disaggregatedVarMap['disaggregatedVar'])) + zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in \ + iteritems( + relaxationBlock._disaggregatedVarMap[ + 'disaggregatedVar'])) + zero_substitute_map.update((id(v), ZeroConstant) for v in localVars) + + # Transform each component within this disjunct + self._transform_block_components(obj, obj, var_substitute_map, + zero_substitute_map) + + # deactivate disjunct so writers can be happy + obj._deactivate_without_fixing_indicator() + + def _transform_block_components( self, block, disjunct, var_substitute_map, + zero_substitute_map): + # As opposed to bigm, in hull we do not need to do anything special for + # nested disjunctions. The indicator variables and disaggregated + # variables of the inner disjunction will need to be disaggregated again + # anyway, and nothing will get double-bigm-ed. (If an untransformed + # disjunction is lurking here, we will catch it below). + + # Look through the component map of block and transform everything we + # have a handler for. Yell if we don't know how to handle it. 
(Note that + # because we only iterate through active components, this means + # non-ActiveComponent types cannot have handlers.) + for obj in block.component_objects(active=True, descend_into=False): + handler = self.handlers.get(obj.ctype, None) + if not handler: + if handler is None: + raise GDP_Error( + "No hull transformation handler registered " + "for modeling components of type %s. If your " + "disjuncts contain non-GDP Pyomo components that " + "require transformation, please transform them first." + % obj.ctype ) + continue + # obj is what we are transforming, we pass disjunct + # through so that we will have access to the indicator + # variables down the line. + handler(obj, disjunct, var_substitute_map, zero_substitute_map) + + def _warn_for_active_disjunction( self, disjunction, disjunct, + var_substitute_map, zero_substitute_map): + _warn_for_active_disjunction(disjunction, disjunct, NAME_BUFFER) + + def _warn_for_active_disjunct( self, innerdisjunct, outerdisjunct, + var_substitute_map, zero_substitute_map): + _warn_for_active_disjunct(innerdisjunct, outerdisjunct, NAME_BUFFER) + + def _transform_block_on_disjunct( self, block, disjunct, var_substitute_map, + zero_substitute_map): + # We look through everything on the component map of the block + # and transform it just as we would if it was on the disjunct + # directly. (We are passing the disjunct through so that when + # we find constraints, _transform_constraint will have access to + # the correct indicator variable. + for i in sorted(iterkeys(block)): + self._transform_block_components( block[i], disjunct, + var_substitute_map, + zero_substitute_map) + + def _transform_constraint(self, obj, disjunct, var_substitute_map, + zero_substitute_map): + # we will put a new transformed constraint on the relaxation block. 
+ relaxationBlock = disjunct._transformation_block() + transBlock = relaxationBlock.parent_block() + varMap = relaxationBlock._disaggregatedVarMap['disaggregatedVar'] + constraintMap = relaxationBlock._constraintMap + + # Though rare, it is possible to get naming conflicts here + # since constraints from all blocks are getting moved onto the + # same block. So we get a unique name + name = unique_component_name(relaxationBlock, obj.getname( + fully_qualified=True, name_buffer=NAME_BUFFER)) + + if obj.is_indexed(): + newConstraint = Constraint(obj.index_set(), transBlock.lbub) + else: + newConstraint = Constraint(transBlock.lbub) + relaxationBlock.add_component(name, newConstraint) + # map the containers: + # add mapping of original constraint to transformed constraint + if obj.is_indexed(): + constraintMap['transformedConstraints'][obj] = newConstraint + # add mapping of transformed constraint container back to original + # constraint container (or SimpleConstraint) + constraintMap['srcConstraints'][newConstraint] = obj + + for i in sorted(iterkeys(obj)): + c = obj[i] + if not c.active: + continue + + NL = c.body.polynomial_degree() not in (0,1) + EPS = self._config.EPS + mode = self._config.perspective_function + + # We need to evaluate the expression at the origin *before* + # we substitute the expression variables with the + # disaggregated variables + if not NL or mode == "FurmanSawayaGrossmann": + h_0 = clone_without_expression_components( + c.body, substitute=zero_substitute_map) + + y = disjunct.indicator_var + if NL: + if mode == "LeeGrossmann": + sub_expr = clone_without_expression_components( + c.body, + substitute=dict( + (var, subs/y) + for var, subs in iteritems(var_substitute_map) ) + ) + expr = sub_expr * y + elif mode == "GrossmannLee": + sub_expr = clone_without_expression_components( + c.body, + substitute=dict( + (var, subs/(y + EPS)) + for var, subs in iteritems(var_substitute_map) ) + ) + expr = (y + EPS) * sub_expr + elif mode == 
"FurmanSawayaGrossmann": + sub_expr = clone_without_expression_components( + c.body, + substitute=dict( + (var, subs/((1 - EPS)*y + EPS)) + for var, subs in iteritems(var_substitute_map) ) + ) + expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y) + else: + raise RuntimeError("Unknown NL Hull mode") + else: + expr = clone_without_expression_components( + c.body, substitute=var_substitute_map) + + if c.equality: + if NL: + # ESJ TODO: This can't happen right? This is the only + # obvious case where someone has messed up, but this has to + # be nonconvex, right? Shouldn't we tell them? + newConsExpr = expr == c.lower*y + else: + v = list(EXPR.identify_variables(expr)) + if len(v) == 1 and not c.lower: + # Setting a variable to 0 in a disjunct is + # *very* common. We should recognize that in + # that structure, the disaggregated variable + # will also be fixed to 0. + v[0].fix(0) + # ESJ: If you ask where the transformed constraint is, + # the answer is nowhere. Really, it is in the bounds of + # this variable, so I'm going to return + # it. Alternatively we could return an empty list, but I + # think I like this better. + constraintMap['transformedConstraints'][c] = [v[0]] + # Reverse map also (this is strange) + constraintMap['srcConstraints'][v[0]] = c + continue + newConsExpr = expr - (1-y)*h_0 == c.lower*y + + if obj.is_indexed(): + newConstraint.add((i, 'eq'), newConsExpr) + # map the _ConstraintDatas (we mapped the container above) + constraintMap[ + 'transformedConstraints'][c] = [newConstraint[i,'eq']] + constraintMap['srcConstraints'][newConstraint[i,'eq']] = c + else: + newConstraint.add('eq', newConsExpr) + # map to the _ConstraintData (And yes, for + # SimpleConstraints, this is overwriting the map to the + # container we made above, and that is what I want to + # happen. SimpleConstraints will map to lists. 
For + # IndexedConstraints, we can map the container to the + # container, but more importantly, we are mapping the + # _ConstraintDatas to each other above) + constraintMap[ + 'transformedConstraints'][c] = [newConstraint['eq']] + constraintMap['srcConstraints'][newConstraint['eq']] = c + + continue + + if c.lower is not None: + if __debug__ and logger.isEnabledFor(logging.DEBUG): + _name = c.getname( + fully_qualified=True, name_buffer=NAME_BUFFER) + logger.debug("GDP(Hull): Transforming constraint " + + "'%s'", _name) + if NL: + newConsExpr = expr >= c.lower*y + else: + newConsExpr = expr - (1-y)*h_0 >= c.lower*y + + if obj.is_indexed(): + newConstraint.add((i, 'lb'), newConsExpr) + constraintMap[ + 'transformedConstraints'][c] = [newConstraint[i,'lb']] + constraintMap['srcConstraints'][newConstraint[i,'lb']] = c + else: + newConstraint.add('lb', newConsExpr) + constraintMap[ + 'transformedConstraints'][c] = [newConstraint['lb']] + constraintMap['srcConstraints'][newConstraint['lb']] = c + + if c.upper is not None: + if __debug__ and logger.isEnabledFor(logging.DEBUG): + _name = c.getname( + fully_qualified=True, name_buffer=NAME_BUFFER) + logger.debug("GDP(Hull): Transforming constraint " + + "'%s'", _name) + if NL: + newConsExpr = expr <= c.upper*y + else: + newConsExpr = expr - (1-y)*h_0 <= c.upper*y + + if obj.is_indexed(): + newConstraint.add((i, 'ub'), newConsExpr) + # map (have to account for fact we might have created list + # above + transformed = constraintMap['transformedConstraints'].get(c) + if transformed is not None: + transformed.append(newConstraint[i,'ub']) + else: + constraintMap['transformedConstraints'][ + c] = [newConstraint[i,'ub']] + constraintMap['srcConstraints'][newConstraint[i,'ub']] = c + else: + newConstraint.add('ub', newConsExpr) + transformed = constraintMap['transformedConstraints'].get(c) + if transformed is not None: + transformed.append(newConstraint['ub']) + else: + constraintMap['transformedConstraints'][ + c] = 
[newConstraint['ub']] + constraintMap['srcConstraints'][newConstraint['ub']] = c + + # deactivate now that we have transformed + obj.deactivate() + + # These are all functions to retrieve transformed components from + # original ones and vice versa. + + @wraps(get_src_disjunct) + def get_src_disjunct(self, transBlock): + return get_src_disjunct(transBlock) + + @wraps(get_src_disjunction) + def get_src_disjunction(self, xor_constraint): + return get_src_disjunction(xor_constraint) + + @wraps(get_src_constraint) + def get_src_constraint(self, transformedConstraint): + return get_src_constraint(transformedConstraint) + + @wraps(get_transformed_constraints) + def get_transformed_constraints(self, srcConstraint): + return get_transformed_constraints(srcConstraint) + + def get_disaggregated_var(self, v, disjunct): + """ + Returns the disaggregated variable corresponding to the Var v and the + Disjunct disjunct. + + If v is a local variable, this method will return v. + + Parameters + ---------- + v: a Var which appears in a constraint in a transformed Disjunct + disjunct: a transformed Disjunct in which v appears + """ + if disjunct._transformation_block is None: + raise GDP_Error("Disjunct '%s' has not been transformed" + % disjunct.name) + transBlock = disjunct._transformation_block() + try: + return transBlock._disaggregatedVarMap['disaggregatedVar'][v] + except: + logger.error("It does not appear '%s' is a " + "variable which appears in disjunct '%s'" + % (v.name, disjunct.name)) + raise + + def get_src_var(self, disaggregated_var): + """ + Returns the original model variable to which disaggregated_var + corresponds. 
+ + Parameters + ---------- + disaggregated_var: a Var which was created by the hull + transformation as a disaggregated variable + (and so appears on a transformation block + of some Disjunct) + """ + transBlock = disaggregated_var.parent_block() + try: + return transBlock._disaggregatedVarMap['srcVar'][disaggregated_var] + except: + logger.error("'%s' does not appear to be a disaggregated variable" + % disaggregated_var.name) + raise + + # retrieves the disaggregation constraint for original_var resulting from + # transforming disjunction + def get_disaggregation_constraint(self, original_var, disjunction): + """ + Returns the disaggregation (re-aggregation?) constraint + (which links the disaggregated variables to their original) + corresponding to original_var and the transformation of disjunction. + + Parameters + ---------- + original_var: a Var which was disaggregated in the transformation + of Disjunction disjunction + disjunction: a transformed Disjunction containing original_var + """ + for disjunct in disjunction.disjuncts: + transBlock = disjunct._transformation_block + if transBlock is not None: + break + if transBlock is None: + raise GDP_Error("Disjunction '%s' has not been properly transformed: " + "None of its disjuncts are transformed." + % disjunction.name) + + try: + return transBlock().parent_block()._disaggregationConstraintMap[ + original_var][disjunction] + except: + logger.error("It doesn't appear that '%s' is a variable that was " + "disaggregated by Disjunction '%s'" % + (original_var.name, disjunction.name)) + raise + + def get_var_bounds_constraint(self, v): + """ + Returns the IndexedConstraint which sets a disaggregated + variable to be within its bounds when its Disjunct is active and to + be 0 otherwise. (It is always an IndexedConstraint because each + bound becomes a separate constraint.) 
+ + Parameters + ---------- + v: a Var which was created by the hull transformation as a + disaggregated variable (and so appears on a transformation + block of some Disjunct) + """ + # This can only go well if v is a disaggregated var + transBlock = v.parent_block() + try: + return transBlock._bigMConstraintMap[v] + except: + logger.error("Either '%s' is not a disaggregated variable, or " + "the disjunction that disaggregates it has not " + "been properly transformed." % v.name) + raise + + +@TransformationFactory.register( + 'gdp.chull', + doc="Deprecated name for the hull reformulation. Please use 'gdp.hull'.") +class _Deprecated_Name_Hull(Hull_Reformulation): + @deprecated("The 'gdp.chull' name is deprecated. Please use the more apt 'gdp.hull' instead.", + logger='pyomo.gdp', + version="TBD", remove_in="TBD") + def __init__(self): + super(_Deprecated_Name_Hull, self).__init__() diff --git a/pyomo/gdp/tests/common_tests.py b/pyomo/gdp/tests/common_tests.py new file mode 100644 index 00000000000..0fc54dc74cc --- /dev/null +++ b/pyomo/gdp/tests/common_tests.py @@ -0,0 +1,1499 @@ +from pyomo.environ import * +from pyomo.gdp import * +from pyomo.core.base import constraint +from pyomo.repn import generate_standard_repn +import pyomo.gdp.tests.models as models +from six import StringIO +import random + +# utitility functions + +def check_linear_coef(self, repn, var, coef): + # utility used to check a variable-coefficient pair in a standard_repn + var_id = None + for i,v in enumerate(repn.linear_vars): + if v is var: + var_id = i + self.assertIsNotNone(var_id) + self.assertEqual(repn.linear_coefs[var_id], coef) + +def diff_apply_to_and_create_using(self, model, transformation): + # compares the pprint from the transformed model after using both apply_to + # and create_using to make sure the two do the same thing + modelcopy = TransformationFactory(transformation).create_using(model) + modelcopy_buf = StringIO() + modelcopy.pprint(ostream=modelcopy_buf) + 
modelcopy_output = modelcopy_buf.getvalue() + + # reset the seed for the apply_to call. + random.seed(666) + TransformationFactory(transformation).apply_to(model) + model_buf = StringIO() + model.pprint(ostream=model_buf) + model_output = model_buf.getvalue() + self.assertMultiLineEqual(modelcopy_output, model_output) + +def check_relaxation_block(self, m, name, numdisjuncts): + # utility for checking the transformation block (this method is generic to + # bigm and hull though there is more on the hull transformation block, and + # the lbub set differs between the two + transBlock = m.component(name) + self.assertIsInstance(transBlock, Block) + self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), numdisjuncts) + +def checkb0TargetsInactive(self, m): + self.assertTrue(m.disjunct1.active) + self.assertTrue(m.disjunct1[1,0].active) + self.assertTrue(m.disjunct1[1,1].active) + self.assertTrue(m.disjunct1[2,0].active) + self.assertTrue(m.disjunct1[2,1].active) + + self.assertFalse(m.b[0].disjunct.active) + self.assertFalse(m.b[0].disjunct[0].active) + self.assertFalse(m.b[0].disjunct[1].active) + self.assertTrue(m.b[1].disjunct0.active) + self.assertTrue(m.b[1].disjunct1.active) + +def checkb0TargetsTransformed(self, m, transformation): + trans = TransformationFactory('gdp.%s' % transformation) + disjBlock = m.b[0].component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance(disjBlock[0].component("b[0].disjunct[0].c"), + Constraint) + self.assertIsInstance(disjBlock[1].component("b[0].disjunct[1].c"), + Constraint) + + # This relies on the disjunctions being transformed in the same order + # every time. 
This dictionary maps the block index to the list of + # pairs of (originalDisjunctIndex, transBlockIndex) + pairs = [ + (0,0), + (1,1), + ] + for i, j in pairs: + self.assertIs(m.b[0].disjunct[i].transformation_block(), + disjBlock[j]) + self.assertIs(trans.get_src_disjunct(disjBlock[j]), + m.b[0].disjunct[i]) + +# active status checks + +def check_user_deactivated_disjuncts(self, transformation): + # check that we do not transform a deactivated DisjunctData + m = models.makeTwoTermDisj() + m.d[0].deactivate() + transform = TransformationFactory('gdp.%s' % transformation) + transform.apply_to(m, targets=(m,)) + + self.assertFalse(m.disjunction.active) + self.assertFalse(m.d[1].active) + + rBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + disjBlock = rBlock.relaxedDisjuncts + self.assertEqual(len(disjBlock), 1) + self.assertIs(disjBlock[0], m.d[1].transformation_block()) + self.assertIs(transform.get_src_disjunct(disjBlock[0]), m.d[1]) + +def check_improperly_deactivated_disjuncts(self, transformation): + # check that if a Disjunct is deactivated but its indicator variable is not + # fixed to 0, we express our confusion. + m = models.makeTwoTermDisj() + m.d[0].deactivate() + self.assertEqual(value(m.d[0].indicator_var), 0) + self.assertTrue(m.d[0].indicator_var.is_fixed()) + m.d[0].indicator_var.fix(1) + self.assertRaisesRegexp( + GDP_Error, + "The disjunct 'd\[0\]' is deactivated, but the " + "indicator_var is fixed to 1. 
This makes no sense.", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +def check_indexed_disjunction_not_transformed(self, m, transformation): + # no transformation block, nothing transformed + self.assertIsNone(m.component("_pyomo_gdp_%s_transformation" + % transformation)) + for idx in m.disjunct: + self.assertIsNone(m.disjunct[idx].transformation_block) + for idx in m.disjunction: + self.assertIsNone(m.disjunction[idx].algebraic_constraint) + +def check_do_not_transform_userDeactivated_indexedDisjunction(self, + transformation): + # check that we do not transform a deactivated disjunction + m = models.makeTwoTermIndexedDisjunction() + # If you truly want to transform nothing, deactivate everything + m.disjunction.deactivate() + for idx in m.disjunct: + m.disjunct[idx].deactivate() + directly = TransformationFactory('gdp.%s' % transformation).create_using(m) + check_indexed_disjunction_not_transformed(self, directly, transformation) + + targets = TransformationFactory('gdp.%s' % transformation).create_using( + m, targets=(m.disjunction)) + check_indexed_disjunction_not_transformed(self, targets, transformation) + +def check_disjunction_deactivated(self, transformation): + # check that we deactivate disjunctions after we transform them + m = models.makeTwoTermDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,)) + + oldblock = m.component("disjunction") + self.assertIsInstance(oldblock, Disjunction) + self.assertFalse(oldblock.active) + +def check_disjunctDatas_deactivated(self, transformation): + # check that we deactivate disjuncts after we transform them + m = models.makeTwoTermDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,)) + + oldblock = m.component("disjunction") + self.assertFalse(oldblock.disjuncts[0].active) + self.assertFalse(oldblock.disjuncts[1].active) + +def check_deactivated_constraints(self, transformation): + # test that we deactivate constraints after we 
transform them + m = models.makeTwoTermDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + oldblock = m.component("d") + # old constraints still there, deactivated + oldc1 = oldblock[1].component("c1") + self.assertIsInstance(oldc1, Constraint) + self.assertFalse(oldc1.active) + + oldc2 = oldblock[1].component("c2") + self.assertIsInstance(oldc2, Constraint) + self.assertFalse(oldc2.active) + + oldc = oldblock[0].component("c") + self.assertIsInstance(oldc, Constraint) + self.assertFalse(oldc.active) + +def check_deactivated_disjuncts(self, transformation): + # another test that we deactivated transformed Disjuncts, but this one + # includes a SimpleDisjunct as well + m = models.makeTwoTermMultiIndexedDisjunction() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,)) + # all the disjuncts got transformed, so all should be deactivated + for i in m.disjunct.index_set(): + self.assertFalse(m.disjunct[i].active) + self.assertFalse(m.disjunct.active) + +def check_deactivated_disjunctions(self, transformation): + # another test that we deactivated transformed Disjunctions, but including a + # SimpleDisjunction + m = models.makeTwoTermMultiIndexedDisjunction() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,)) + + # all the disjunctions got transformed, so they should be + # deactivated too + for i in m.disjunction.index_set(): + self.assertFalse(m.disjunction[i].active) + self.assertFalse(m.disjunction.active) + +def check_do_not_transform_twice_if_disjunction_reactivated(self, + transformation): + # test that if an already-transformed disjunction is reactivated, we will + # not retransform it in a subsequent call to the transformation. + m = models.makeTwoTermDisj() + # this is a hack, but just diff the pprint from this and from calling + # the transformation again. 
+ TransformationFactory('gdp.%s' % transformation).apply_to(m) + first_buf = StringIO() + m.pprint(ostream=first_buf) + first_output = first_buf.getvalue() + + TransformationFactory('gdp.%s' % transformation).apply_to(m) + second_buf = StringIO() + m.pprint(ostream=second_buf) + second_output = second_buf.getvalue() + + self.assertMultiLineEqual(first_output, second_output) + + # this is a stupid thing to do, but we should still know not to + # retransform because active status is now *not* the source of truth. + m.disjunction.activate() + + # This is kind of the wrong error, but I'll live with it: at least we + # get an error. + self.assertRaisesRegexp( + GDP_Error, + "The disjunct 'd\[0\]' has been transformed, but a disjunction " + "it appears in has not. Putting the same disjunct in " + "multiple disjunctions is not supported.", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +def check_constraints_deactivated_indexedDisjunction(self, transformation): + # check that we deactivate transformed constraints + m = models.makeTwoTermMultiIndexedDisjunction() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + for i in m.disjunct.index_set(): + self.assertFalse(m.disjunct[i].c.active) + +def check_partial_deactivate_indexed_disjunction(self, transformation): + """Test for partial deactivation of an indexed disjunction.""" + m = ConcreteModel() + m.x = Var(bounds=(0, 10)) + @m.Disjunction([0, 1]) + def disj(m, i): + if i == 0: + return [m.x >= 1, m.x >= 2] + else: + return [m.x >= 3, m.x >= 4] + + m.disj[0].disjuncts[0].indicator_var.fix(1) + m.disj[0].disjuncts[1].indicator_var.fix(1) + m.disj[0].deactivate() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + self.assertEqual( + len(transBlock.disj_xor), 1, + "There should only be one XOR constraint generated. Found %s." 
% + len(transBlock.disj_xor)) + +# transformation block + +def check_transformation_block_name_collision(self, transformation): + # make sure that if the model already has a block called + # _pyomo_gdp_*_relaxation that we come up with a different name for the + # transformation block (and put the relaxed disjuncts on it) + m = models.makeTwoTermDisj() + # add block with the name we are about to try to use + m.add_component("_pyomo_gdp_%s_reformulation" % transformation, Block(Any)) + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + # check that we got a uniquely named block + transBlock = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) + self.assertIsInstance(transBlock, Block) + + # check that the relaxed disjuncts really are here. + disjBlock = transBlock.relaxedDisjuncts + self.assertIsInstance(disjBlock, Block) + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance(disjBlock[0].component("d[0].c"), Constraint) + self.assertIsInstance(disjBlock[1].component("d[1].c1"), Constraint) + self.assertIsInstance(disjBlock[1].component("d[1].c2"), Constraint) + + # we didn't add to the block that wasn't ours + self.assertEqual(len(m.component("_pyomo_gdp_%s_reformulation" % + transformation)), 0) + +# XOR constraints + +def check_indicator_vars(self, transformation): + # particularly paranoid test checking that the indicator_vars are intact + # after transformation + m = models.makeTwoTermDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + oldblock = m.component("d") + # have indicator variables on original disjuncts and they are still + # active. 
+ self.assertIsInstance(oldblock[0].indicator_var, Var) + self.assertTrue(oldblock[0].indicator_var.active) + self.assertTrue(oldblock[0].indicator_var.is_binary()) + self.assertIsInstance(oldblock[1].indicator_var, Var) + self.assertTrue(oldblock[1].indicator_var.active) + self.assertTrue(oldblock[1].indicator_var.is_binary()) + +def check_xor_constraint(self, transformation): + # verify xor constraint for a SimpleDisjunction + m = models.makeTwoTermDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + # make sure we created the xor constraint and put it on the relaxation + # block + rBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + xor = rBlock.component("disjunction_xor") + self.assertIsInstance(xor, Constraint) + self.assertEqual(len(xor), 1) + self.assertIs(m.d[0].indicator_var, xor.body.arg(0)) + self.assertIs(m.d[1].indicator_var, xor.body.arg(1)) + repn = generate_standard_repn(xor.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + check_linear_coef(self, repn, m.d[0].indicator_var, 1) + check_linear_coef(self, repn, m.d[1].indicator_var, 1) + self.assertEqual(xor.lower, 1) + self.assertEqual(xor.upper, 1) + +def check_indexed_xor_constraints(self, transformation): + # verify xor constraint for an IndexedDisjunction + m = models.makeTwoTermMultiIndexedDisjunction() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + component("disjunction_xor") + self.assertIsInstance(xor, Constraint) + for i in m.disjunction.index_set(): + repn = generate_standard_repn(xor[i].body) + self.assertEqual(repn.constant, 0) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + check_linear_coef( + self, repn, m.disjunction[i].disjuncts[0].indicator_var, 1) + check_linear_coef( + self, repn, m.disjunction[i].disjuncts[1].indicator_var, 1) + self.assertEqual(xor[i].lower, 1) + 
self.assertEqual(xor[i].upper, 1) + +def check_indexed_xor_constraints_with_targets(self, transformation): + # check that when we use targets to specfy some DisjunctionDatas in an + # IndexedDisjunction, the xor constraint is indexed correctly + m = models.makeTwoTermIndexedDisjunction_BoundedVars() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.disjunction[1], + m.disjunction[3]]) + + xorC = m.disjunction[1].algebraic_constraint().parent_component() + self.assertIsInstance(xorC, Constraint) + self.assertEqual(len(xorC), 2) + + # check the constraints + for i in [1,3]: + self.assertEqual(xorC[i].lower, 1) + self.assertEqual(xorC[i].upper, 1) + repn = generate_standard_repn(xorC[i].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + check_linear_coef(self, repn, m.disjunct[i, 0].indicator_var, 1) + check_linear_coef(self, repn, m.disjunct[i, 1].indicator_var, 1) + +def check_three_term_xor_constraint(self, transformation): + # check that the xor constraint has all the indicator variables from a + # three-term disjunction + m = models.makeThreeTermIndexedDisj() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + component("disjunction_xor") + self.assertIsInstance(xor, Constraint) + self.assertEqual(xor[1].lower, 1) + self.assertEqual(xor[1].upper, 1) + self.assertEqual(xor[2].lower, 1) + self.assertEqual(xor[2].upper, 1) + + repn = generate_standard_repn(xor[1].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 3) + for i in range(3): + check_linear_coef(self, repn, m.disjunct[i,1].indicator_var, 1) + + repn = generate_standard_repn(xor[2].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 3) + for i in range(3): + check_linear_coef(self, repn, m.disjunct[i,2].indicator_var, 1) + + +# 
mappings + +def check_xor_constraint_mapping(self, transformation): + # test that we correctly map between disjunctions and XOR constraints + m = models.makeTwoTermDisj() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + self.assertIs( trans.get_src_disjunction(transBlock.disjunction_xor), + m.disjunction) + self.assertIs( m.disjunction.algebraic_constraint(), + transBlock.disjunction_xor) + + +def check_xor_constraint_mapping_two_disjunctions(self, transformation): + # test that we correctly map between disjunctions and xor constraints when + # we have multiple SimpleDisjunctions (probably redundant with the above) + m = models.makeDisjunctionOfDisjunctDatas() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + transBlock2 = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) + self.assertIs( trans.get_src_disjunction(transBlock.disjunction_xor), + m.disjunction) + self.assertIs( trans.get_src_disjunction(transBlock2.disjunction2_xor), + m.disjunction2) + + self.assertIs( m.disjunction.algebraic_constraint(), + transBlock.disjunction_xor) + self.assertIs( m.disjunction2.algebraic_constraint(), + transBlock2.disjunction2_xor) + +def check_disjunct_mapping(self, transformation): + # check that we correctly map between Disjuncts and their transformation + # blocks + m = models.makeTwoTermDisj_Nonlinear() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + + # the disjuncts will always be transformed in the same order, + # and d[0] goes first, so we can check in a loop. 
+ for i in [0,1]: + self.assertIs(disjBlock[i]._srcDisjunct(), m.d[i]) + self.assertIs(trans.get_src_disjunct(disjBlock[i]), m.d[i]) + +# targets + +def check_only_targets_inactive(self, transformation): + # test that we only transform targets (by checking active status) + m = models.makeTwoSimpleDisjunctions() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.disjunction1]) + + self.assertFalse(m.disjunction1.active) + self.assertIsNotNone(m.disjunction1._algebraic_constraint) + # disjunction2 still active + self.assertTrue(m.disjunction2.active) + self.assertIsNone(m.disjunction2._algebraic_constraint) + + self.assertFalse(m.disjunct1[0].active) + self.assertFalse(m.disjunct1[1].active) + self.assertFalse(m.disjunct1.active) + self.assertTrue(m.disjunct2[0].active) + self.assertTrue(m.disjunct2[1].active) + self.assertTrue(m.disjunct2.active) + +def check_only_targets_get_transformed(self, transformation): + # test that we only transform targets (by checking the actual components) + m = models.makeTwoSimpleDisjunctions() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to( + m, + targets=[m.disjunction1]) + + disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + # only two disjuncts relaxed + self.assertEqual(len(disjBlock), 2) + # Note that in hull, these aren't the only components that get created, but + # they are a proxy for which disjuncts got relaxed, which is what we want to + # check. 
+ self.assertIsInstance(disjBlock[0].component("disjunct1[0].c"), + Constraint) + self.assertIsInstance(disjBlock[1].component("disjunct1[1].c"), + Constraint) + + pairs = [ + (0, 0), + (1, 1) + ] + for i, j in pairs: + self.assertIs(disjBlock[i], m.disjunct1[j].transformation_block()) + self.assertIs(trans.get_src_disjunct(disjBlock[i]), m.disjunct1[j]) + + self.assertIsNone(m.disjunct2[0].transformation_block) + self.assertIsNone(m.disjunct2[1].transformation_block) + +def check_targets_with_container_as_arg(self, transformation): + # check that we can give a Disjunction as the argument to the transformation + # and use targets to specify a DisjunctionData to transform + m = models.makeTwoTermIndexedDisjunction() + TransformationFactory('gdp.%s' % transformation).apply_to( + m.disjunction, + targets=(m.disjunction[2])) + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + self.assertIsNone(m.disjunction[1].algebraic_constraint) + self.assertIsNone(m.disjunction[3].algebraic_constraint) + self.assertIs(m.disjunction[2].algebraic_constraint(), + transBlock.disjunction_xor[2]) + self.assertIs(m.disjunction._algebraic_constraint(), + transBlock.disjunction_xor) + +def check_target_not_a_component_error(self, transformation): + # test error message for crazy targets + decoy = ConcreteModel() + decoy.block = Block() + m = models.makeTwoSimpleDisjunctions() + self.assertRaisesRegexp( + GDP_Error, + "Target 'block' is not a component on instance 'unknown'!", + TransformationFactory('gdp.%s' % transformation).apply_to, + m, + targets=[decoy.block]) + +def check_targets_cannot_be_cuids(self, transformation): + # check that we scream if targets are cuids + m = models.makeTwoTermDisj() + self.assertRaisesRegexp( + ValueError, + "invalid value for configuration 'targets':\n" + "\tFailed casting \[disjunction\]\n" + "\tto target_list\n" + "\tError: Expected Component or list of Components." 
+ "\n\tRecieved %s" % type(ComponentUID(m.disjunction)), + TransformationFactory('gdp.%s' % transformation).apply_to, + m, + targets=[ComponentUID(m.disjunction)]) + +def check_indexedDisj_targets_inactive(self, transformation): + # check that targets are deactivated (when target is IndexedDisjunction) + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.disjunction1]) + + self.assertFalse(m.disjunction1.active) + self.assertFalse(m.disjunction1[1].active) + self.assertFalse(m.disjunction1[2].active) + + self.assertFalse(m.disjunct1[1,0].active) + self.assertFalse(m.disjunct1[1,1].active) + self.assertFalse(m.disjunct1[2,0].active) + self.assertFalse(m.disjunct1[2,1].active) + self.assertFalse(m.disjunct1.active) + + self.assertTrue(m.b[0].disjunct[0].active) + self.assertTrue(m.b[0].disjunct[1].active) + self.assertTrue(m.b[1].disjunct0.active) + self.assertTrue(m.b[1].disjunct1.active) + +def check_indexedDisj_only_targets_transformed(self, transformation): + # check that only the targets are transformed (with IndexedDisjunction as + # target) + m = models.makeDisjunctionsOnIndexedBlock() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to( + m, + targets=[m.disjunction1]) + + disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock), 4) + self.assertIsInstance(disjBlock[0].component("disjunct1[1,0].c"), + Constraint) + self.assertIsInstance(disjBlock[1].component("disjunct1[1,1].c"), + Constraint) + self.assertIsInstance(disjBlock[2].component("disjunct1[2,0].c"), + Constraint) + self.assertIsInstance(disjBlock[3].component("disjunct1[2,1].c"), + Constraint) + + # This relies on the disjunctions being transformed in the same order + # every time. These are the mappings between the indices of the original + # disjuncts and the indices on the indexed block on the transformation + # block. 
+ pairs = [ + ((1,0), 0), + ((1,1), 1), + ((2,0), 2), + ((2,1), 3), + ] + for i, j in pairs: + self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) + self.assertIs(disjBlock[j], m.disjunct1[i].transformation_block()) + +def check_warn_for_untransformed(self, transformation): + # Check that we complain if we find an untransformed Disjunct inside of + # another Disjunct we are transforming + m = models.makeDisjunctionsOnIndexedBlock() + def innerdisj_rule(d, flag): + m = d.model() + if flag: + d.c = Constraint(expr=m.a[1] <= 2) + else: + d.c = Constraint(expr=m.a[1] >= 65) + m.disjunct1[1,1].innerdisjunct = Disjunct([0,1], rule=innerdisj_rule) + m.disjunct1[1,1].innerdisjunction = Disjunction([0], + rule=lambda a,i: [m.disjunct1[1,1].innerdisjunct[0], + m.disjunct1[1,1].innerdisjunct[1]]) + # This test relies on the order that the component objects of + # the disjunct get considered. In this case, the disjunct + # causes the error, but in another world, it could be the + # disjunction, which is also active. + self.assertRaisesRegexp( + GDP_Error, + "Found active disjunct 'disjunct1\[1,1\].innerdisjunct\[0\]' " + "in disjunct 'disjunct1\[1,1\]'!.*", + TransformationFactory('gdp.%s' % transformation).create_using, + m, + targets=[m.disjunction1[1]]) + # + # we will make that disjunction come first now... + # + tmp = m.disjunct1[1,1].innerdisjunct + m.disjunct1[1,1].del_component(tmp) + m.disjunct1[1,1].add_component('innerdisjunct', tmp) + self.assertRaisesRegexp( + GDP_Error, + "Found untransformed disjunction 'disjunct1\[1,1\]." 
+ "innerdisjunction\[0\]' in disjunct 'disjunct1\[1,1\]'!.*", + TransformationFactory('gdp.%s' % transformation).create_using, + m, + targets=[m.disjunction1[1]]) + # Deactivating the disjunction will allow us to get past it back + # to the Disjunct (after we realize there are no active + # DisjunctionData within the active Disjunction) + m.disjunct1[1,1].innerdisjunction[0].deactivate() + self.assertRaisesRegexp( + GDP_Error, + "Found active disjunct 'disjunct1\[1,1\].innerdisjunct\[0\]' " + "in disjunct 'disjunct1\[1,1\]'!.*", + TransformationFactory('gdp.%s' % transformation).create_using, + m, + targets=[m.disjunction1[1]]) + +def check_disjData_targets_inactive(self, transformation): + # check targets deactivated with DisjunctionData is the target + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.disjunction1[2]]) + + self.assertIsNotNone(m.disjunction1[2]._algebraic_constraint) + self.assertFalse(m.disjunction1[2].active) + + self.assertTrue(m.disjunct1.active) + self.assertIsNotNone(m.disjunction1._algebraic_constraint) + self.assertTrue(m.disjunct1[1,0].active) + self.assertIsNone(m.disjunct1[1,0]._transformation_block) + self.assertTrue(m.disjunct1[1,1].active) + self.assertIsNone(m.disjunct1[1,1]._transformation_block) + self.assertFalse(m.disjunct1[2,0].active) + self.assertIsNotNone(m.disjunct1[2,0]._transformation_block) + self.assertFalse(m.disjunct1[2,1].active) + self.assertIsNotNone(m.disjunct1[2,1]._transformation_block) + + self.assertTrue(m.b[0].disjunct.active) + self.assertTrue(m.b[0].disjunct[0].active) + self.assertIsNone(m.b[0].disjunct[0]._transformation_block) + self.assertTrue(m.b[0].disjunct[1].active) + self.assertIsNone(m.b[0].disjunct[1]._transformation_block) + self.assertTrue(m.b[1].disjunct0.active) + self.assertIsNone(m.b[1].disjunct0._transformation_block) + self.assertTrue(m.b[1].disjunct1.active) + 
self.assertIsNone(m.b[1].disjunct1._transformation_block) + +def check_disjData_only_targets_transformed(self, transformation): + # check that targets are transformed when DisjunctionData is the target + m = models.makeDisjunctionsOnIndexedBlock() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to( + m, + targets=[m.disjunction1[2]]) + + disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance(disjBlock[0].component("disjunct1[2,0].c"), + Constraint) + self.assertIsInstance(disjBlock[1].component("disjunct1[2,1].c"), + Constraint) + + # This relies on the disjunctions being transformed in the same order + # every time. These are the mappings between the indices of the original + # disjuncts and the indices on the indexed block on the transformation + # block. + pairs = [ + ((2,0), 0), + ((2,1), 1), + ] + for i, j in pairs: + self.assertIs(m.disjunct1[i].transformation_block(), disjBlock[j]) + self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) + +def check_indexedBlock_targets_inactive(self, transformation): + # check that targets are deactivated when target is an IndexedBlock + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.b]) + + self.assertTrue(m.disjunct1.active) + self.assertTrue(m.disjunct1[1,0].active) + self.assertTrue(m.disjunct1[1,1].active) + self.assertTrue(m.disjunct1[2,0].active) + self.assertTrue(m.disjunct1[2,1].active) + self.assertIsNone(m.disjunct1[1,0].transformation_block) + self.assertIsNone(m.disjunct1[1,1].transformation_block) + self.assertIsNone(m.disjunct1[2,0].transformation_block) + self.assertIsNone(m.disjunct1[2,1].transformation_block) + + self.assertFalse(m.b[0].disjunct.active) + self.assertFalse(m.b[0].disjunct[0].active) + self.assertFalse(m.b[0].disjunct[1].active) + self.assertFalse(m.b[1].disjunct0.active) + 
self.assertFalse(m.b[1].disjunct1.active) + +def check_indexedBlock_only_targets_transformed(self, transformation): + # check that targets are transformed when target is an IndexedBlock + m = models.makeDisjunctionsOnIndexedBlock() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to( + m, + targets=[m.b]) + + disjBlock1 = m.b[0].component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock1), 2) + self.assertIsInstance(disjBlock1[0].component("b[0].disjunct[0].c"), + Constraint) + self.assertIsInstance(disjBlock1[1].component("b[0].disjunct[1].c"), + Constraint) + disjBlock2 = m.b[1].component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock2), 2) + self.assertIsInstance(disjBlock2[0].component("b[1].disjunct0.c"), + Constraint) + self.assertIsInstance(disjBlock2[1].component("b[1].disjunct1.c"), + Constraint) + + # This relies on the disjunctions being transformed in the same order + # every time. 
This dictionary maps the block index to the list of + # pairs of (originalDisjunctIndex, transBlockIndex) + pairs = { + 0: + [ + ('disjunct',0,0), + ('disjunct',1,1), + ], + 1: + [ + ('disjunct0',None,0), + ('disjunct1',None,1), + ] + } + + for blocknum, lst in iteritems(pairs): + for comp, i, j in lst: + original = m.b[blocknum].component(comp) + if blocknum == 0: + disjBlock = disjBlock1 + if blocknum == 1: + disjBlock = disjBlock2 + self.assertIs(original[i].transformation_block(), disjBlock[j]) + self.assertIs(trans.get_src_disjunct(disjBlock[j]), original[i]) + +def check_blockData_targets_inactive(self, transformation): + # test that BlockData target is deactivated + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.b[0]]) + + checkb0TargetsInactive(self, m) + +def check_blockData_only_targets_transformed(self, transformation): + # test that BlockData target is transformed + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.b[0]]) + checkb0TargetsTransformed(self, m, transformation) + +def check_do_not_transform_deactivated_targets(self, transformation): + # test that if a deactivated component is given as a target, we don't + # transform it. (This is actually an important test because it is the only + # reason to check active status at the beginning of many of the methods in + # the transformation like _transform_disjunct and _transform_disjunction. In + # the absence of targets, those checks wouldn't be necessary.) 
+ m = models.makeDisjunctionsOnIndexedBlock() + m.b[1].deactivate() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.b[0], m.b[1]]) + + checkb0TargetsInactive(self, m) + checkb0TargetsTransformed(self, m, transformation) + +def check_disjunction_data_target(self, transformation): + # test that if we transform DisjunctionDatas one at a time, we get what we + # expect in terms of using the same transformation block and the indexing of + # the xor constraint. + m = models.makeThreeTermIndexedDisj() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.disjunction[2]]) + + # we got a transformation block on the model + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + self.assertIsInstance(transBlock, Block) + self.assertIsInstance(transBlock.component("disjunction_xor"), + Constraint) + self.assertIsInstance(transBlock.disjunction_xor[2], + constraint._GeneralConstraintData) + self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 3) + + # suppose we transform the next one separately + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.disjunction[1]]) + # we added to the same XOR constraint before + self.assertIsInstance(transBlock.disjunction_xor[1], + constraint._GeneralConstraintData) + # we used the same transformation block, so we have more relaxed + # disjuncts + self.assertEqual(len(transBlock.relaxedDisjuncts), 6) + +def check_disjunction_data_target_any_index(self, transformation): + # check the same as the above, but that it still works when the Disjunction + # is indexed by Any. 
+ m = ConcreteModel() + m.x = Var(bounds=(-100, 100)) + m.disjunct3 = Disjunct(Any) + m.disjunct4 = Disjunct(Any) + m.disjunction2=Disjunction(Any) + for i in range(2): + m.disjunct3[i].cons = Constraint(expr=m.x == 2) + m.disjunct4[i].cons = Constraint(expr=m.x <= 3) + m.disjunction2[i] = [m.disjunct3[i], m.disjunct4[i]] + + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.disjunction2[i]]) + + if i == 0: + check_relaxation_block(self, m, "_pyomo_gdp_%s_reformulation" % + transformation, 2) + if i == 2: + check_relaxation_block(self, m, "_pyomo_gdp_%s_reformulation" % + transformation, 4) + +# tests that we treat disjunctions on blocks correctly (the main issue here is +# that if you were to solve that block post-transformation that you would have +# the whole transformed model) + +def check_xor_constraint_added(self, transformation): + # test we put the xor on the transformation block + m = models.makeTwoTermDisjOnBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + self.assertIsInstance( + m.b.component("_pyomo_gdp_%s_reformulation" % transformation).\ + component('b.disjunction_xor'), Constraint) + +def check_trans_block_created(self, transformation): + # check we put the transformation block on the parent block of the + # disjunction + m = models.makeTwoTermDisjOnBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + # test that the transformation block got created on the model + transBlock = m.b.component('_pyomo_gdp_%s_reformulation' % transformation) + self.assertIsInstance(transBlock, Block) + disjBlock = transBlock.component("relaxedDisjuncts") + self.assertIsInstance(disjBlock, Block) + self.assertEqual(len(disjBlock), 2) + # and that it didn't get created on the model + self.assertIsNone(m.component('_pyomo_gdp_%s_reformulation' % transformation)) + + +# disjunction generation tests: These all suppose that you are doing some sort +# of column and constraint generation algorithm, but you 
are in fact generating +# Disjunctions and retransforming the model after each addition. + +def check_iteratively_adding_to_indexed_disjunction_on_block(self, + transformation): + # check that we can iteratively add to an IndexedDisjunction and transform + # the block it lives on + m = ConcreteModel() + m.b = Block() + m.b.x = Var(bounds=(-100, 100)) + m.b.firstTerm = Disjunct([1,2]) + m.b.firstTerm[1].cons = Constraint(expr=m.b.x == 0) + m.b.firstTerm[2].cons = Constraint(expr=m.b.x == 2) + m.b.secondTerm = Disjunct([1,2]) + m.b.secondTerm[1].cons = Constraint(expr=m.b.x >= 2) + m.b.secondTerm[2].cons = Constraint(expr=m.b.x >= 3) + m.b.disjunctionList = Disjunction(Any) + + m.b.obj = Objective(expr=m.b.x) + + for i in range(1,3): + m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] + + TransformationFactory('gdp.%s' % transformation).apply_to(m, + targets=[m.b]) + m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] + + TransformationFactory('gdp.%s' % transformation).apply_to(m, + targets=[m.b]) + + if i == 1: + check_relaxation_block(self, m.b, "_pyomo_gdp_%s_reformulation" % + transformation, 2) + if i == 2: + check_relaxation_block(self, m.b, "_pyomo_gdp_%s_reformulation" % + transformation, 4) + +def check_simple_disjunction_of_disjunct_datas(self, transformation): + # This is actually a reasonable use case if you are generating + # disjunctions with the same structure. So you might have Disjuncts + # indexed by Any and disjunctions indexed by Any and be adding a + # disjunction of two of the DisjunctDatas in every iteration. 
+ m = models.makeDisjunctionOfDisjunctDatas() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + self.check_trans_block_disjunctions_of_disjunct_datas(m) + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + self.assertIsInstance( transBlock.component("disjunction_xor"), + Constraint) + transBlock2 = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) + self.assertIsInstance( transBlock2.component("disjunction2_xor"), + Constraint) + +# these tests have different checks for what ends up on the model between bigm +# and hull, but they have the same structure +def check_iteratively_adding_disjunctions_transform_container(self, + transformation): + # Check that we can play the same game with iteratively adding Disjunctions, + # but this time just specify the IndexedDisjunction as the argument. Note + # that the success of this depends on our rebellion regarding the active + # status of containers. + model = ConcreteModel() + model.x = Var(bounds=(-100, 100)) + model.disjunctionList = Disjunction(Any) + model.obj = Objective(expr=model.x) + for i in range(2): + firstTermName = "firstTerm[%s]" % i + model.add_component(firstTermName, Disjunct()) + model.component(firstTermName).cons = Constraint( + expr=model.x == 2*i) + secondTermName = "secondTerm[%s]" % i + model.add_component(secondTermName, Disjunct()) + model.component(secondTermName).cons = Constraint( + expr=model.x >= i + 2) + model.disjunctionList[i] = [model.component(firstTermName), + model.component(secondTermName)] + + # we're lazy and we just transform the disjunctionList (and in + # theory we are transforming at every iteration because we are + # solving at every iteration) + TransformationFactory('gdp.%s' % transformation).apply_to( + model, targets=[model.disjunctionList]) + if i == 0: + self.check_first_iteration(model) + + if i == 1: + self.check_second_iteration(model) + +def check_disjunction_and_disjuncts_indexed_by_any(self, transformation): + # 
check that we can play the same game when the Disjuncts also are indexed + # by Any + model = ConcreteModel() + model.x = Var(bounds=(-100, 100)) + + model.firstTerm = Disjunct(Any) + model.secondTerm = Disjunct(Any) + model.disjunctionList = Disjunction(Any) + + model.obj = Objective(expr=model.x) + + for i in range(2): + model.firstTerm[i].cons = Constraint(expr=model.x == 2*i) + model.secondTerm[i].cons = Constraint(expr=model.x >= i + 2) + model.disjunctionList[i] = [model.firstTerm[i], model.secondTerm[i]] + + TransformationFactory('gdp.%s' % transformation).apply_to(model) + + if i == 0: + self.check_first_iteration(model) + + if i == 1: + self.check_second_iteration(model) + +def check_iteratively_adding_disjunctions_transform_model(self, transformation): + # Same as above, but transforming whole model in every iteration + model = ConcreteModel() + model.x = Var(bounds=(-100, 100)) + model.disjunctionList = Disjunction(Any) + model.obj = Objective(expr=model.x) + for i in range(2): + firstTermName = "firstTerm[%s]" % i + model.add_component(firstTermName, Disjunct()) + model.component(firstTermName).cons = Constraint( + expr=model.x == 2*i) + secondTermName = "secondTerm[%s]" % i + model.add_component(secondTermName, Disjunct()) + model.component(secondTermName).cons = Constraint( + expr=model.x >= i + 2) + model.disjunctionList[i] = [model.component(firstTermName), + model.component(secondTermName)] + + # we're lazy and we just transform the model (and in + # theory we are transforming at every iteration because we are + # solving at every iteration) + TransformationFactory('gdp.%s' % transformation).apply_to(model) + if i == 0: + self.check_first_iteration(model) + + if i == 1: + self.check_second_iteration(model) + +# transforming blocks + +# If you transform a block as if it is a model, the transformation should +# only modify the block you passed it, else when you solve the block, you +# are missing the disjunction you thought was on there. 
+def check_transformation_simple_block(self, transformation): + m = models.makeTwoTermDisjOnBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m.b) + + # transformation block not on m + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + # transformation block on m.b + self.assertIsInstance(m.b.component("_pyomo_gdp_%s_reformulation" % + transformation), Block) + +def check_transform_block_data(self, transformation): + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m.b[0]) + + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + self.assertIsInstance(m.b[0].component("_pyomo_gdp_%s_reformulation" % + transformation), Block) + +def check_simple_block_target(self, transformation): + m = models.makeTwoTermDisjOnBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b]) + + # transformation block not on m + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + # transformation block on m.b + self.assertIsInstance(m.b.component("_pyomo_gdp_%s_reformulation" % + transformation), Block) + +def check_block_data_target(self, transformation): + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m, + targets=[m.b[0]]) + + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + self.assertIsInstance(m.b[0].component("_pyomo_gdp_%s_reformulation" % + transformation), Block) + +def check_indexed_block_target(self, transformation): + m = models.makeDisjunctionsOnIndexedBlock() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b]) + + # We expect the transformation block on each of the BlockDatas. Because + # it is always going on the parent block of the disjunction. 
+ + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + for i in [0,1]: + self.assertIsInstance( m.b[i].component("_pyomo_gdp_%s_reformulation" % + transformation), Block) + +def check_block_targets_inactive(self, transformation): + m = models.makeTwoTermDisjOnBlock() + m = models.add_disj_not_on_block(m) + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.b]) + + self.assertFalse(m.b.disjunct[0].active) + self.assertFalse(m.b.disjunct[1].active) + self.assertFalse(m.b.disjunct.active) + self.assertTrue(m.simpledisj.active) + self.assertTrue(m.simpledisj2.active) + +def check_block_only_targets_transformed(self, transformation): + m = models.makeTwoTermDisjOnBlock() + m = models.add_disj_not_on_block(m) + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to( + m, + targets=[m.b]) + + disjBlock = m.b.component("_pyomo_gdp_%s_reformulation" % transformation).\ + relaxedDisjuncts + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance(disjBlock[0].component("b.disjunct[0].c"), + Constraint) + self.assertIsInstance(disjBlock[1].component("b.disjunct[1].c"), + Constraint) + + # this relies on the disjuncts being transformed in the same order every + # time + pairs = [ + (0,0), + (1,1), + ] + for i, j in pairs: + self.assertIs(m.b.disjunct[i].transformation_block(), disjBlock[j]) + self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.b.disjunct[i]) + +# common error messages + +def check_transform_empty_disjunction(self, transformation): + m = ConcreteModel() + m.empty = Disjunction(expr=[]) + + self.assertRaisesRegexp( + GDP_Error, + "Disjunction 'empty' is empty. 
This is likely indicative of a " + "modeling error.*", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +def check_deactivated_disjunct_nonzero_indicator_var(self, transformation): + m = ConcreteModel() + m.x = Var(bounds=(0,8)) + m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) + + m.disjunction.disjuncts[0].deactivate() + m.disjunction.disjuncts[0].indicator_var.fix(1) + + self.assertRaisesRegexp( + GDP_Error, + "The disjunct 'disjunction_disjuncts\[0\]' is deactivated, but the " + "indicator_var is fixed to 1. This makes no sense.", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +def check_deactivated_disjunct_unfixed_indicator_var(self, transformation): + m = ConcreteModel() + m.x = Var(bounds=(0,8)) + m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) + + m.disjunction.disjuncts[0].deactivate() + m.disjunction.disjuncts[0].indicator_var.fixed = False + + self.assertRaisesRegexp( + GDP_Error, + "The disjunct 'disjunction_disjuncts\[0\]' is deactivated, but the " + "indicator_var is not fixed and the disjunct does not " + "appear to have been relaxed. This makes no sense. 
" + "\(If the intent is to deactivate the disjunct, fix its " + "indicator_var to 0.\)", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +def check_retrieving_nondisjunctive_components(self, transformation): + m = models.makeTwoTermDisj() + m.b = Block() + m.b.global_cons = Constraint(expr=m.a + m.x >= 8) + m.another_global_cons = Constraint(expr=m.a + m.x <= 11) + + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + self.assertRaisesRegexp( + GDP_Error, + "Constraint 'b.global_cons' is not on a disjunct and so was not " + "transformed", + trans.get_transformed_constraints, + m.b.global_cons) + + self.assertRaisesRegexp( + GDP_Error, + "Constraint 'b.global_cons' is not a transformed constraint", + trans.get_src_constraint, + m.b.global_cons) + + self.assertRaisesRegexp( + GDP_Error, + "Constraint 'another_global_cons' is not a transformed constraint", + trans.get_src_constraint, + m.another_global_cons) + + self.assertRaisesRegexp( + GDP_Error, + "Block 'b' doesn't appear to be a transformation block for a " + "disjunct. No source disjunct found.", + trans.get_src_disjunct, + m.b) + + self.assertRaisesRegexp( + GDP_Error, + "It appears that 'another_global_cons' is not an XOR or OR" + " constraint resulting from transforming a Disjunction.", + trans.get_src_disjunction, + m.another_global_cons) + +def check_silly_target(self, transformation): + m = models.makeTwoTermDisj() + self.assertRaisesRegexp( + GDP_Error, + "Target 'd\[1\].c1' was not a Block, Disjunct, or Disjunction. 
" + "It was of type " + " and " + "can't be transformed.", + TransformationFactory('gdp.%s' % transformation).apply_to, + m, + targets=[m.d[1].c1]) + +def check_ask_for_transformed_constraint_from_untransformed_disjunct( + self, transformation): + m = models.makeTwoTermIndexedDisjunction() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m, targets=m.disjunction[1]) + + self.assertRaisesRegexp( + GDP_Error, + "Constraint 'disjunct\[2,b\].cons_b' is on a disjunct which has " + "not been transformed", + trans.get_transformed_constraints, + m.disjunct[2, 'b'].cons_b) + +def check_error_for_same_disjunct_in_multiple_disjunctions(self, transformation): + m = models.makeDisjunctInMultipleDisjunctions() + self.assertRaisesRegexp( + GDP_Error, + "The disjunct 'disjunct1\[1\]' has been transformed, " + "but a disjunction it appears in has not. Putting the same " + "disjunct in multiple disjunctions is not supported.", + TransformationFactory('gdp.%s' % transformation).apply_to, + m) + +# This is really neurotic, but test that we will create an infeasible XOR +# constraint. We have to because in the case of nested disjunctions, our model +# is not necessarily infeasible because of this. It just might make a Disjunct +# infeasible. +def setup_infeasible_xor_because_all_disjuncts_deactivated(self, transformation): + m = ConcreteModel() + m.x = Var(bounds=(0,8)) + m.y = Var(bounds=(0,7)) + m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) + m.disjunction_disjuncts[0].nestedDisjunction = Disjunction( + expr=[m.y == 6, m.y <= 1]) + # Note that this fixes the indicator variables to 0, but since the + # disjunction is still active, the XOR constraint will be created. So we + # will have to land in the second disjunct of m.disjunction + m.disjunction.disjuncts[0].nestedDisjunction.disjuncts[0].deactivate() + m.disjunction.disjuncts[0].nestedDisjunction.disjuncts[1].deactivate() + # This should create a 0 = 1 XOR constraint, actually... 
+ TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=m.disjunction.disjuncts[0].nestedDisjunction) + + # check that our XOR is the bad thing it should be. + transBlock = m.disjunction.disjuncts[0].component( + "_pyomo_gdp_%s_reformulation" % transformation) + xor = transBlock.component( + "disjunction_disjuncts[0].nestedDisjunction_xor") + self.assertIsInstance(xor, Constraint) + self.assertEqual(value(xor.lower), 1) + self.assertEqual(value(xor.upper), 1) + repn = generate_standard_repn(xor.body) + for v in repn.linear_vars: + self.assertTrue(v.is_fixed()) + self.assertEqual(value(v), 0) + + # make sure when we transform the outer thing, all is well + TransformationFactory('gdp.%s' % transformation).apply_to(m) + + return m + +def check_disjunction_target_err(self, transformation): + m = models.makeNestedDisjunctions() + self.assertRaisesRegexp( + GDP_Error, + "Found active disjunct 'simpledisjunct.innerdisjunct0' in " + "disjunct 'simpledisjunct'!.*", + TransformationFactory('gdp.%s' % transformation).apply_to, + m, + targets=[m.disjunction]) + +def check_activeInnerDisjunction_err(self, transformation): + m = models.makeDuplicatedNestedDisjunction() + self.assertRaisesRegexp( + GDP_Error, + "Found untransformed disjunction " + "'outerdisjunct\[1\].duplicateddisjunction' in disjunct " + "'outerdisjunct\[1\]'! The disjunction must be transformed before " + "the disjunct. 
If you are using targets, put the disjunction " + "before the disjunct in the list.*", + TransformationFactory('gdp.%s' % transformation).apply_to, + m, + targets=[m.outerdisjunct[1].innerdisjunction, + m.disjunction]) + + +# nested disjunctions: hull and bigm have very different handling for nested +# disjunctions, but these tests check *that* everything is transformed, not how + +def check_disjuncts_inactive_nested(self, transformation): + m = models.makeNestedDisjunctions() + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,)) + + self.assertFalse(m.disjunction.active) + self.assertFalse(m.simpledisjunct.active) + self.assertFalse(m.disjunct[0].active) + self.assertFalse(m.disjunct[1].active) + self.assertFalse(m.disjunct.active) + +def check_deactivated_disjunct_leaves_nested_disjunct_active(self, + transformation): + m = models.makeNestedDisjunctions_FlatDisjuncts() + m.d1.deactivate() + # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier + # transformation of Disjuncts to Blocks + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m]) + + self.assertFalse(m.d1.active) + self.assertTrue(m.d1.indicator_var.fixed) + self.assertEqual(m.d1.indicator_var.value, 0) + + self.assertFalse(m.d2.active) + self.assertFalse(m.d2.indicator_var.fixed) + + self.assertTrue(m.d3.active) + self.assertFalse(m.d3.indicator_var.fixed) + + self.assertTrue(m.d4.active) + self.assertFalse(m.d4.indicator_var.fixed) + + m = models.makeNestedDisjunctions_NestedDisjuncts() + m.d1.deactivate() + # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier + # transformation of Disjuncts to Blocks + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m]) + + self.assertFalse(m.d1.active) + self.assertTrue(m.d1.indicator_var.fixed) + self.assertEqual(m.d1.indicator_var.value, 0) + + self.assertFalse(m.d2.active) + self.assertFalse(m.d2.indicator_var.fixed) + + self.assertTrue(m.d1.d3.active) + 
self.assertFalse(m.d1.d3.indicator_var.fixed) + + self.assertTrue(m.d1.d4.active) + self.assertFalse(m.d1.d4.indicator_var.fixed) + +def check_mappings_between_disjunctions_and_xors(self, transformation): + m = models.makeNestedDisjunctions() + transform = TransformationFactory('gdp.%s' % transformation) + transform.apply_to(m) + + transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) + + disjunctionPairs = [ + (m.disjunction, transBlock.disjunction_xor), + (m.disjunct[1].innerdisjunction[0], + m.disjunct[1].component("_pyomo_gdp_%s_reformulation" % transformation).\ + component("disjunct[1].innerdisjunction_xor")[0]), + (m.simpledisjunct.innerdisjunction, + m.simpledisjunct.component( + "_pyomo_gdp_%s_reformulation" % transformation).component( + "simpledisjunct.innerdisjunction_xor")) + ] + + # check disjunction mappings + for disjunction, xor in disjunctionPairs: + self.assertIs(disjunction.algebraic_constraint(), xor) + self.assertIs(transform.get_src_disjunction(xor), disjunction) + +def check_disjunct_targets_inactive(self, transformation): + m = models.makeNestedDisjunctions() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.simpledisjunct]) + + self.assertTrue(m.disjunct.active) + self.assertTrue(m.disjunct[0].active) + self.assertTrue(m.disjunct[1].active) + self.assertTrue(m.disjunct[1].innerdisjunct.active) + self.assertTrue(m.disjunct[1].innerdisjunct[0].active) + self.assertTrue(m.disjunct[1].innerdisjunct[1].active) + + # We basically just treated simpledisjunct as a block. It + # itself has not been transformed and should not be + # deactivated. We just transformed everything in it. 
+ self.assertTrue(m.simpledisjunct.active) + self.assertFalse(m.simpledisjunct.innerdisjunct0.active) + self.assertFalse(m.simpledisjunct.innerdisjunct1.active) + +def check_disjunct_only_targets_transformed(self, transformation): + m = models.makeNestedDisjunctions() + transform = TransformationFactory('gdp.%s' % transformation) + transform.apply_to( + m, + targets=[m.simpledisjunct]) + + disjBlock = m.simpledisjunct.component("_pyomo_gdp_%s_reformulation" % + transformation).relaxedDisjuncts + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance( + disjBlock[0].component("simpledisjunct.innerdisjunct0.c"), + Constraint) + self.assertIsInstance( + disjBlock[1].component("simpledisjunct.innerdisjunct1.c"), + Constraint) + + # This also relies on the disjuncts being transformed in the same + # order every time. + pairs = [ + (0,0), + (1,1), + ] + for i, j in pairs: + self.assertIs(m.simpledisjunct.component('innerdisjunct%d'%i), + transform.get_src_disjunct(disjBlock[j])) + self.assertIs(disjBlock[j], + m.simpledisjunct.component( + 'innerdisjunct%d'%i).transformation_block()) + +def check_disjunctData_targets_inactive(self, transformation): + m = models.makeNestedDisjunctions() + TransformationFactory('gdp.%s' % transformation).apply_to( + m, + targets=[m.disjunct[1]]) + + self.assertTrue(m.disjunct[0].active) + self.assertTrue(m.disjunct[1].active) + self.assertTrue(m.disjunct.active) + self.assertFalse(m.disjunct[1].innerdisjunct[0].active) + self.assertFalse(m.disjunct[1].innerdisjunct[1].active) + self.assertFalse(m.disjunct[1].innerdisjunct.active) + + self.assertTrue(m.simpledisjunct.active) + self.assertTrue(m.simpledisjunct.innerdisjunct0.active) + self.assertTrue(m.simpledisjunct.innerdisjunct1.active) + +def check_disjunctData_only_targets_transformed(self, transformation): + m = models.makeNestedDisjunctions() + # This is so convoluted, but you can treat a disjunct like a block: + transform = TransformationFactory('gdp.%s' % transformation) + 
transform.apply_to( + m, + targets=[m.disjunct[1]]) + + disjBlock = m.disjunct[1].component("_pyomo_gdp_%s_reformulation" % + transformation).relaxedDisjuncts + self.assertEqual(len(disjBlock), 2) + self.assertIsInstance( + disjBlock[0].component("disjunct[1].innerdisjunct[0].c"), + Constraint) + self.assertIsInstance( + disjBlock[1].component("disjunct[1].innerdisjunct[1].c"), + Constraint) + + # This also relies on the disjuncts being transformed in the same + # order every time. + pairs = [ + (0,0), + (1,1), + ] + for i, j in pairs: + self.assertIs(transform.get_src_disjunct(disjBlock[j]), + m.disjunct[1].innerdisjunct[i]) + self.assertIs(m.disjunct[1].innerdisjunct[i].transformation_block(), + disjBlock[j]) + +# checks for handling of benign types that could be on disjuncts we're +# transforming + +def check_RangeSet(self, transformation): + m = models.makeDisjunctWithRangeSet() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + self.assertIsInstance(m.d1.s, RangeSet) + +def check_Expression(self, transformation): + m = models.makeDisjunctWithExpression() + TransformationFactory('gdp.%s' % transformation).apply_to(m) + self.assertIsInstance(m.d1.e, Expression) diff --git a/pyomo/gdp/tests/jobshop_large_bigm.lp b/pyomo/gdp/tests/jobshop_large_bigm.lp index 48875e5ac7d..65417e69f9b 100644 --- a/pyomo/gdp/tests/jobshop_large_bigm.lp +++ b/pyomo/gdp/tests/jobshop_large_bigm.lp @@ -41,596 +41,596 @@ c_u_Feas(G)_: +1 t(G) <= -17 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_B_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_B_3)_: +1 NoClash(A_B_3_0)_indicator_var +1 NoClash(A_B_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_B_5)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_B_5)_: +1 NoClash(A_B_5_0)_indicator_var +1 NoClash(A_B_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_C_1)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_C_1)_: +1 NoClash(A_C_1_0)_indicator_var +1 NoClash(A_C_1_1)_indicator_var = 1 
-c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_D_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_D_3)_: +1 NoClash(A_D_3_0)_indicator_var +1 NoClash(A_D_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_E_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_E_3)_: +1 NoClash(A_E_3_0)_indicator_var +1 NoClash(A_E_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_E_5)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_E_5)_: +1 NoClash(A_E_5_0)_indicator_var +1 NoClash(A_E_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_F_1)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_F_1)_: +1 NoClash(A_F_1_0)_indicator_var +1 NoClash(A_F_1_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_F_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_F_3)_: +1 NoClash(A_F_3_0)_indicator_var +1 NoClash(A_F_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_G_5)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_G_5)_: +1 NoClash(A_G_5_0)_indicator_var +1 NoClash(A_G_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_C_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_C_2)_: +1 NoClash(B_C_2_0)_indicator_var +1 NoClash(B_C_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_D_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_D_2)_: +1 NoClash(B_D_2_0)_indicator_var +1 NoClash(B_D_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_D_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_D_3)_: +1 NoClash(B_D_3_0)_indicator_var +1 NoClash(B_D_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_E_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_E_2)_: +1 NoClash(B_E_2_0)_indicator_var +1 NoClash(B_E_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_E_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_E_3)_: +1 NoClash(B_E_3_0)_indicator_var +1 NoClash(B_E_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_E_5)_: 
+c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_E_5)_: +1 NoClash(B_E_5_0)_indicator_var +1 NoClash(B_E_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_F_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_F_3)_: +1 NoClash(B_F_3_0)_indicator_var +1 NoClash(B_F_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_G_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_G_2)_: +1 NoClash(B_G_2_0)_indicator_var +1 NoClash(B_G_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_G_5)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_G_5)_: +1 NoClash(B_G_5_0)_indicator_var +1 NoClash(B_G_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_D_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_D_2)_: +1 NoClash(C_D_2_0)_indicator_var +1 NoClash(C_D_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_D_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_D_4)_: +1 NoClash(C_D_4_0)_indicator_var +1 NoClash(C_D_4_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_E_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_E_2)_: +1 NoClash(C_E_2_0)_indicator_var +1 NoClash(C_E_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_F_1)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_F_1)_: +1 NoClash(C_F_1_0)_indicator_var +1 NoClash(C_F_1_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_F_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_F_4)_: +1 NoClash(C_F_4_0)_indicator_var +1 NoClash(C_F_4_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_G_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_G_2)_: +1 NoClash(C_G_2_0)_indicator_var +1 NoClash(C_G_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(C_G_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(C_G_4)_: +1 NoClash(C_G_4_0)_indicator_var +1 NoClash(C_G_4_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_E_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_E_2)_: +1 
NoClash(D_E_2_0)_indicator_var +1 NoClash(D_E_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_E_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_E_3)_: +1 NoClash(D_E_3_0)_indicator_var +1 NoClash(D_E_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_F_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_F_3)_: +1 NoClash(D_F_3_0)_indicator_var +1 NoClash(D_F_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_F_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_F_4)_: +1 NoClash(D_F_4_0)_indicator_var +1 NoClash(D_F_4_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_G_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_G_2)_: +1 NoClash(D_G_2_0)_indicator_var +1 NoClash(D_G_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(D_G_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(D_G_4)_: +1 NoClash(D_G_4_0)_indicator_var +1 NoClash(D_G_4_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(E_F_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(E_F_3)_: +1 NoClash(E_F_3_0)_indicator_var +1 NoClash(E_F_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(E_G_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(E_G_2)_: +1 NoClash(E_G_2_0)_indicator_var +1 NoClash(E_G_2_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(E_G_5)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(E_G_5)_: +1 NoClash(E_G_5_0)_indicator_var +1 NoClash(E_G_5_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(F_G_4)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(F_G_4)_: +1 NoClash(F_G_4_0)_indicator_var +1 NoClash(F_G_4_1)_indicator_var = 1 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +96 NoClash(A_B_3_0)_indicator_var -1 t(A) +1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +97 NoClash(A_B_3_1)_indicator_var +1 t(A) -1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: +94 NoClash(A_B_5_0)_indicator_var -1 t(A) +1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: +95 NoClash(A_B_5_1)_indicator_var +1 t(A) -1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: +98 NoClash(A_C_1_0)_indicator_var -1 t(A) +1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: +95 NoClash(A_C_1_1)_indicator_var +1 t(A) -1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: +102 NoClash(A_D_3_0)_indicator_var -1 t(A) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: +92 NoClash(A_D_3_1)_indicator_var +1 t(A) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: +99 NoClash(A_E_3_0)_indicator_var -1 t(A) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: +96 NoClash(A_E_3_1)_indicator_var +1 t(A) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: +96 NoClash(A_E_5_0)_indicator_var -1 t(A) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: +92 NoClash(A_E_5_1)_indicator_var +1 t(A) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: +94 NoClash(A_F_1_0)_indicator_var -1 t(A) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: +95 NoClash(A_F_1_1)_indicator_var +1 t(A) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: +96 NoClash(A_F_3_0)_indicator_var -1 t(A) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: +98 NoClash(A_F_3_1)_indicator_var +1 t(A) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: +101 NoClash(A_G_5_0)_indicator_var -1 t(A) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: +89 NoClash(A_G_5_1)_indicator_var +1 t(A) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: +101 NoClash(B_C_2_0)_indicator_var -1 t(B) +1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: +89 NoClash(B_C_2_1)_indicator_var +1 t(B) -1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: +100 NoClash(B_D_2_0)_indicator_var -1 t(B) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: +95 NoClash(B_D_2_1)_indicator_var +1 t(B) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: +102 NoClash(B_D_3_0)_indicator_var -1 t(B) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: +91 NoClash(B_D_3_1)_indicator_var +1 t(B) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: +96 NoClash(B_E_2_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: +95 NoClash(B_E_2_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: +99 NoClash(B_E_3_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: +95 NoClash(B_E_3_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: +97 NoClash(B_E_5_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: +92 NoClash(B_E_5_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: +96 NoClash(B_F_3_0)_indicator_var -1 t(B) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: +97 NoClash(B_F_3_1)_indicator_var +1 t(B) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: +100 NoClash(B_G_2_0)_indicator_var -1 t(B) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: +95 NoClash(B_G_2_1)_indicator_var +1 t(B) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: +102 NoClash(B_G_5_0)_indicator_var -1 t(B) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: +89 NoClash(B_G_5_1)_indicator_var +1 t(B) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: +94 NoClash(C_D_2_0)_indicator_var -1 t(C) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: +101 NoClash(C_D_2_1)_indicator_var +1 t(C) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: +97 NoClash(C_D_4_0)_indicator_var -1 t(C) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: +94 NoClash(C_D_4_1)_indicator_var +1 t(C) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: +90 NoClash(C_E_2_0)_indicator_var -1 t(C) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: +101 NoClash(C_E_2_1)_indicator_var +1 t(C) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: +94 NoClash(C_F_1_0)_indicator_var -1 t(C) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: +98 NoClash(C_F_1_1)_indicator_var +1 t(C) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: +97 NoClash(C_F_4_0)_indicator_var -1 t(C) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: +100 NoClash(C_F_4_1)_indicator_var +1 t(C) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: +94 NoClash(C_G_2_0)_indicator_var -1 t(C) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: +101 NoClash(C_G_2_1)_indicator_var +1 t(C) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: +96 NoClash(C_G_4_0)_indicator_var -1 t(C) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: +99 NoClash(C_G_4_1)_indicator_var +1 t(C) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: +96 NoClash(D_E_2_0)_indicator_var -1 t(D) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: +100 NoClash(D_E_2_1)_indicator_var +1 t(D) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: +94 NoClash(D_E_3_0)_indicator_var -1 t(D) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: +101 NoClash(D_E_3_1)_indicator_var +1 t(D) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: +91 NoClash(D_F_3_0)_indicator_var -1 t(D) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: +103 NoClash(D_F_3_1)_indicator_var +1 t(D) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: +93 NoClash(D_F_4_0)_indicator_var -1 t(D) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: +99 NoClash(D_F_4_1)_indicator_var +1 t(D) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: +100 NoClash(D_G_2_0)_indicator_var -1 t(D) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: +100 NoClash(D_G_2_1)_indicator_var +1 t(D) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: +92 NoClash(D_G_4_0)_indicator_var -1 t(D) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: +98 NoClash(D_G_4_1)_indicator_var +1 t(D) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: +95 NoClash(E_F_3_0)_indicator_var -1 t(E) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: +100 NoClash(E_F_3_1)_indicator_var +1 t(E) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: +100 NoClash(E_G_2_0)_indicator_var -1 t(E) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: +96 NoClash(E_G_2_1)_indicator_var +1 t(E) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: +99 NoClash(E_G_5_0)_indicator_var -1 t(E) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: +91 NoClash(E_G_5_1)_indicator_var +1 t(E) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: +98 NoClash(F_G_4_0)_indicator_var -1 t(F) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: +98 NoClash(F_G_4_1)_indicator_var +1 t(F) -1 t(G) diff --git a/pyomo/gdp/tests/jobshop_large_chull.lp b/pyomo/gdp/tests/jobshop_large_chull.lp deleted file mode 100644 index 2b13650e3a4..00000000000 --- a/pyomo/gdp/tests/jobshop_large_chull.lp +++ /dev/null @@ -1,2048 +0,0 @@ -\* Source Pyomo model name=unknown *\ - -min -makespan: -+1 ms - -s.t. 
- -c_u_Feas(A)_: --1 ms -+1 t(A) -<= -10 - -c_u_Feas(B)_: --1 ms -+1 t(B) -<= -10 - -c_u_Feas(C)_: --1 ms -+1 t(C) -<= -15 - -c_u_Feas(D)_: --1 ms -+1 t(D) -<= -14 - -c_u_Feas(E)_: --1 ms -+1 t(E) -<= -12 - -c_u_Feas(F)_: --1 ms -+1 t(F) -<= -14 - -c_u_Feas(G)_: --1 ms -+1 t(G) -<= -17 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_C_1_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_C_1_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_D_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_D_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_E_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(E) -+1 t(E) -= 0 - 
-c_e__gdp_chull_relaxation_disj_disaggregation(A_E_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_E_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_E_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_F_1_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_F_1_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_F_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_F_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_G_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_G_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_C_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_C_2_1)_: --1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_D_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_D_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_D_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_D_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_E_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(B) --1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_F_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_F_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_G_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_G_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_G_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_G_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_D_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_D_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_D_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_D_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(C) -+1 t(C) -= 0 - 
-c_e__gdp_chull_relaxation_disj_disaggregation(C_E_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_E_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_F_1_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_F_1_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_F_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_F_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_G_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_G_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_G_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(C_G_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_E_2_0)_: --1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_E_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_E_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_E_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_F_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_F_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_F_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_F_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_G_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_G_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_G_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(G) --1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(D_G_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(D) -+1 t(D) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_F_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_F_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_G_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_G_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_G_5_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(E_G_5_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(E) -+1 t(E) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(F_G_4_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(G) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(G) -+1 t(G) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(F_G_4_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(F) -+1 t(F) -= 0 - -c_e__gdp_chull_relaxation_disj_xor(A_B_3)_: -+1 NoClash(A_B_3_0)_indicator_var -+1 NoClash(A_B_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_B_5)_: -+1 NoClash(A_B_5_0)_indicator_var -+1 
NoClash(A_B_5_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_C_1)_: -+1 NoClash(A_C_1_0)_indicator_var -+1 NoClash(A_C_1_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_D_3)_: -+1 NoClash(A_D_3_0)_indicator_var -+1 NoClash(A_D_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_E_3)_: -+1 NoClash(A_E_3_0)_indicator_var -+1 NoClash(A_E_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_E_5)_: -+1 NoClash(A_E_5_0)_indicator_var -+1 NoClash(A_E_5_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_F_1)_: -+1 NoClash(A_F_1_0)_indicator_var -+1 NoClash(A_F_1_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_F_3)_: -+1 NoClash(A_F_3_0)_indicator_var -+1 NoClash(A_F_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_G_5)_: -+1 NoClash(A_G_5_0)_indicator_var -+1 NoClash(A_G_5_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_C_2)_: -+1 NoClash(B_C_2_0)_indicator_var -+1 NoClash(B_C_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_D_2)_: -+1 NoClash(B_D_2_0)_indicator_var -+1 NoClash(B_D_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_D_3)_: -+1 NoClash(B_D_3_0)_indicator_var -+1 NoClash(B_D_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_E_2)_: -+1 NoClash(B_E_2_0)_indicator_var -+1 NoClash(B_E_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_E_3)_: -+1 NoClash(B_E_3_0)_indicator_var -+1 NoClash(B_E_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_E_5)_: -+1 NoClash(B_E_5_0)_indicator_var -+1 NoClash(B_E_5_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_F_3)_: -+1 NoClash(B_F_3_0)_indicator_var -+1 NoClash(B_F_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_G_2)_: -+1 NoClash(B_G_2_0)_indicator_var -+1 NoClash(B_G_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_G_5)_: -+1 NoClash(B_G_5_0)_indicator_var -+1 NoClash(B_G_5_1)_indicator_var -= 1 - 
-c_e__gdp_chull_relaxation_disj_xor(C_D_2)_: -+1 NoClash(C_D_2_0)_indicator_var -+1 NoClash(C_D_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_D_4)_: -+1 NoClash(C_D_4_0)_indicator_var -+1 NoClash(C_D_4_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_E_2)_: -+1 NoClash(C_E_2_0)_indicator_var -+1 NoClash(C_E_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_F_1)_: -+1 NoClash(C_F_1_0)_indicator_var -+1 NoClash(C_F_1_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_F_4)_: -+1 NoClash(C_F_4_0)_indicator_var -+1 NoClash(C_F_4_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_G_2)_: -+1 NoClash(C_G_2_0)_indicator_var -+1 NoClash(C_G_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(C_G_4)_: -+1 NoClash(C_G_4_0)_indicator_var -+1 NoClash(C_G_4_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_E_2)_: -+1 NoClash(D_E_2_0)_indicator_var -+1 NoClash(D_E_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_E_3)_: -+1 NoClash(D_E_3_0)_indicator_var -+1 NoClash(D_E_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_F_3)_: -+1 NoClash(D_F_3_0)_indicator_var -+1 NoClash(D_F_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_F_4)_: -+1 NoClash(D_F_4_0)_indicator_var -+1 NoClash(D_F_4_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_G_2)_: -+1 NoClash(D_G_2_0)_indicator_var -+1 NoClash(D_G_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(D_G_4)_: -+1 NoClash(D_G_4_0)_indicator_var -+1 NoClash(D_G_4_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(E_F_3)_: -+1 NoClash(E_F_3_0)_indicator_var -+1 NoClash(E_F_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(E_G_2)_: -+1 NoClash(E_G_2_0)_indicator_var -+1 NoClash(E_G_2_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(E_G_5)_: -+1 NoClash(E_G_5_0)_indicator_var -+1 NoClash(E_G_5_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(F_G_4)_: 
-+1 NoClash(F_G_4_0)_indicator_var -+1 NoClash(F_G_4_1)_indicator_var -= 1 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B)_bounds(ub)_: --92 NoClash(A_B_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A)_bounds(ub)_: --92 NoClash(A_B_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: -+4 NoClash(A_B_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B)_bounds(ub)_: --92 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A)_bounds(ub)_: --92 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: -+5 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(B)_bounds(ub)_: --92 NoClash(A_B_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A)_bounds(ub)_: --92 NoClash(A_B_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: -+2 NoClash(A_B_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(B)_bounds(ub)_: --92 NoClash(A_B_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(B) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A)_bounds(ub)_: --92 NoClash(A_B_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: -+3 NoClash(A_B_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C)_bounds(ub)_: --92 NoClash(A_C_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(A)_bounds(ub)_: --92 NoClash(A_C_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: -+6 NoClash(A_C_1_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C)_bounds(ub)_: --92 NoClash(A_C_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(A)_bounds(ub)_: --92 NoClash(A_C_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: -+3 NoClash(A_C_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(D)_bounds(ub)_: --92 NoClash(A_D_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(A)_bounds(ub)_: --92 NoClash(A_D_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: -+10 
NoClash(A_D_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(D)_bounds(ub)_: --92 NoClash(A_D_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(A)_bounds(ub)_: --92 NoClash(A_D_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(E)_bounds(ub)_: --92 NoClash(A_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(A)_bounds(ub)_: --92 NoClash(A_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: -+7 NoClash(A_E_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(E)_bounds(ub)_: --92 NoClash(A_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(A)_bounds(ub)_: --92 NoClash(A_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: -+4 NoClash(A_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(E)_bounds(ub)_: --92 NoClash(A_E_5_0)_indicator_var -+1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(A)_bounds(ub)_: --92 NoClash(A_E_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: -+4 NoClash(A_E_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(E)_bounds(ub)_: --92 NoClash(A_E_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(A)_bounds(ub)_: --92 NoClash(A_E_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(F)_bounds(ub)_: --92 NoClash(A_F_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(A)_bounds(ub)_: --92 NoClash(A_F_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: -+2 NoClash(A_F_1_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(F)_bounds(ub)_: --92 NoClash(A_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(A)_bounds(ub)_: --92 NoClash(A_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(A) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: -+3 NoClash(A_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(F)_bounds(ub)_: --92 NoClash(A_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(A)_bounds(ub)_: --92 NoClash(A_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: -+4 NoClash(A_F_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(F)_bounds(ub)_: --92 NoClash(A_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(A)_bounds(ub)_: --92 NoClash(A_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: -+6 NoClash(A_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(G)_bounds(ub)_: --92 NoClash(A_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(A)_bounds(ub)_: --92 NoClash(A_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: -+9 NoClash(A_G_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(G) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(G)_bounds(ub)_: --92 NoClash(A_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(A)_bounds(ub)_: --92 NoClash(A_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: --3 NoClash(A_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(C)_bounds(ub)_: --92 NoClash(B_C_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(B)_bounds(ub)_: --92 NoClash(B_C_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: -+9 NoClash(B_C_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(C)_bounds(ub)_: --92 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(B)_bounds(ub)_: --92 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: --3 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(D)_bounds(ub)_: --92 NoClash(B_D_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(D) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(B)_bounds(ub)_: --92 NoClash(B_D_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: -+8 NoClash(B_D_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(D)_bounds(ub)_: --92 NoClash(B_D_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(B)_bounds(ub)_: --92 NoClash(B_D_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: -+3 NoClash(B_D_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(D)_bounds(ub)_: --92 NoClash(B_D_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(B)_bounds(ub)_: --92 NoClash(B_D_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: -+10 NoClash(B_D_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(D)_bounds(ub)_: --92 NoClash(B_D_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(B)_bounds(ub)_: --92 NoClash(B_D_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(B) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: --1 NoClash(B_D_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(E)_bounds(ub)_: --92 NoClash(B_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(B)_bounds(ub)_: --92 NoClash(B_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: -+4 NoClash(B_E_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(E)_bounds(ub)_: --92 NoClash(B_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(B)_bounds(ub)_: --92 NoClash(B_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: -+3 NoClash(B_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(E)_bounds(ub)_: --92 NoClash(B_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(B)_bounds(ub)_: --92 NoClash(B_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: -+7 NoClash(B_E_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(E) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(E)_bounds(ub)_: --92 NoClash(B_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(B)_bounds(ub)_: --92 NoClash(B_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: -+3 NoClash(B_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(E)_bounds(ub)_: --92 NoClash(B_E_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(B)_bounds(ub)_: --92 NoClash(B_E_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: -+5 NoClash(B_E_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(E)_bounds(ub)_: --92 NoClash(B_E_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(B)_bounds(ub)_: --92 NoClash(B_E_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(F)_bounds(ub)_: --92 NoClash(B_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(B)_bounds(ub)_: --92 
NoClash(B_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: -+4 NoClash(B_F_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(F)_bounds(ub)_: --92 NoClash(B_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(B)_bounds(ub)_: --92 NoClash(B_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: -+5 NoClash(B_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(G)_bounds(ub)_: --92 NoClash(B_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(B)_bounds(ub)_: --92 NoClash(B_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: -+8 NoClash(B_G_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(G)_bounds(ub)_: --92 NoClash(B_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(B)_bounds(ub)_: --92 NoClash(B_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: -+3 NoClash(B_G_2_1)_indicator_var -+1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(G)_bounds(ub)_: --92 NoClash(B_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(B)_bounds(ub)_: --92 NoClash(B_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: -+10 NoClash(B_G_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(G)_bounds(ub)_: --92 NoClash(B_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(B)_bounds(ub)_: --92 NoClash(B_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: --3 NoClash(B_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(D)_bounds(ub)_: --92 NoClash(C_D_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(C)_bounds(ub)_: --92 NoClash(C_D_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: -+2 NoClash(C_D_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(D)_bounds(ub)_: --92 NoClash(C_D_2_1)_indicator_var -+1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(C)_bounds(ub)_: --92 NoClash(C_D_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: -+9 NoClash(C_D_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(D)_bounds(ub)_: --92 NoClash(C_D_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(C)_bounds(ub)_: --92 NoClash(C_D_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: -+5 NoClash(C_D_4_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(D)_bounds(ub)_: --92 NoClash(C_D_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(C)_bounds(ub)_: --92 NoClash(C_D_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: -+2 NoClash(C_D_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(E)_bounds(ub)_: --92 NoClash(C_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(C)_bounds(ub)_: --92 NoClash(C_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(C) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: --2 NoClash(C_E_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(E)_bounds(ub)_: --92 NoClash(C_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(C)_bounds(ub)_: --92 NoClash(C_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: -+9 NoClash(C_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(F)_bounds(ub)_: --92 NoClash(C_F_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(C)_bounds(ub)_: --92 NoClash(C_F_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: -+2 NoClash(C_F_1_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(F)_bounds(ub)_: --92 NoClash(C_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(C)_bounds(ub)_: --92 NoClash(C_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: -+6 NoClash(C_F_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(F) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(F)_bounds(ub)_: --92 NoClash(C_F_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(C)_bounds(ub)_: --92 NoClash(C_F_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: -+5 NoClash(C_F_4_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(F)_bounds(ub)_: --92 NoClash(C_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(C)_bounds(ub)_: --92 NoClash(C_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: -+8 NoClash(C_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(G)_bounds(ub)_: --92 NoClash(C_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(C)_bounds(ub)_: --92 NoClash(C_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: -+2 NoClash(C_G_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(G)_bounds(ub)_: --92 NoClash(C_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(G) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(C)_bounds(ub)_: --92 NoClash(C_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: -+9 NoClash(C_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(G)_bounds(ub)_: --92 NoClash(C_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(C)_bounds(ub)_: --92 NoClash(C_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: -+4 NoClash(C_G_4_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(C) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(G)_bounds(ub)_: --92 NoClash(C_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(C)_bounds(ub)_: --92 NoClash(C_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: -+7 NoClash(C_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(E)_bounds(ub)_: --92 NoClash(D_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(D)_bounds(ub)_: --92 NoClash(D_E_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(D) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: -+4 NoClash(D_E_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(E)_bounds(ub)_: --92 NoClash(D_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(D)_bounds(ub)_: --92 NoClash(D_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: -+8 NoClash(D_E_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(E)_bounds(ub)_: --92 NoClash(D_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(D)_bounds(ub)_: --92 NoClash(D_E_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: -+2 NoClash(D_E_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(E)_bounds(ub)_: --92 NoClash(D_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(D)_bounds(ub)_: --92 NoClash(D_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: -+9 NoClash(D_E_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(E) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(F)_bounds(ub)_: --92 NoClash(D_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(D)_bounds(ub)_: --92 NoClash(D_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: --1 NoClash(D_F_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(F)_bounds(ub)_: --92 NoClash(D_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(D)_bounds(ub)_: --92 NoClash(D_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: -+11 NoClash(D_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(F)_bounds(ub)_: --92 NoClash(D_F_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(D)_bounds(ub)_: --92 NoClash(D_F_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: -+1 NoClash(D_F_4_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(F)_bounds(ub)_: --92 NoClash(D_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(F) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(D)_bounds(ub)_: --92 NoClash(D_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: -+7 NoClash(D_F_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(G)_bounds(ub)_: --92 NoClash(D_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(D)_bounds(ub)_: --92 NoClash(D_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: -+8 NoClash(D_G_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(G)_bounds(ub)_: --92 NoClash(D_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(D)_bounds(ub)_: --92 NoClash(D_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: -+8 NoClash(D_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(G)_bounds(ub)_: --92 NoClash(D_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(D)_bounds(ub)_: --92 NoClash(D_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(D) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(D) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(G)_bounds(ub)_: --92 NoClash(D_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(D)_bounds(ub)_: --92 NoClash(D_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(D) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: -+6 NoClash(D_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(D) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(F)_bounds(ub)_: --92 NoClash(E_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(E)_bounds(ub)_: --92 NoClash(E_F_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: -+3 NoClash(E_F_3_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(E) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(F)_bounds(ub)_: --92 NoClash(E_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(E)_bounds(ub)_: --92 NoClash(E_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: -+8 NoClash(E_F_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(F) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(G)_bounds(ub)_: --92 NoClash(E_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(E)_bounds(ub)_: --92 NoClash(E_G_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: -+8 NoClash(E_G_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(E) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(G)_bounds(ub)_: --92 NoClash(E_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(E)_bounds(ub)_: --92 NoClash(E_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: -+4 NoClash(E_G_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(G)_bounds(ub)_: --92 NoClash(E_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(E)_bounds(ub)_: --92 NoClash(E_G_5_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: -+7 NoClash(E_G_5_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(E) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(G)_bounds(ub)_: --92 NoClash(E_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(G) -<= 0 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(E)_bounds(ub)_: --92 NoClash(E_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(E) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: --1 NoClash(E_G_5_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(E) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(G)_bounds(ub)_: --92 NoClash(F_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(F)_bounds(ub)_: --92 NoClash(F_G_4_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: -+6 NoClash(F_G_4_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(F) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(G)_bounds(ub)_: --92 NoClash(F_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(G) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(F)_bounds(ub)_: --92 NoClash(F_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(F) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: -+6 NoClash(F_G_4_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(F) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(G) -<= 0 - -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - -bounds - -inf <= ms <= +inf - 0 <= t(A) <= 92 - 0 <= t(B) <= 92 - 0 <= t(C) <= 92 - 0 <= t(D) <= 92 - 0 <= t(E) <= 92 - 0 <= t(F) <= 92 - 0 <= t(G) <= 92 - 0 <= NoClash(A_B_3_0)_indicator_var <= 1 - 0 <= NoClash(A_B_3_1)_indicator_var <= 1 - 0 <= NoClash(A_B_5_0)_indicator_var <= 1 - 0 <= NoClash(A_B_5_1)_indicator_var <= 1 - 0 <= NoClash(A_C_1_0)_indicator_var 
<= 1 - 0 <= NoClash(A_C_1_1)_indicator_var <= 1 - 0 <= NoClash(A_D_3_0)_indicator_var <= 1 - 0 <= NoClash(A_D_3_1)_indicator_var <= 1 - 0 <= NoClash(A_E_3_0)_indicator_var <= 1 - 0 <= NoClash(A_E_3_1)_indicator_var <= 1 - 0 <= NoClash(A_E_5_0)_indicator_var <= 1 - 0 <= NoClash(A_E_5_1)_indicator_var <= 1 - 0 <= NoClash(A_F_1_0)_indicator_var <= 1 - 0 <= NoClash(A_F_1_1)_indicator_var <= 1 - 0 <= NoClash(A_F_3_0)_indicator_var <= 1 - 0 <= NoClash(A_F_3_1)_indicator_var <= 1 - 0 <= NoClash(A_G_5_0)_indicator_var <= 1 - 0 <= NoClash(A_G_5_1)_indicator_var <= 1 - 0 <= NoClash(B_C_2_0)_indicator_var <= 1 - 0 <= NoClash(B_C_2_1)_indicator_var <= 1 - 0 <= NoClash(B_D_2_0)_indicator_var <= 1 - 0 <= NoClash(B_D_2_1)_indicator_var <= 1 - 0 <= NoClash(B_D_3_0)_indicator_var <= 1 - 0 <= NoClash(B_D_3_1)_indicator_var <= 1 - 0 <= NoClash(B_E_2_0)_indicator_var <= 1 - 0 <= NoClash(B_E_2_1)_indicator_var <= 1 - 0 <= NoClash(B_E_3_0)_indicator_var <= 1 - 0 <= NoClash(B_E_3_1)_indicator_var <= 1 - 0 <= NoClash(B_E_5_0)_indicator_var <= 1 - 0 <= NoClash(B_E_5_1)_indicator_var <= 1 - 0 <= NoClash(B_F_3_0)_indicator_var <= 1 - 0 <= NoClash(B_F_3_1)_indicator_var <= 1 - 0 <= NoClash(B_G_2_0)_indicator_var <= 1 - 0 <= NoClash(B_G_2_1)_indicator_var <= 1 - 0 <= NoClash(B_G_5_0)_indicator_var <= 1 - 0 <= NoClash(B_G_5_1)_indicator_var <= 1 - 0 <= NoClash(C_D_2_0)_indicator_var <= 1 - 0 <= NoClash(C_D_2_1)_indicator_var <= 1 - 0 <= NoClash(C_D_4_0)_indicator_var <= 1 - 0 <= NoClash(C_D_4_1)_indicator_var <= 1 - 0 <= NoClash(C_E_2_0)_indicator_var <= 1 - 0 <= NoClash(C_E_2_1)_indicator_var <= 1 - 0 <= NoClash(C_F_1_0)_indicator_var <= 1 - 0 <= NoClash(C_F_1_1)_indicator_var <= 1 - 0 <= NoClash(C_F_4_0)_indicator_var <= 1 - 0 <= NoClash(C_F_4_1)_indicator_var <= 1 - 0 <= NoClash(C_G_2_0)_indicator_var <= 1 - 0 <= NoClash(C_G_2_1)_indicator_var <= 1 - 0 <= NoClash(C_G_4_0)_indicator_var <= 1 - 0 <= NoClash(C_G_4_1)_indicator_var <= 1 - 0 <= NoClash(D_E_2_0)_indicator_var <= 1 - 0 <= 
NoClash(D_E_2_1)_indicator_var <= 1 - 0 <= NoClash(D_E_3_0)_indicator_var <= 1 - 0 <= NoClash(D_E_3_1)_indicator_var <= 1 - 0 <= NoClash(D_F_3_0)_indicator_var <= 1 - 0 <= NoClash(D_F_3_1)_indicator_var <= 1 - 0 <= NoClash(D_F_4_0)_indicator_var <= 1 - 0 <= NoClash(D_F_4_1)_indicator_var <= 1 - 0 <= NoClash(D_G_2_0)_indicator_var <= 1 - 0 <= NoClash(D_G_2_1)_indicator_var <= 1 - 0 <= NoClash(D_G_4_0)_indicator_var <= 1 - 0 <= NoClash(D_G_4_1)_indicator_var <= 1 - 0 <= NoClash(E_F_3_0)_indicator_var <= 1 - 0 <= NoClash(E_F_3_1)_indicator_var <= 1 - 0 <= NoClash(E_G_2_0)_indicator_var <= 1 - 0 <= NoClash(E_G_2_1)_indicator_var <= 1 - 0 <= NoClash(E_G_5_0)_indicator_var <= 1 - 0 <= NoClash(E_G_5_1)_indicator_var <= 1 - 0 <= NoClash(F_G_4_0)_indicator_var <= 1 - 0 <= NoClash(F_G_4_1)_indicator_var <= 1 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(6)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(7)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(8)_t(A) <= 92 - 
0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(9)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(10)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(11)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(12)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(13)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(14)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(15)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(16)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(17)_t(A) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(18)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(19)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(20)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(21)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(22)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(23)_t(B) 
<= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(24)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(25)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(26)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(27)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(28)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(29)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(30)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(31)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(32)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(33)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(34)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(35)_t(B) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(36)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(37)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(D) <= 92 - 0 <= 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(38)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(39)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(40)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(41)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(42)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(43)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(44)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(45)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(46)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(47)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(48)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(49)_t(C) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(50)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(51)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(52)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(E) <= 
92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(53)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(54)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(55)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(56)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(57)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(58)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(59)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(60)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(61)_t(D) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(62)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(63)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(64)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(65)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(66)_t(E) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(67)_t(E) <= 92 - 0 <= 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(68)_t(F) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(G) <= 92 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(69)_t(F) <= 92 -binary - NoClash(A_B_3_0)_indicator_var - NoClash(A_B_3_1)_indicator_var - NoClash(A_B_5_0)_indicator_var - NoClash(A_B_5_1)_indicator_var - NoClash(A_C_1_0)_indicator_var - NoClash(A_C_1_1)_indicator_var - NoClash(A_D_3_0)_indicator_var - NoClash(A_D_3_1)_indicator_var - NoClash(A_E_3_0)_indicator_var - NoClash(A_E_3_1)_indicator_var - NoClash(A_E_5_0)_indicator_var - NoClash(A_E_5_1)_indicator_var - NoClash(A_F_1_0)_indicator_var - NoClash(A_F_1_1)_indicator_var - NoClash(A_F_3_0)_indicator_var - NoClash(A_F_3_1)_indicator_var - NoClash(A_G_5_0)_indicator_var - NoClash(A_G_5_1)_indicator_var - NoClash(B_C_2_0)_indicator_var - NoClash(B_C_2_1)_indicator_var - NoClash(B_D_2_0)_indicator_var - NoClash(B_D_2_1)_indicator_var - NoClash(B_D_3_0)_indicator_var - NoClash(B_D_3_1)_indicator_var - NoClash(B_E_2_0)_indicator_var - NoClash(B_E_2_1)_indicator_var - NoClash(B_E_3_0)_indicator_var - NoClash(B_E_3_1)_indicator_var - NoClash(B_E_5_0)_indicator_var - NoClash(B_E_5_1)_indicator_var - NoClash(B_F_3_0)_indicator_var - NoClash(B_F_3_1)_indicator_var - NoClash(B_G_2_0)_indicator_var - NoClash(B_G_2_1)_indicator_var - NoClash(B_G_5_0)_indicator_var - NoClash(B_G_5_1)_indicator_var - NoClash(C_D_2_0)_indicator_var - NoClash(C_D_2_1)_indicator_var - NoClash(C_D_4_0)_indicator_var - NoClash(C_D_4_1)_indicator_var - NoClash(C_E_2_0)_indicator_var - NoClash(C_E_2_1)_indicator_var - NoClash(C_F_1_0)_indicator_var - NoClash(C_F_1_1)_indicator_var - NoClash(C_F_4_0)_indicator_var - NoClash(C_F_4_1)_indicator_var - NoClash(C_G_2_0)_indicator_var - NoClash(C_G_2_1)_indicator_var - NoClash(C_G_4_0)_indicator_var - NoClash(C_G_4_1)_indicator_var - NoClash(D_E_2_0)_indicator_var - NoClash(D_E_2_1)_indicator_var - 
NoClash(D_E_3_0)_indicator_var - NoClash(D_E_3_1)_indicator_var - NoClash(D_F_3_0)_indicator_var - NoClash(D_F_3_1)_indicator_var - NoClash(D_F_4_0)_indicator_var - NoClash(D_F_4_1)_indicator_var - NoClash(D_G_2_0)_indicator_var - NoClash(D_G_2_1)_indicator_var - NoClash(D_G_4_0)_indicator_var - NoClash(D_G_4_1)_indicator_var - NoClash(E_F_3_0)_indicator_var - NoClash(E_F_3_1)_indicator_var - NoClash(E_G_2_0)_indicator_var - NoClash(E_G_2_1)_indicator_var - NoClash(E_G_5_0)_indicator_var - NoClash(E_G_5_1)_indicator_var - NoClash(F_G_4_0)_indicator_var - NoClash(F_G_4_1)_indicator_var -end diff --git a/pyomo/gdp/tests/jobshop_large_cuttingplane.lp b/pyomo/gdp/tests/jobshop_large_cuttingplane.lp index 5b90b665a4c..63ee0a969de 100644 --- a/pyomo/gdp/tests/jobshop_large_cuttingplane.lp +++ b/pyomo/gdp/tests/jobshop_large_cuttingplane.lp @@ -297,421 +297,421 @@ c_l__pyomo_gdp_cuttingplane_relaxation_cuts(0)_: +3.0097512309800001 t(G) >= 132.67931315860829 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +96 NoClash(A_B_3_0)_indicator_var -1 t(A) +1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +97 NoClash(A_B_3_1)_indicator_var +1 t(A) -1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: +94 NoClash(A_B_5_0)_indicator_var -1 t(A) +1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: +95 NoClash(A_B_5_1)_indicator_var +1 t(A) -1 t(B) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: +98 
NoClash(A_C_1_0)_indicator_var -1 t(A) +1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: +95 NoClash(A_C_1_1)_indicator_var +1 t(A) -1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: +102 NoClash(A_D_3_0)_indicator_var -1 t(A) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: +92 NoClash(A_D_3_1)_indicator_var +1 t(A) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: +99 NoClash(A_E_3_0)_indicator_var -1 t(A) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: +96 NoClash(A_E_3_1)_indicator_var +1 t(A) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: +96 NoClash(A_E_5_0)_indicator_var -1 t(A) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: +92 NoClash(A_E_5_1)_indicator_var +1 t(A) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: +94 NoClash(A_F_1_0)_indicator_var -1 t(A) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: +95 NoClash(A_F_1_1)_indicator_var +1 t(A) -1 t(F) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: +96 NoClash(A_F_3_0)_indicator_var -1 t(A) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: +98 NoClash(A_F_3_1)_indicator_var +1 t(A) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: +101 NoClash(A_G_5_0)_indicator_var -1 t(A) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: +89 NoClash(A_G_5_1)_indicator_var +1 t(A) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: +101 NoClash(B_C_2_0)_indicator_var -1 t(B) +1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: +89 NoClash(B_C_2_1)_indicator_var +1 t(B) -1 t(C) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: +100 NoClash(B_D_2_0)_indicator_var -1 t(B) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: +95 NoClash(B_D_2_1)_indicator_var +1 t(B) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: +102 NoClash(B_D_3_0)_indicator_var -1 t(B) +1 t(D) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: +91 NoClash(B_D_3_1)_indicator_var +1 t(B) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: +96 NoClash(B_E_2_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: +95 NoClash(B_E_2_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: +99 NoClash(B_E_3_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: +95 NoClash(B_E_3_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: +97 NoClash(B_E_5_0)_indicator_var -1 t(B) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: +92 NoClash(B_E_5_1)_indicator_var +1 t(B) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: +96 NoClash(B_F_3_0)_indicator_var -1 t(B) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: +97 NoClash(B_F_3_1)_indicator_var +1 t(B) -1 t(F) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: +100 NoClash(B_G_2_0)_indicator_var -1 t(B) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: +95 NoClash(B_G_2_1)_indicator_var +1 t(B) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: +102 NoClash(B_G_5_0)_indicator_var -1 t(B) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: +89 NoClash(B_G_5_1)_indicator_var +1 t(B) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: +94 NoClash(C_D_2_0)_indicator_var -1 t(C) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: +101 NoClash(C_D_2_1)_indicator_var +1 t(C) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: +97 NoClash(C_D_4_0)_indicator_var -1 t(C) +1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: +94 NoClash(C_D_4_1)_indicator_var +1 t(C) -1 t(D) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: +90 NoClash(C_E_2_0)_indicator_var -1 t(C) +1 t(E) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: +101 NoClash(C_E_2_1)_indicator_var +1 t(C) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: +94 NoClash(C_F_1_0)_indicator_var -1 t(C) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: +98 NoClash(C_F_1_1)_indicator_var +1 t(C) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: +97 NoClash(C_F_4_0)_indicator_var -1 t(C) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: +100 NoClash(C_F_4_1)_indicator_var +1 t(C) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: +94 NoClash(C_G_2_0)_indicator_var -1 t(C) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: +101 NoClash(C_G_2_1)_indicator_var +1 t(C) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: +96 NoClash(C_G_4_0)_indicator_var -1 t(C) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: +99 NoClash(C_G_4_1)_indicator_var +1 t(C) -1 t(G) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: +96 NoClash(D_E_2_0)_indicator_var -1 t(D) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: +100 NoClash(D_E_2_1)_indicator_var +1 t(D) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: +94 NoClash(D_E_3_0)_indicator_var -1 t(D) +1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: +101 NoClash(D_E_3_1)_indicator_var +1 t(D) -1 t(E) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: +91 NoClash(D_F_3_0)_indicator_var -1 t(D) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: +103 NoClash(D_F_3_1)_indicator_var +1 t(D) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: +93 NoClash(D_F_4_0)_indicator_var -1 t(D) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: +99 NoClash(D_F_4_1)_indicator_var +1 t(D) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: +100 NoClash(D_G_2_0)_indicator_var -1 t(D) +1 t(G) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: +100 NoClash(D_G_2_1)_indicator_var +1 t(D) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: +92 NoClash(D_G_4_0)_indicator_var -1 t(D) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: +98 NoClash(D_G_4_1)_indicator_var +1 t(D) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: +95 NoClash(E_F_3_0)_indicator_var -1 t(E) +1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: +100 NoClash(E_F_3_1)_indicator_var +1 t(E) -1 t(F) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: +100 NoClash(E_G_2_0)_indicator_var -1 t(E) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: +96 NoClash(E_G_2_1)_indicator_var +1 t(E) -1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: +99 NoClash(E_G_5_0)_indicator_var -1 t(E) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: +91 NoClash(E_G_5_1)_indicator_var +1 t(E) -1 t(G) <= 92 
-c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: +98 NoClash(F_G_4_0)_indicator_var -1 t(F) +1 t(G) <= 92 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: +98 NoClash(F_G_4_1)_indicator_var +1 t(F) -1 t(G) diff --git a/pyomo/gdp/tests/jobshop_large_hull.lp b/pyomo/gdp/tests/jobshop_large_hull.lp new file mode 100644 index 00000000000..97e8cd7d61e --- /dev/null +++ b/pyomo/gdp/tests/jobshop_large_hull.lp @@ -0,0 +1,2048 @@ +\* Source Pyomo model name=unknown *\ + +min +makespan: ++1 ms + +s.t. + +c_u_Feas(A)_: +-1 ms ++1 t(A) +<= -10 + +c_u_Feas(B)_: +-1 ms ++1 t(B) +<= -10 + +c_u_Feas(C)_: +-1 ms ++1 t(C) +<= -15 + +c_u_Feas(D)_: +-1 ms ++1 t(D) +<= -14 + +c_u_Feas(E)_: +-1 ms ++1 t(E) +<= -12 + +c_u_Feas(F)_: +-1 ms ++1 t(F) +<= -14 + +c_u_Feas(G)_: +-1 ms ++1 t(G) +<= -17 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_C_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_D_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_E_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(E) +-1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_E_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_F_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_C_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_D_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_D_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(E) ++1 t(E) += 0 + 
+c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_D_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_D_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_F_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_F_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_G_2)_: +-1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_E_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_F_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(G) ++1 t(G) += 
0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_F_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(G) ++1 t(G) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_C_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_D_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_E_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_E_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_F_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(A) 
+-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_C_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_D_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_D_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(B) ++1 t(B) += 0 + 
+c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_D_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_D_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_F_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_F_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_E_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_E_3)_: +-1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_F_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(D) ++1 t(D) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_F_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_G_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_G_5)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(E) ++1 t(E) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_F_G_4)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(F) ++1 t(F) += 0 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: ++1 NoClash(A_B_3_0)_indicator_var ++1 NoClash(A_B_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_5)_: ++1 
NoClash(A_B_5_0)_indicator_var ++1 NoClash(A_B_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: ++1 NoClash(A_C_1_0)_indicator_var ++1 NoClash(A_C_1_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_D_3)_: ++1 NoClash(A_D_3_0)_indicator_var ++1 NoClash(A_D_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_3)_: ++1 NoClash(A_E_3_0)_indicator_var ++1 NoClash(A_E_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_5)_: ++1 NoClash(A_E_5_0)_indicator_var ++1 NoClash(A_E_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_1)_: ++1 NoClash(A_F_1_0)_indicator_var ++1 NoClash(A_F_1_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_3)_: ++1 NoClash(A_F_3_0)_indicator_var ++1 NoClash(A_F_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_G_5)_: ++1 NoClash(A_G_5_0)_indicator_var ++1 NoClash(A_G_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: ++1 NoClash(B_C_2_0)_indicator_var ++1 NoClash(B_C_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_2)_: ++1 NoClash(B_D_2_0)_indicator_var ++1 NoClash(B_D_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_3)_: ++1 NoClash(B_D_3_0)_indicator_var ++1 NoClash(B_D_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_2)_: ++1 NoClash(B_E_2_0)_indicator_var ++1 NoClash(B_E_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_3)_: ++1 NoClash(B_E_3_0)_indicator_var ++1 NoClash(B_E_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_5)_: ++1 NoClash(B_E_5_0)_indicator_var ++1 NoClash(B_E_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_F_3)_: ++1 NoClash(B_F_3_0)_indicator_var ++1 NoClash(B_F_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_2)_: ++1 NoClash(B_G_2_0)_indicator_var ++1 
NoClash(B_G_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_5)_: ++1 NoClash(B_G_5_0)_indicator_var ++1 NoClash(B_G_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_2)_: ++1 NoClash(C_D_2_0)_indicator_var ++1 NoClash(C_D_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_4)_: ++1 NoClash(C_D_4_0)_indicator_var ++1 NoClash(C_D_4_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_E_2)_: ++1 NoClash(C_E_2_0)_indicator_var ++1 NoClash(C_E_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_1)_: ++1 NoClash(C_F_1_0)_indicator_var ++1 NoClash(C_F_1_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_4)_: ++1 NoClash(C_F_4_0)_indicator_var ++1 NoClash(C_F_4_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_2)_: ++1 NoClash(C_G_2_0)_indicator_var ++1 NoClash(C_G_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_4)_: ++1 NoClash(C_G_4_0)_indicator_var ++1 NoClash(C_G_4_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_2)_: ++1 NoClash(D_E_2_0)_indicator_var ++1 NoClash(D_E_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_3)_: ++1 NoClash(D_E_3_0)_indicator_var ++1 NoClash(D_E_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_3)_: ++1 NoClash(D_F_3_0)_indicator_var ++1 NoClash(D_F_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_4)_: ++1 NoClash(D_F_4_0)_indicator_var ++1 NoClash(D_F_4_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_2)_: ++1 NoClash(D_G_2_0)_indicator_var ++1 NoClash(D_G_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_4)_: ++1 NoClash(D_G_4_0)_indicator_var ++1 NoClash(D_G_4_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(E_F_3)_: ++1 NoClash(E_F_3_0)_indicator_var ++1 NoClash(E_F_3_1)_indicator_var += 1 + 
+c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_2)_: ++1 NoClash(E_G_2_0)_indicator_var ++1 NoClash(E_G_2_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_5)_: ++1 NoClash(E_G_5_0)_indicator_var ++1 NoClash(E_G_5_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(F_G_4)_: ++1 NoClash(F_G_4_0)_indicator_var ++1 NoClash(F_G_4_1)_indicator_var += 1 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B)_bounds(ub)_: +-92 NoClash(A_B_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A)_bounds(ub)_: +-92 NoClash(A_B_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: ++4 NoClash(A_B_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B)_bounds(ub)_: +-92 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A)_bounds(ub)_: +-92 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: ++5 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(B)_bounds(ub)_: +-92 NoClash(A_B_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A)_bounds(ub)_: +-92 NoClash(A_B_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_NoClash(A_B_5_0)_c(ub)_: ++2 NoClash(A_B_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(B)_bounds(ub)_: +-92 NoClash(A_B_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A)_bounds(ub)_: +-92 NoClash(A_B_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_NoClash(A_B_5_1)_c(ub)_: ++3 NoClash(A_B_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C)_bounds(ub)_: +-92 NoClash(A_C_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(A)_bounds(ub)_: +-92 NoClash(A_C_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_NoClash(A_C_1_0)_c(ub)_: ++6 NoClash(A_C_1_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C)_bounds(ub)_: +-92 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(A)_bounds(ub)_: +-92 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_NoClash(A_C_1_1)_c(ub)_: ++3 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(A) +-1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(D)_bounds(ub)_: +-92 NoClash(A_D_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(A)_bounds(ub)_: +-92 NoClash(A_D_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_NoClash(A_D_3_0)_c(ub)_: ++10 NoClash(A_D_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(D)_bounds(ub)_: +-92 NoClash(A_D_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(A)_bounds(ub)_: +-92 NoClash(A_D_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_NoClash(A_D_3_1)_c(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(E)_bounds(ub)_: +-92 NoClash(A_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(A)_bounds(ub)_: +-92 NoClash(A_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_NoClash(A_E_3_0)_c(ub)_: ++7 NoClash(A_E_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(E)_bounds(ub)_: +-92 NoClash(A_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(E) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(A)_bounds(ub)_: +-92 NoClash(A_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_NoClash(A_E_3_1)_c(ub)_: ++4 NoClash(A_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(E)_bounds(ub)_: +-92 NoClash(A_E_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(A)_bounds(ub)_: +-92 NoClash(A_E_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_NoClash(A_E_5_0)_c(ub)_: ++4 NoClash(A_E_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(E)_bounds(ub)_: +-92 NoClash(A_E_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(A)_bounds(ub)_: +-92 NoClash(A_E_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_NoClash(A_E_5_1)_c(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(F)_bounds(ub)_: +-92 NoClash(A_F_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(A)_bounds(ub)_: +-92 NoClash(A_F_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(A) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_NoClash(A_F_1_0)_c(ub)_: ++2 NoClash(A_F_1_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(F)_bounds(ub)_: +-92 NoClash(A_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(A)_bounds(ub)_: +-92 NoClash(A_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_NoClash(A_F_1_1)_c(ub)_: ++3 NoClash(A_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(F)_bounds(ub)_: +-92 NoClash(A_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(A)_bounds(ub)_: +-92 NoClash(A_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_NoClash(A_F_3_0)_c(ub)_: ++4 NoClash(A_F_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(F)_bounds(ub)_: +-92 NoClash(A_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(A)_bounds(ub)_: +-92 NoClash(A_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_NoClash(A_F_3_1)_c(ub)_: ++6 NoClash(A_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(A) +-1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(G)_bounds(ub)_: +-92 NoClash(A_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(A)_bounds(ub)_: +-92 NoClash(A_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_NoClash(A_G_5_0)_c(ub)_: ++9 NoClash(A_G_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(G)_bounds(ub)_: +-92 NoClash(A_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(A)_bounds(ub)_: +-92 NoClash(A_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_NoClash(A_G_5_1)_c(ub)_: +-3 NoClash(A_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(C)_bounds(ub)_: +-92 NoClash(B_C_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(B)_bounds(ub)_: +-92 NoClash(B_C_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_NoClash(B_C_2_0)_c(ub)_: ++9 NoClash(B_C_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(C)_bounds(ub)_: +-92 NoClash(B_C_2_1)_indicator_var ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(B)_bounds(ub)_: +-92 NoClash(B_C_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_NoClash(B_C_2_1)_c(ub)_: +-3 NoClash(B_C_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(D)_bounds(ub)_: +-92 NoClash(B_D_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(B)_bounds(ub)_: +-92 NoClash(B_D_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_NoClash(B_D_2_0)_c(ub)_: ++8 NoClash(B_D_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(D)_bounds(ub)_: +-92 NoClash(B_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(B)_bounds(ub)_: +-92 NoClash(B_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_NoClash(B_D_2_1)_c(ub)_: ++3 NoClash(B_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(D)_bounds(ub)_: +-92 NoClash(B_D_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(B)_bounds(ub)_: +-92 NoClash(B_D_3_0)_indicator_var ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_NoClash(B_D_3_0)_c(ub)_: ++10 NoClash(B_D_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(D)_bounds(ub)_: +-92 NoClash(B_D_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(B)_bounds(ub)_: +-92 NoClash(B_D_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_NoClash(B_D_3_1)_c(ub)_: +-1 NoClash(B_D_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(E)_bounds(ub)_: +-92 NoClash(B_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(B)_bounds(ub)_: +-92 NoClash(B_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_NoClash(B_E_2_0)_c(ub)_: ++4 NoClash(B_E_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(E)_bounds(ub)_: +-92 NoClash(B_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(B)_bounds(ub)_: +-92 NoClash(B_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_NoClash(B_E_2_1)_c(ub)_: ++3 NoClash(B_E_2_1)_indicator_var ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(E)_bounds(ub)_: +-92 NoClash(B_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(B)_bounds(ub)_: +-92 NoClash(B_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_NoClash(B_E_3_0)_c(ub)_: ++7 NoClash(B_E_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(E)_bounds(ub)_: +-92 NoClash(B_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(B)_bounds(ub)_: +-92 NoClash(B_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_NoClash(B_E_3_1)_c(ub)_: ++3 NoClash(B_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(E)_bounds(ub)_: +-92 NoClash(B_E_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(B)_bounds(ub)_: +-92 NoClash(B_E_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_NoClash(B_E_5_0)_c(ub)_: ++5 NoClash(B_E_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(E)_bounds(ub)_: 
+-92 NoClash(B_E_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(B)_bounds(ub)_: +-92 NoClash(B_E_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_NoClash(B_E_5_1)_c(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(F)_bounds(ub)_: +-92 NoClash(B_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(B)_bounds(ub)_: +-92 NoClash(B_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_NoClash(B_F_3_0)_c(ub)_: ++4 NoClash(B_F_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(F)_bounds(ub)_: +-92 NoClash(B_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(B)_bounds(ub)_: +-92 NoClash(B_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_NoClash(B_F_3_1)_c(ub)_: ++5 NoClash(B_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(G)_bounds(ub)_: +-92 NoClash(B_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(B)_bounds(ub)_: +-92 NoClash(B_G_2_0)_indicator_var ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_NoClash(B_G_2_0)_c(ub)_: ++8 NoClash(B_G_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(G)_bounds(ub)_: +-92 NoClash(B_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(B)_bounds(ub)_: +-92 NoClash(B_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_NoClash(B_G_2_1)_c(ub)_: ++3 NoClash(B_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(G)_bounds(ub)_: +-92 NoClash(B_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(B)_bounds(ub)_: +-92 NoClash(B_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_NoClash(B_G_5_0)_c(ub)_: ++10 NoClash(B_G_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(G)_bounds(ub)_: +-92 NoClash(B_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(B)_bounds(ub)_: +-92 NoClash(B_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_NoClash(B_G_5_1)_c(ub)_: +-3 NoClash(B_G_5_1)_indicator_var ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(D)_bounds(ub)_: +-92 NoClash(C_D_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(C)_bounds(ub)_: +-92 NoClash(C_D_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_NoClash(C_D_2_0)_c(ub)_: ++2 NoClash(C_D_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(D)_bounds(ub)_: +-92 NoClash(C_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(C)_bounds(ub)_: +-92 NoClash(C_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_NoClash(C_D_2_1)_c(ub)_: ++9 NoClash(C_D_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(D)_bounds(ub)_: +-92 NoClash(C_D_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(C)_bounds(ub)_: +-92 NoClash(C_D_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_NoClash(C_D_4_0)_c(ub)_: ++5 NoClash(C_D_4_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(D)_bounds(ub)_: 
+-92 NoClash(C_D_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(C)_bounds(ub)_: +-92 NoClash(C_D_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_NoClash(C_D_4_1)_c(ub)_: ++2 NoClash(C_D_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(E)_bounds(ub)_: +-92 NoClash(C_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(C)_bounds(ub)_: +-92 NoClash(C_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_NoClash(C_E_2_0)_c(ub)_: +-2 NoClash(C_E_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(E)_bounds(ub)_: +-92 NoClash(C_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(C)_bounds(ub)_: +-92 NoClash(C_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_NoClash(C_E_2_1)_c(ub)_: ++9 NoClash(C_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(F)_bounds(ub)_: +-92 NoClash(C_F_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(C)_bounds(ub)_: +-92 
NoClash(C_F_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_NoClash(C_F_1_0)_c(ub)_: ++2 NoClash(C_F_1_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(F)_bounds(ub)_: +-92 NoClash(C_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(C)_bounds(ub)_: +-92 NoClash(C_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_NoClash(C_F_1_1)_c(ub)_: ++6 NoClash(C_F_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(F)_bounds(ub)_: +-92 NoClash(C_F_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(C)_bounds(ub)_: +-92 NoClash(C_F_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_NoClash(C_F_4_0)_c(ub)_: ++5 NoClash(C_F_4_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(F)_bounds(ub)_: +-92 NoClash(C_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(C)_bounds(ub)_: +-92 NoClash(C_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_NoClash(C_F_4_1)_c(ub)_: ++8 
NoClash(C_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(G)_bounds(ub)_: +-92 NoClash(C_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(C)_bounds(ub)_: +-92 NoClash(C_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_NoClash(C_G_2_0)_c(ub)_: ++2 NoClash(C_G_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(G)_bounds(ub)_: +-92 NoClash(C_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(C)_bounds(ub)_: +-92 NoClash(C_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_NoClash(C_G_2_1)_c(ub)_: ++9 NoClash(C_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(G)_bounds(ub)_: +-92 NoClash(C_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(C)_bounds(ub)_: +-92 NoClash(C_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_NoClash(C_G_4_0)_c(ub)_: ++4 NoClash(C_G_4_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(C) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(G) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(G)_bounds(ub)_: +-92 NoClash(C_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(C)_bounds(ub)_: +-92 NoClash(C_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_NoClash(C_G_4_1)_c(ub)_: ++7 NoClash(C_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(E)_bounds(ub)_: +-92 NoClash(D_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(D)_bounds(ub)_: +-92 NoClash(D_E_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_NoClash(D_E_2_0)_c(ub)_: ++4 NoClash(D_E_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(D) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(E)_bounds(ub)_: +-92 NoClash(D_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(D)_bounds(ub)_: +-92 NoClash(D_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_NoClash(D_E_2_1)_c(ub)_: ++8 NoClash(D_E_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(E)_bounds(ub)_: +-92 NoClash(D_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(E) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(D)_bounds(ub)_: +-92 NoClash(D_E_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_NoClash(D_E_3_0)_c(ub)_: ++2 NoClash(D_E_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(D) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(E)_bounds(ub)_: +-92 NoClash(D_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(D)_bounds(ub)_: +-92 NoClash(D_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_NoClash(D_E_3_1)_c(ub)_: ++9 NoClash(D_E_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(F)_bounds(ub)_: +-92 NoClash(D_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(D)_bounds(ub)_: +-92 NoClash(D_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_NoClash(D_F_3_0)_c(ub)_: +-1 NoClash(D_F_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(D) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(F)_bounds(ub)_: +-92 NoClash(D_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(D)_bounds(ub)_: +-92 NoClash(D_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(D) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_NoClash(D_F_3_1)_c(ub)_: ++11 NoClash(D_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(F)_bounds(ub)_: +-92 NoClash(D_F_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(D)_bounds(ub)_: +-92 NoClash(D_F_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_NoClash(D_F_4_0)_c(ub)_: ++1 NoClash(D_F_4_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(D) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(F)_bounds(ub)_: +-92 NoClash(D_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(D)_bounds(ub)_: +-92 NoClash(D_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_NoClash(D_F_4_1)_c(ub)_: ++7 NoClash(D_F_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(G)_bounds(ub)_: +-92 NoClash(D_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(D)_bounds(ub)_: +-92 NoClash(D_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_NoClash(D_G_2_0)_c(ub)_: ++8 NoClash(D_G_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(D) ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(G)_bounds(ub)_: +-92 NoClash(D_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(D)_bounds(ub)_: +-92 NoClash(D_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_NoClash(D_G_2_1)_c(ub)_: ++8 NoClash(D_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(G)_bounds(ub)_: +-92 NoClash(D_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(D)_bounds(ub)_: +-92 NoClash(D_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_NoClash(D_G_4_0)_c(ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(D) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(G)_bounds(ub)_: +-92 NoClash(D_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(D)_bounds(ub)_: +-92 NoClash(D_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(D) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_NoClash(D_G_4_1)_c(ub)_: ++6 NoClash(D_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(F)_bounds(ub)_: +-92 NoClash(E_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(F) 
+<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(E)_bounds(ub)_: +-92 NoClash(E_F_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_NoClash(E_F_3_0)_c(ub)_: ++3 NoClash(E_F_3_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(E) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(F)_bounds(ub)_: +-92 NoClash(E_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(E)_bounds(ub)_: +-92 NoClash(E_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_NoClash(E_F_3_1)_c(ub)_: ++8 NoClash(E_F_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(G)_bounds(ub)_: +-92 NoClash(E_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(E)_bounds(ub)_: +-92 NoClash(E_G_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_NoClash(E_G_2_0)_c(ub)_: ++8 NoClash(E_G_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(E) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(G)_bounds(ub)_: +-92 NoClash(E_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(E)_bounds(ub)_: +-92 NoClash(E_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(E) +<= 0 + 
+c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_NoClash(E_G_2_1)_c(ub)_: ++4 NoClash(E_G_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(G)_bounds(ub)_: +-92 NoClash(E_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(E)_bounds(ub)_: +-92 NoClash(E_G_5_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_NoClash(E_G_5_0)_c(ub)_: ++7 NoClash(E_G_5_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(E) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(G)_bounds(ub)_: +-92 NoClash(E_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(E)_bounds(ub)_: +-92 NoClash(E_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(E) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_NoClash(E_G_5_1)_c(ub)_: +-1 NoClash(E_G_5_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(G)_bounds(ub)_: +-92 NoClash(F_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(F)_bounds(ub)_: +-92 NoClash(F_G_4_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_NoClash(F_G_4_0)_c(ub)_: ++6 NoClash(F_G_4_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(F) ++1 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(G)_bounds(ub)_: +-92 NoClash(F_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(G) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(F)_bounds(ub)_: +-92 NoClash(F_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(F) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_NoClash(F_G_4_1)_c(ub)_: ++6 NoClash(F_G_4_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(G) +<= 0 + +c_e_ONE_VAR_CONSTANT: +ONE_VAR_CONSTANT = 1.0 + +bounds + -inf <= ms <= +inf + 0 <= t(A) <= 92 + 0 <= t(B) <= 92 + 0 <= t(C) <= 92 + 0 <= t(D) <= 92 + 0 <= t(E) <= 92 + 0 <= t(F) <= 92 + 0 <= t(G) <= 92 + 0 <= NoClash(A_B_3_0)_indicator_var <= 1 + 0 <= NoClash(A_B_3_1)_indicator_var <= 1 + 0 <= NoClash(A_B_5_0)_indicator_var <= 1 + 0 <= NoClash(A_B_5_1)_indicator_var <= 1 + 0 <= NoClash(A_C_1_0)_indicator_var <= 1 + 0 <= NoClash(A_C_1_1)_indicator_var <= 1 + 0 <= NoClash(A_D_3_0)_indicator_var <= 1 + 0 <= NoClash(A_D_3_1)_indicator_var <= 1 + 0 <= NoClash(A_E_3_0)_indicator_var <= 1 + 0 <= NoClash(A_E_3_1)_indicator_var <= 1 + 0 <= NoClash(A_E_5_0)_indicator_var <= 1 + 0 <= NoClash(A_E_5_1)_indicator_var <= 1 + 0 <= NoClash(A_F_1_0)_indicator_var <= 1 + 0 <= NoClash(A_F_1_1)_indicator_var <= 1 + 0 <= NoClash(A_F_3_0)_indicator_var <= 1 + 0 <= NoClash(A_F_3_1)_indicator_var <= 1 + 0 <= NoClash(A_G_5_0)_indicator_var <= 1 + 0 <= NoClash(A_G_5_1)_indicator_var <= 1 + 0 <= NoClash(B_C_2_0)_indicator_var <= 1 + 0 <= NoClash(B_C_2_1)_indicator_var <= 1 + 0 <= NoClash(B_D_2_0)_indicator_var <= 1 + 0 <= NoClash(B_D_2_1)_indicator_var <= 1 + 0 <= NoClash(B_D_3_0)_indicator_var <= 1 + 0 <= NoClash(B_D_3_1)_indicator_var <= 1 + 0 <= NoClash(B_E_2_0)_indicator_var <= 1 + 0 <= NoClash(B_E_2_1)_indicator_var <= 1 + 0 <= 
NoClash(B_E_3_0)_indicator_var <= 1 + 0 <= NoClash(B_E_3_1)_indicator_var <= 1 + 0 <= NoClash(B_E_5_0)_indicator_var <= 1 + 0 <= NoClash(B_E_5_1)_indicator_var <= 1 + 0 <= NoClash(B_F_3_0)_indicator_var <= 1 + 0 <= NoClash(B_F_3_1)_indicator_var <= 1 + 0 <= NoClash(B_G_2_0)_indicator_var <= 1 + 0 <= NoClash(B_G_2_1)_indicator_var <= 1 + 0 <= NoClash(B_G_5_0)_indicator_var <= 1 + 0 <= NoClash(B_G_5_1)_indicator_var <= 1 + 0 <= NoClash(C_D_2_0)_indicator_var <= 1 + 0 <= NoClash(C_D_2_1)_indicator_var <= 1 + 0 <= NoClash(C_D_4_0)_indicator_var <= 1 + 0 <= NoClash(C_D_4_1)_indicator_var <= 1 + 0 <= NoClash(C_E_2_0)_indicator_var <= 1 + 0 <= NoClash(C_E_2_1)_indicator_var <= 1 + 0 <= NoClash(C_F_1_0)_indicator_var <= 1 + 0 <= NoClash(C_F_1_1)_indicator_var <= 1 + 0 <= NoClash(C_F_4_0)_indicator_var <= 1 + 0 <= NoClash(C_F_4_1)_indicator_var <= 1 + 0 <= NoClash(C_G_2_0)_indicator_var <= 1 + 0 <= NoClash(C_G_2_1)_indicator_var <= 1 + 0 <= NoClash(C_G_4_0)_indicator_var <= 1 + 0 <= NoClash(C_G_4_1)_indicator_var <= 1 + 0 <= NoClash(D_E_2_0)_indicator_var <= 1 + 0 <= NoClash(D_E_2_1)_indicator_var <= 1 + 0 <= NoClash(D_E_3_0)_indicator_var <= 1 + 0 <= NoClash(D_E_3_1)_indicator_var <= 1 + 0 <= NoClash(D_F_3_0)_indicator_var <= 1 + 0 <= NoClash(D_F_3_1)_indicator_var <= 1 + 0 <= NoClash(D_F_4_0)_indicator_var <= 1 + 0 <= NoClash(D_F_4_1)_indicator_var <= 1 + 0 <= NoClash(D_G_2_0)_indicator_var <= 1 + 0 <= NoClash(D_G_2_1)_indicator_var <= 1 + 0 <= NoClash(D_G_4_0)_indicator_var <= 1 + 0 <= NoClash(D_G_4_1)_indicator_var <= 1 + 0 <= NoClash(E_F_3_0)_indicator_var <= 1 + 0 <= NoClash(E_F_3_1)_indicator_var <= 1 + 0 <= NoClash(E_G_2_0)_indicator_var <= 1 + 0 <= NoClash(E_G_2_1)_indicator_var <= 1 + 0 <= NoClash(E_G_5_0)_indicator_var <= 1 + 0 <= NoClash(E_G_5_1)_indicator_var <= 1 + 0 <= NoClash(F_G_4_0)_indicator_var <= 1 + 0 <= NoClash(F_G_4_1)_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) <= 92 + 0 <= 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_t(A) <= 92 + 0 <= 
_pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_t(A) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(E) <= 92 + 0 
<= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_t(B) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_t(C) <= 92 + 
0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_t(C) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(G) <= 92 
+ 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_t(D) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_t(E) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_t(F) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(G) <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_t(F) <= 92 +binary + NoClash(A_B_3_0)_indicator_var + NoClash(A_B_3_1)_indicator_var + NoClash(A_B_5_0)_indicator_var + NoClash(A_B_5_1)_indicator_var + NoClash(A_C_1_0)_indicator_var + NoClash(A_C_1_1)_indicator_var + NoClash(A_D_3_0)_indicator_var + NoClash(A_D_3_1)_indicator_var + NoClash(A_E_3_0)_indicator_var + NoClash(A_E_3_1)_indicator_var + NoClash(A_E_5_0)_indicator_var + NoClash(A_E_5_1)_indicator_var + 
NoClash(A_F_1_0)_indicator_var + NoClash(A_F_1_1)_indicator_var + NoClash(A_F_3_0)_indicator_var + NoClash(A_F_3_1)_indicator_var + NoClash(A_G_5_0)_indicator_var + NoClash(A_G_5_1)_indicator_var + NoClash(B_C_2_0)_indicator_var + NoClash(B_C_2_1)_indicator_var + NoClash(B_D_2_0)_indicator_var + NoClash(B_D_2_1)_indicator_var + NoClash(B_D_3_0)_indicator_var + NoClash(B_D_3_1)_indicator_var + NoClash(B_E_2_0)_indicator_var + NoClash(B_E_2_1)_indicator_var + NoClash(B_E_3_0)_indicator_var + NoClash(B_E_3_1)_indicator_var + NoClash(B_E_5_0)_indicator_var + NoClash(B_E_5_1)_indicator_var + NoClash(B_F_3_0)_indicator_var + NoClash(B_F_3_1)_indicator_var + NoClash(B_G_2_0)_indicator_var + NoClash(B_G_2_1)_indicator_var + NoClash(B_G_5_0)_indicator_var + NoClash(B_G_5_1)_indicator_var + NoClash(C_D_2_0)_indicator_var + NoClash(C_D_2_1)_indicator_var + NoClash(C_D_4_0)_indicator_var + NoClash(C_D_4_1)_indicator_var + NoClash(C_E_2_0)_indicator_var + NoClash(C_E_2_1)_indicator_var + NoClash(C_F_1_0)_indicator_var + NoClash(C_F_1_1)_indicator_var + NoClash(C_F_4_0)_indicator_var + NoClash(C_F_4_1)_indicator_var + NoClash(C_G_2_0)_indicator_var + NoClash(C_G_2_1)_indicator_var + NoClash(C_G_4_0)_indicator_var + NoClash(C_G_4_1)_indicator_var + NoClash(D_E_2_0)_indicator_var + NoClash(D_E_2_1)_indicator_var + NoClash(D_E_3_0)_indicator_var + NoClash(D_E_3_1)_indicator_var + NoClash(D_F_3_0)_indicator_var + NoClash(D_F_3_1)_indicator_var + NoClash(D_F_4_0)_indicator_var + NoClash(D_F_4_1)_indicator_var + NoClash(D_G_2_0)_indicator_var + NoClash(D_G_2_1)_indicator_var + NoClash(D_G_4_0)_indicator_var + NoClash(D_G_4_1)_indicator_var + NoClash(E_F_3_0)_indicator_var + NoClash(E_F_3_1)_indicator_var + NoClash(E_G_2_0)_indicator_var + NoClash(E_G_2_1)_indicator_var + NoClash(E_G_5_0)_indicator_var + NoClash(E_G_5_1)_indicator_var + NoClash(F_G_4_0)_indicator_var + NoClash(F_G_4_1)_indicator_var +end diff --git a/pyomo/gdp/tests/jobshop_small_bigm.lp 
b/pyomo/gdp/tests/jobshop_small_bigm.lp index 26b96f734a0..7512feff4c8 100644 --- a/pyomo/gdp/tests/jobshop_small_bigm.lp +++ b/pyomo/gdp/tests/jobshop_small_bigm.lp @@ -21,52 +21,52 @@ c_u_Feas(C)_: +1 t(C) <= -6 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_B_3)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_B_3)_: +1 NoClash(A_B_3_0)_indicator_var +1 NoClash(A_B_3_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(A_C_1)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(A_C_1)_: +1 NoClash(A_C_1_0)_indicator_var +1 NoClash(A_C_1_1)_indicator_var = 1 -c_e__pyomo_gdp_bigm_relaxation_disj_xor(B_C_2)_: +c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_C_2)_: +1 NoClash(B_C_2_0)_indicator_var +1 NoClash(B_C_2_1)_indicator_var = 1 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +19 NoClash(A_B_3_0)_indicator_var -1 t(A) +1 t(B) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +24 NoClash(A_B_3_1)_indicator_var +1 t(A) -1 t(B) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: +21 NoClash(A_C_1_0)_indicator_var -1 t(A) +1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: +24 NoClash(A_C_1_1)_indicator_var +1 t(A) -1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: +25 NoClash(B_C_2_0)_indicator_var -1 t(B) +1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: +20 
NoClash(B_C_2_1)_indicator_var +1 t(B) -1 t(C) diff --git a/pyomo/gdp/tests/jobshop_small_chull.lp b/pyomo/gdp/tests/jobshop_small_chull.lp deleted file mode 100644 index a217199f5c3..00000000000 --- a/pyomo/gdp/tests/jobshop_small_chull.lp +++ /dev/null @@ -1,203 +0,0 @@ -\* Source Pyomo model name=unknown *\ - -min -makespan: -+1 ms - -s.t. - -c_u_Feas(A)_: --1 ms -+1 t(A) -<= -8 - -c_u_Feas(B)_: --1 ms -+1 t(B) -<= -5 - -c_u_Feas(C)_: --1 ms -+1 t(C) -<= -6 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_3_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_B_3_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_C_1_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(A_C_1_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) -+1 t(A) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_C_2_0)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -+1 t(C) -= 0 - -c_e__gdp_chull_relaxation_disj_disaggregation(B_C_2_1)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(B) -+1 t(B) -= 0 - -c_e__gdp_chull_relaxation_disj_xor(A_B_3)_: -+1 NoClash(A_B_3_0)_indicator_var -+1 NoClash(A_B_3_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(A_C_1)_: -+1 NoClash(A_C_1_0)_indicator_var -+1 NoClash(A_C_1_1)_indicator_var -= 1 - -c_e__gdp_chull_relaxation_disj_xor(B_C_2)_: -+1 NoClash(B_C_2_0)_indicator_var -+1 NoClash(B_C_2_1)_indicator_var -= 1 - 
-c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B)_bounds(ub)_: --19 NoClash(A_B_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A)_bounds(ub)_: --19 NoClash(A_B_3_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B)_bounds(ub)_: --19 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A)_bounds(ub)_: --19 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: -+5 NoClash(A_B_3_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(C)_bounds(ub)_: --19 NoClash(A_C_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A)_bounds(ub)_: --19 NoClash(A_C_1_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: -+2 NoClash(A_C_1_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(C)_bounds(ub)_: --19 NoClash(A_C_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A)_bounds(ub)_: --19 NoClash(A_C_1_1)_indicator_var -+1 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: -+5 NoClash(A_C_1_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C)_bounds(ub)_: --19 NoClash(B_C_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(B)_bounds(ub)_: --19 NoClash(B_C_2_0)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: -+6 NoClash(B_C_2_0)_indicator_var --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(B) -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C)_bounds(ub)_: --19 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(B)_bounds(ub)_: --19 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(B) -<= 0 - -c_u__pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: -+1 NoClash(B_C_2_1)_indicator_var -+1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(B) --1 _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) -<= 0 - -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - -bounds - -inf <= ms <= +inf - 0 <= t(A) <= 19 - 0 <= t(B) <= 19 - 0 <= t(C) <= 19 - 0 <= NoClash(A_B_3_0)_indicator_var <= 1 - 0 <= NoClash(A_B_3_1)_indicator_var <= 1 - 0 <= NoClash(A_C_1_0)_indicator_var <= 1 - 0 <= NoClash(A_C_1_1)_indicator_var <= 1 - 0 <= NoClash(B_C_2_0)_indicator_var <= 1 - 0 <= NoClash(B_C_2_1)_indicator_var <= 1 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(B) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(0)_t(A) <= 19 - 0 <= 
_pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(B) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(1)_t(A) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(C) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(2)_t(A) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(C) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(3)_t(A) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(C) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(4)_t(B) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(C) <= 19 - 0 <= _pyomo_gdp_chull_relaxation_relaxedDisjuncts(5)_t(B) <= 19 -binary - NoClash(A_B_3_0)_indicator_var - NoClash(A_B_3_1)_indicator_var - NoClash(A_C_1_0)_indicator_var - NoClash(A_C_1_1)_indicator_var - NoClash(B_C_2_0)_indicator_var - NoClash(B_C_2_1)_indicator_var -end diff --git a/pyomo/gdp/tests/jobshop_small_cuttingplane.lp b/pyomo/gdp/tests/jobshop_small_cuttingplane.lp index bf73da21b49..1226147d867 100644 --- a/pyomo/gdp/tests/jobshop_small_cuttingplane.lp +++ b/pyomo/gdp/tests/jobshop_small_cuttingplane.lp @@ -49,37 +49,37 @@ c_l__pyomo_gdp_cuttingplane_relaxation_cuts(0)_: +1.17006802835 t(C) >= 18.292120300259779 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +19 NoClash(A_B_3_0)_indicator_var -1 t(A) +1 t(B) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: +24 NoClash(A_B_3_1)_indicator_var +1 t(A) -1 t(B) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: +21 NoClash(A_C_1_0)_indicator_var -1 t(A) +1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: 
+c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: +24 NoClash(A_C_1_1)_indicator_var +1 t(A) -1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: +25 NoClash(B_C_2_0)_indicator_var -1 t(B) +1 t(C) <= 19 -c_u__pyomo_gdp_bigm_relaxation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: +20 NoClash(B_C_2_1)_indicator_var +1 t(B) -1 t(C) diff --git a/pyomo/gdp/tests/jobshop_small_hull.lp b/pyomo/gdp/tests/jobshop_small_hull.lp new file mode 100644 index 00000000000..74a2d6e83aa --- /dev/null +++ b/pyomo/gdp/tests/jobshop_small_hull.lp @@ -0,0 +1,203 @@ +\* Source Pyomo model name=unknown *\ + +min +makespan: ++1 ms + +s.t. + +c_u_Feas(A)_: +-1 ms ++1 t(A) +<= -8 + +c_u_Feas(B)_: +-1 ms ++1 t(B) +<= -5 + +c_u_Feas(C)_: +-1 ms ++1 t(C) +<= -6 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_C_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_C_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) ++1 t(C) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_3)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) ++1 t(A) += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_C_1)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) ++1 t(A) += 0 
+ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_C_2)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(B) ++1 t(B) += 0 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: ++1 NoClash(A_B_3_0)_indicator_var ++1 NoClash(A_B_3_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: ++1 NoClash(A_C_1_0)_indicator_var ++1 NoClash(A_C_1_1)_indicator_var += 1 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: ++1 NoClash(B_C_2_0)_indicator_var ++1 NoClash(B_C_2_1)_indicator_var += 1 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B)_bounds(ub)_: +-19 NoClash(A_B_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A)_bounds(ub)_: +-19 NoClash(A_B_3_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_NoClash(A_B_3_0)_c(ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B)_bounds(ub)_: +-19 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A)_bounds(ub)_: +-19 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_NoClash(A_B_3_1)_c(ub)_: ++5 NoClash(A_B_3_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(C)_bounds(ub)_: +-19 NoClash(A_C_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A)_bounds(ub)_: 
+-19 NoClash(A_C_1_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_NoClash(A_C_1_0)_c(ub)_: ++2 NoClash(A_C_1_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(C)_bounds(ub)_: +-19 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A)_bounds(ub)_: +-19 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_NoClash(A_C_1_1)_c(ub)_: ++5 NoClash(A_C_1_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C)_bounds(ub)_: +-19 NoClash(B_C_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(B)_bounds(ub)_: +-19 NoClash(B_C_2_0)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_NoClash(B_C_2_0)_c(ub)_: ++6 NoClash(B_C_2_0)_indicator_var +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(B) ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C)_bounds(ub)_: +-19 NoClash(B_C_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(B)_bounds(ub)_: +-19 NoClash(B_C_2_1)_indicator_var ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(B) +<= 0 + +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_NoClash(B_C_2_1)_c(ub)_: ++1 NoClash(B_C_2_1)_indicator_var 
++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) +<= 0 + +c_e_ONE_VAR_CONSTANT: +ONE_VAR_CONSTANT = 1.0 + +bounds + -inf <= ms <= +inf + 0 <= t(A) <= 19 + 0 <= t(B) <= 19 + 0 <= t(C) <= 19 + 0 <= NoClash(A_B_3_0)_indicator_var <= 1 + 0 <= NoClash(A_B_3_1)_indicator_var <= 1 + 0 <= NoClash(A_C_1_0)_indicator_var <= 1 + 0 <= NoClash(A_C_1_1)_indicator_var <= 1 + 0 <= NoClash(B_C_2_0)_indicator_var <= 1 + 0 <= NoClash(B_C_2_1)_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(B) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_t(A) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(B) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_t(A) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(C) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_t(A) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(C) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_t(A) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(C) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_t(B) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(C) <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_t(B) <= 19 +binary + NoClash(A_B_3_0)_indicator_var + NoClash(A_B_3_1)_indicator_var + NoClash(A_C_1_0)_indicator_var + NoClash(A_C_1_1)_indicator_var + NoClash(B_C_2_0)_indicator_var + NoClash(B_C_2_1)_indicator_var +end diff --git a/pyomo/gdp/tests/models.py b/pyomo/gdp/tests/models.py index be6e6b852d1..43b89a321a0 100644 --- a/pyomo/gdp/tests/models.py +++ b/pyomo/gdp/tests/models.py @@ -1,5 +1,5 @@ from pyomo.core import (Block, ConcreteModel, Constraint, Objective, Param, - Set, Var, inequality, RangeSet, Any) + Set, Var, inequality, RangeSet, Any, Expression) from pyomo.gdp import Disjunct, Disjunction @@ -46,7 +46,7 @@ def d_rule(disjunct, flag): def 
makeTwoTermDisj_IndexedConstraints(): """Single two-term disjunction with IndexedConstraints on both disjuncts. - Does not bound the variables, so cannot be transformed by chull at all and + Does not bound the variables, so cannot be transformed by hull at all and requires specifying m values in bigm. """ m = ConcreteModel() @@ -266,6 +266,17 @@ def disjunction(m): return m +def add_disj_not_on_block(m): + def simpdisj_rule(disjunct): + m = disjunct.model() + disjunct.c = Constraint(expr=m.a >= 3) + m.simpledisj = Disjunct(rule=simpdisj_rule) + def simpledisj2_rule(disjunct): + m = disjunct.model() + disjunct.c = Constraint(expr=m.a <= 3.5) + m.simpledisj2 = Disjunct(rule=simpledisj2_rule) + m.disjunction2 = Disjunction(expr=[m.simpledisj, m.simpledisj2]) + return m def makeDisjunctionsOnIndexedBlock(): """Two disjunctions (one indexed an one not), each on a separate @@ -343,7 +354,7 @@ def makeNestedDisjunctions(): (makeNestedDisjunctions_NestedDisjuncts is a much simpler model. All this adds is that it has a nested disjunction on a DisjunctData as well - as on a SimpleDisjunct. So mostly exists for historical reasons.) + as on a SimpleDisjunct. So mostly it exists for historical reasons.) """ m = ConcreteModel() m.x = Var(bounds=(-9, 9)) @@ -525,6 +536,19 @@ def makeDisjunctWithRangeSet(): m.disj = Disjunction(expr=[m.d1, m.d2]) return m +def makeDisjunctWithExpression(): + """Two-term SimpleDisjunction where one of the disjuncts contains an + Expression. This is used to make sure that we correctly handle types we + hit in disjunct.component_objects(active=True)""" + m = ConcreteModel() + m.x = Var(bounds=(0, 1)) + m.d1 = Disjunct() + m.d1.e = Expression(expr=m.x**2) + m.d1.c = Constraint(rule=lambda _: m.x == 1) + m.d2 = Disjunct() + m.disj = Disjunction(expr=[m.d1, m.d2]) + return m + def makeDisjunctionOfDisjunctDatas(): """Two SimpleDisjunctions, where each are disjunctions of DisjunctDatas. 
This adds nothing to makeTwoSimpleDisjunctions but exists for convenience diff --git a/pyomo/gdp/tests/test_bigm.py b/pyomo/gdp/tests/test_bigm.py index 4be5cac9ae4..883030fd529 100644 --- a/pyomo/gdp/tests/test_bigm.py +++ b/pyomo/gdp/tests/test_bigm.py @@ -12,39 +12,23 @@ from pyomo.environ import * from pyomo.gdp import * -from pyomo.core.base import constraint +from pyomo.core.base import constraint, _ConstraintData from pyomo.core.expr import current as EXPR from pyomo.repn import generate_standard_repn from pyomo.common.log import LoggingIntercept +import logging import pyomo.gdp.tests.models as models +import pyomo.gdp.tests.common_tests as ct import random import sys from six import iteritems, StringIO -def check_linear_coef(self, repn, var, coef): - var_id = None - for i,v in enumerate(repn.linear_vars): - if v is var: - var_id = i - self.assertIsNotNone(var_id) - self.assertEqual(repn.linear_coefs[var_id], coef) - - class CommonTests: def diff_apply_to_and_create_using(self, model): - modelcopy = TransformationFactory('gdp.bigm').create_using(model) - modelcopy_buf = StringIO() - modelcopy.pprint(ostream=modelcopy_buf) - modelcopy_output = modelcopy_buf.getvalue() - - TransformationFactory('gdp.bigm').apply_to(model) - model_buf = StringIO() - model.pprint(ostream=model_buf) - model_output = model_buf.getvalue() - self.assertMultiLineEqual(modelcopy_output, model_output) + ct.diff_apply_to_and_create_using(self, model, 'gdp.bigm') class TwoTermDisj(unittest.TestCase, CommonTests): def setUp(self): @@ -56,10 +40,10 @@ def test_new_block_created(self): TransformationFactory('gdp.bigm').apply_to(m) # we have a transformation block - transBlock = m.component("_pyomo_gdp_bigm_relaxation") + transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) - # check that we have the lbub set on the transformation block + # check that we have the lbub set on the transformation block lbub = transBlock.component("lbub") 
self.assertIsInstance(lbub, Set) self.assertEqual(len(lbub), 2) @@ -69,95 +53,37 @@ def test_new_block_created(self): self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) # it has the disjuncts on it - self.assertIsInstance( - disjBlock[1].component("d[1].c1"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("d[1].c2"), - Constraint) - self.assertIsInstance( - disjBlock[0].component("d[0].c"), - Constraint) + self.assertIsInstance( disjBlock[1].component("d[1].c1"), Constraint) + self.assertIsInstance( disjBlock[1].component("d[1].c2"), Constraint) + self.assertIsInstance( disjBlock[0].component("d[0].c"), Constraint) def test_disjunction_deactivated(self): - m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m, targets=(m,)) + ct.check_disjunction_deactivated(self, 'bigm') - oldblock = m.component("disjunction") - self.assertIsInstance(oldblock, Disjunction) - self.assertFalse(oldblock.active) - - def test_disjunctdatas_deactivated(self): - m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m, targets=(m,)) - - oldblock = m.component("disjunction") - self.assertFalse(oldblock.disjuncts[0].active) - self.assertFalse(oldblock.disjuncts[1].active) + def test_disjunctDatas_deactivated(self): + ct.check_disjunctDatas_deactivated(self, 'bigm') def test_do_not_transform_twice_if_disjunction_reactivated(self): - m = models.makeTwoTermDisj() - # this is a hack, but just diff the pprint from this and from calling - # the transformation again. 
- TransformationFactory('gdp.bigm').apply_to(m) - first_buf = StringIO() - m.pprint(ostream=first_buf) - first_output = first_buf.getvalue() - - TransformationFactory('gdp.bigm').apply_to(m) - second_buf = StringIO() - m.pprint(ostream=second_buf) - second_output = second_buf.getvalue() - - self.assertMultiLineEqual(first_output, second_output) - - # this is a stupid thing to do, but we should still know not to - # retransform because active status is now *not* the source of truth. - m.disjunction.activate() - - # This is kind of the wrong error, but I'll live with it: at least we - # get an error. - self.assertRaisesRegexp( - GDP_Error, - "The disjunct d\[0\] has been transformed, but a disjunction " - "it appears in has not. Putting the same disjunct in " - "multiple disjunctions is not supported.", - TransformationFactory('gdp.bigm').apply_to, - m) + ct.check_do_not_transform_twice_if_disjunction_reactivated(self, 'bigm') def test_xor_constraint_mapping(self): - m = models.makeTwoTermDisj() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m) - - transBlock = m._pyomo_gdp_bigm_relaxation - self.assertIs( bigm.get_src_disjunction(transBlock.disjunction_xor), - m.disjunction) - self.assertIs( m.disjunction.algebraic_constraint(), - transBlock.disjunction_xor) + ct.check_xor_constraint_mapping(self, 'bigm') def test_xor_constraint_mapping_two_disjunctions(self): - m = models.makeDisjunctionOfDisjunctDatas() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m) - - transBlock = m._pyomo_gdp_bigm_relaxation - transBlock2 = m._pyomo_gdp_bigm_relaxation_4 - self.assertIs( bigm.get_src_disjunction(transBlock.disjunction_xor), - m.disjunction) - self.assertIs( bigm.get_src_disjunction(transBlock2.disjunction2_xor), - m.disjunction2) - - self.assertIs( m.disjunction.algebraic_constraint(), - transBlock.disjunction_xor) - self.assertIs( m.disjunction2.algebraic_constraint(), - transBlock2.disjunction2_xor) + 
ct.check_xor_constraint_mapping_two_disjunctions(self, 'bigm') + + def test_disjunct_mapping(self): + ct.check_disjunct_mapping(self, 'bigm') def test_disjunct_and_constraint_maps(self): + """Tests the actual data structures used to store the maps.""" + # ESJ: Note that despite outward appearances, this test really is unique + # to bigm. Because hull handles the a == 0 constraint by fixing the + # disaggregated variable rather than creating a transformed constraint. m = models.makeTwoTermDisj() bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m) - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjBlock = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts oldblock = m.component("d") # we are counting on the fact that the disjuncts get relaxed in the @@ -165,7 +91,7 @@ def test_disjunct_and_constraint_maps(self): for i in [0,1]: self.assertIs(oldblock[i].transformation_block(), disjBlock[i]) self.assertIs(bigm.get_src_disjunct(disjBlock[i]), oldblock[i]) - + # check the constraint mappings constraintdict1 = disjBlock[0]._constraintMap self.assertIsInstance(constraintdict1, dict) @@ -174,7 +100,7 @@ def test_disjunct_and_constraint_maps(self): constraintdict2 = disjBlock[1]._constraintMap self.assertIsInstance(constraintdict2, dict) self.assertEqual(len(constraintdict2), 2) - + # original -> transformed transformedConstraints1 = constraintdict1['transformedConstraints'] self.assertIsInstance(transformedConstraints1, ComponentMap) @@ -183,82 +109,54 @@ def test_disjunct_and_constraint_maps(self): self.assertIsInstance(transformedConstraints2, ComponentMap) self.assertEqual(len(transformedConstraints2), 2) # check constraint dict has right mapping - self.assertIs(transformedConstraints2[oldblock[1].c1], - disjBlock[1].component(oldblock[1].c1.name)) - self.assertIs(transformedConstraints2[oldblock[1].c2], - disjBlock[1].component(oldblock[1].c2.name)) - self.assertIs(transformedConstraints1[oldblock[0].c], - disjBlock[0].component(oldblock[0].c.name)) 
+ c1_list = transformedConstraints2[oldblock[1].c1] + self.assertEqual(len(c1_list), 2) + # this is an equality, so we have both lb and ub + self.assertIs(c1_list[0], + disjBlock[1].component(oldblock[1].c1.name)['lb']) + self.assertIs(c1_list[1], + disjBlock[1].component(oldblock[1].c1.name)['ub']) + c2_list = transformedConstraints2[oldblock[1].c2] + # just ub + self.assertEqual(len(c2_list), 1) + self.assertIs(c2_list[0], + disjBlock[1].component(oldblock[1].c2.name)['ub']) + c_list = transformedConstraints1[oldblock[0].c] + # just lb + self.assertEqual(len(c_list), 1) + self.assertIs(c_list[0], + disjBlock[0].component(oldblock[0].c.name)['lb']) # transformed -> original srcdict1 = constraintdict1['srcConstraints'] self.assertIsInstance(srcdict1, ComponentMap) - self.assertEqual(len(srcdict1), 1) + self.assertEqual(len(srcdict1), 2) + self.assertIs(srcdict1[disjBlock[0].component(oldblock[0].c.name)], + oldblock[0].c) + self.assertIs(srcdict1[disjBlock[0].component(oldblock[0].c.name)['lb']], + oldblock[0].c) srcdict2 = constraintdict2['srcConstraints'] self.assertIsInstance(srcdict2, ComponentMap) - self.assertEqual(len(srcdict2), 2) + self.assertEqual(len(srcdict2), 5) self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")], oldblock[1].c1) + self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")['lb']], + oldblock[1].c1) + self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")['ub']], + oldblock[1].c1) self.assertIs(srcdict2[disjBlock[1].component("d[1].c2")], oldblock[1].c2) - self.assertIs(srcdict1[disjBlock[0].component("d[0].c")], - oldblock[0].c) + self.assertIs(srcdict2[disjBlock[1].component("d[1].c2")['ub']], + oldblock[1].c2) def test_new_block_nameCollision(self): - # make sure that if the model already has a block called - # _pyomo_gdp_bigm_relaxation that we come up with a different name for - # the transformation block (and put the relaxed disjuncts on it) - m = models.makeTwoTermDisj() - m._pyomo_gdp_bigm_relaxation = Block(Any) - 
TransformationFactory('gdp.bigm').apply_to(m) - gdpblock = m.component("_pyomo_gdp_bigm_relaxation_4") - self.assertIsInstance(gdpblock, Block) - - disjBlock = gdpblock.relaxedDisjuncts - self.assertIsInstance(disjBlock, Block) - # both disjuncts on transformation block - self.assertEqual(len(disjBlock), 2) - # nothing got added to the block we collided with that's not ours - self.assertEqual(len(m._pyomo_gdp_bigm_relaxation), 0) - - # disjBlock has the disjuncts on it - self.assertIsInstance( - disjBlock[0].component("d[0].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("d[1].c1"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("d[1].c2"), - Constraint) + ct.check_transformation_block_name_collision(self, 'bigm') def test_indicator_vars(self): - m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m) - oldblock = m.component("d") - # have indicator variables on original disjuncts and they are still - # active. - self.assertIsInstance(oldblock[0].indicator_var, Var) - self.assertTrue(oldblock[0].indicator_var.active) - self.assertTrue(oldblock[0].indicator_var.is_binary()) - self.assertIsInstance(oldblock[1].indicator_var, Var) - self.assertTrue(oldblock[1].indicator_var.active) - self.assertTrue(oldblock[1].indicator_var.is_binary()) + ct.check_indicator_vars(self, 'bigm') def test_xor_constraints(self): - m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m) - # make sure we created the xor constraint and put it on the relaxation - # block - xor = m._pyomo_gdp_bigm_relaxation.component("disjunction_xor") - self.assertIsInstance(xor, Constraint) - self.assertIs(m.d[0].indicator_var, xor.body.arg(0)) - self.assertIs(m.d[1].indicator_var, xor.body.arg(1)) - repn = generate_standard_repn(xor.body) - check_linear_coef(self, repn, m.d[0].indicator_var, 1) - check_linear_coef(self, repn, m.d[1].indicator_var, 1) - self.assertEqual(xor.lower, 1) - self.assertEqual(xor.upper, 1) + 
ct.check_xor_constraint(self, 'bigm') def test_or_constraints(self): m = models.makeTwoTermDisj() @@ -266,32 +164,18 @@ def test_or_constraints(self): TransformationFactory('gdp.bigm').apply_to(m) # check or constraint is an or (upper bound is None) - orcons = m._pyomo_gdp_bigm_relaxation.component("disjunction_xor") + orcons = m._pyomo_gdp_bigm_reformulation.component("disjunction_xor") self.assertIsInstance(orcons, Constraint) self.assertIs(m.d[0].indicator_var, orcons.body.arg(0)) self.assertIs(m.d[1].indicator_var, orcons.body.arg(1)) repn = generate_standard_repn(orcons.body) - check_linear_coef(self, repn, m.d[0].indicator_var, 1) - check_linear_coef(self, repn, m.d[1].indicator_var, 1) + ct.check_linear_coef(self, repn, m.d[0].indicator_var, 1) + ct.check_linear_coef(self, repn, m.d[1].indicator_var, 1) self.assertEqual(orcons.lower, 1) self.assertIsNone(orcons.upper) def test_deactivated_constraints(self): - m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m) - oldblock = m.component("d") - # old constraints still there, deactivated - oldc1 = oldblock[1].component("c1") - self.assertIsInstance(oldc1, Constraint) - self.assertFalse(oldc1.active) - - oldc2 = oldblock[1].component("c2") - self.assertIsInstance(oldc2, Constraint) - self.assertFalse(oldc2.active) - - oldc = oldblock[0].component("c") - self.assertIsInstance(oldc, Constraint) - self.assertFalse(oldc.active) + ct.check_deactivated_constraints(self, 'bigm') def test_transformed_constraints(self): m = models.makeTwoTermDisj() @@ -299,38 +183,20 @@ def test_transformed_constraints(self): self.checkMs(m, -3, 2, 7, 2) def test_do_not_transform_userDeactivated_disjuncts(self): - m = models.makeTwoTermDisj() - m.d[0].deactivate() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m, targets=(m,)) + ct.check_user_deactivated_disjuncts(self, 'bigm') - self.assertFalse(m.disjunction.active) - self.assertFalse(m.d[1].active) - - disjBlock = 
m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertIs(disjBlock[0], m.d[1].transformation_block()) - self.assertIs(bigm.get_src_disjunct(disjBlock[0]), m.d[1]) + def test_improperly_deactivated_disjuncts(self): + ct.check_improperly_deactivated_disjuncts(self, 'bigm') def test_do_not_transform_userDeactivated_IndexedDisjunction(self): - m = models.makeTwoTermIndexedDisjunction() - # If you truly want to transform nothing, deactivate everything - m.disjunction.deactivate() - for idx in m.disjunct: - m.disjunct[idx].deactivate() - TransformationFactory('gdp.bigm').apply_to(m) - - # no transformation block, nothing transformed - self.assertIsNone(m.component("_pyomo_gdp_bigm_transformation")) - for idx in m.disjunct: - self.assertIsNone(m.disjunct[idx].transformation_block) - for idx in m.disjunction: - self.assertIsNone(m.disjunction[idx].algebraic_constraint) + ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, + 'bigm') # helper method to check the M values in all of the transformed # constraints (m, M) is the tuple for M. This also relies on the # disjuncts being transformed in the same order every time. 
def checkMs(self, model, cons1lb, cons2lb, cons2ub, cons3ub): - disjBlock = model._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjBlock = model._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first constraint c = disjBlock[0].component("d[0].c") @@ -339,8 +205,8 @@ def checkMs(self, model, cons1lb, cons2lb, cons2ub, cons3ub): repn = generate_standard_repn(c['lb'].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, model.a, 1) - check_linear_coef(self, repn, model.d[0].indicator_var, cons1lb) + ct.check_linear_coef(self, repn, model.a, 1) + ct.check_linear_coef(self, repn, model.d[0].indicator_var, cons1lb) self.assertEqual(repn.constant, -cons1lb) self.assertEqual(c['lb'].lower, model.d[0].c.lower) self.assertIsNone(c['lb'].upper) @@ -352,8 +218,8 @@ def checkMs(self, model, cons1lb, cons2lb, cons2ub, cons3ub): repn = generate_standard_repn(c['lb'].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, model.a, 1) - check_linear_coef(self, repn, model.d[1].indicator_var, cons2lb) + ct.check_linear_coef(self, repn, model.a, 1) + ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons2lb) self.assertEqual(repn.constant, -cons2lb) self.assertEqual(c['lb'].lower, model.d[1].c1.lower) self.assertIsNone(c['lb'].upper) @@ -361,8 +227,8 @@ def checkMs(self, model, cons1lb, cons2lb, cons2ub, cons3ub): repn = generate_standard_repn(c['ub'].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, model.a, 1) - check_linear_coef(self, repn, model.d[1].indicator_var, cons2ub) + ct.check_linear_coef(self, repn, model.a, 1) + ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons2ub) self.assertEqual(repn.constant, -cons2ub) self.assertIsNone(c['ub'].lower) self.assertEqual(c['ub'].upper, model.d[1].c1.upper) @@ -374,8 +240,8 @@ def checkMs(self, model, cons1lb, cons2lb, cons2ub, 
cons3ub): repn = generate_standard_repn(c['ub'].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, model.x, 1) - check_linear_coef(self, repn, model.d[1].indicator_var, cons3ub) + ct.check_linear_coef(self, repn, model.x, 1) + ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons3ub) self.assertEqual(repn.constant, -cons3ub) self.assertIsNone(c['ub'].lower) self.assertEqual(c['ub'].upper, model.d[1].c2.upper) @@ -577,7 +443,7 @@ def d_rule(d,j): m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation # 2 blocks: the original Disjunct and the transformation block self.assertEqual( @@ -610,7 +476,7 @@ def d_rule(d,j): m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation # 2 blocks: the original Disjunct and the transformation block self.assertEqual( @@ -628,7 +494,7 @@ def d_rule(d,j): len(list(relaxed.component_objects(Constraint))), 1) self.assertEqual( len(list(relaxed.component_data_objects(Constraint))), i) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) def test_local_var(self): m = models.localVar() @@ -637,21 +503,22 @@ def test_local_var(self): # we just need to make sure that constraint was transformed correctly, # which just means that the M values were correct. 
- transformedC = bigm.get_transformed_constraint(m.disj2.cons) - lb = transformedC['lb'] - ub = transformedC['ub'] + transformedC = bigm.get_transformed_constraints(m.disj2.cons) + self.assertEqual(len(transformedC), 2) + lb = transformedC[0] + ub = transformedC[1] repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) - check_linear_coef(self, repn, m.disj2.indicator_var, -2) + ct.check_linear_coef(self, repn, m.disj2.indicator_var, -2) repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) - check_linear_coef(self, repn, m.disj2.indicator_var, 3) + ct.check_linear_coef(self, repn, m.disj2.indicator_var, 3) class TwoTermDisjNonlinear(unittest.TestCase, CommonTests): def test_nonlinear_bigM(self): m = models.makeTwoTermDisj_Nonlinear() TransformationFactory('gdp.bigm').apply_to(m) - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjBlock = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first constraint c = disjBlock[0].component("d[0].c") @@ -660,8 +527,8 @@ def test_nonlinear_bigM(self): repn = generate_standard_repn(c['ub'].body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, m.x, 1) - check_linear_coef(self, repn, m.d[0].indicator_var, 94) + ct.check_linear_coef(self, repn, m.x, 1) + ct.check_linear_coef(self, repn, m.d[0].indicator_var, 94) self.assertEqual(repn.constant, -94) self.assertEqual(c['ub'].upper, m.d[0].c.upper) self.assertIsNone(c['ub'].lower) @@ -673,7 +540,7 @@ def test_nonlinear_bigM_missing_var_bounds(self): GDP_Error, "Cannot estimate M for unbounded nonlinear " "expressions.\n\t\(found while processing " - "constraint d\[0\].c\)", + "constraint 'd\[0\].c'\)", TransformationFactory('gdp.bigm').apply_to, m) @@ -686,7 +553,7 @@ def test_nonlinear_disjoint(self): [(x - 3)**2 + (y - 3)**2 <= 1] ]) TransformationFactory('gdp.bigm').apply_to(m) - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjBlock = 
m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first disjunct, first constraint c = disjBlock[0].component("disj_disjuncts[0].constraint") @@ -694,8 +561,7 @@ def test_nonlinear_disjoint(self): repn = generate_standard_repn(c[1, 'ub'].body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 1) - # check_linear_coef(self, repn, m.x, 1) - check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, 114) + ct.check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, 114) self.assertEqual(repn.constant, -114) self.assertEqual(c[1, 'ub'].upper, m.disj_disjuncts[0].constraint[1].upper) @@ -704,8 +570,8 @@ def test_nonlinear_disjoint(self): repn = generate_standard_repn(c[2, 'lb'].body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 1) - # check_linear_coef(self, repn, m.x, 1) - check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, -104.5) + ct.check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, + -104.5) self.assertEqual(repn.constant, 104.5) self.assertEqual(c[2, 'lb'].lower, m.disj_disjuncts[0].constraint[2].lower) @@ -716,9 +582,9 @@ def test_nonlinear_disjoint(self): repn = generate_standard_repn(c[1, 'ub'].body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 3) - check_linear_coef(self, repn, m.x, -6) - check_linear_coef(self, repn, m.y, -6) - check_linear_coef(self, repn, m.disj_disjuncts[1].indicator_var, 217) + ct.check_linear_coef(self, repn, m.x, -6) + ct.check_linear_coef(self, repn, m.y, -6) + ct.check_linear_coef(self, repn, m.disj_disjuncts[1].indicator_var, 217) self.assertEqual(repn.constant, -199) self.assertEqual(c[1, 'ub'].upper, m.disj_disjuncts[1].constraint[1].upper) @@ -745,34 +611,15 @@ def setUp(self): ] def test_xor_constraints(self): - m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.bigm').apply_to(m) - - xor = m._pyomo_gdp_bigm_relaxation.component("disjunction_xor") - self.assertIsInstance(xor, 
Constraint) - for i in m.disjunction.index_set(): - repn = generate_standard_repn(xor[i].body) - self.assertEqual(repn.constant, 0) - self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef( - self, repn, m.disjunction[i].disjuncts[0].indicator_var, 1) - check_linear_coef( - self, repn, m.disjunction[i].disjuncts[1].indicator_var, 1) - self.assertEqual(xor[i].lower, 1) - self.assertEqual(xor[i].upper, 1) + ct.check_indexed_xor_constraints(self, 'bigm') def test_deactivated_constraints(self): - m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.bigm').apply_to(m) - - for i in m.disjunct.index_set(): - self.assertFalse(m.disjunct[i].c.active) + ct.check_constraints_deactivated_indexedDisjunction(self, 'bigm') def test_transformed_block_structure(self): m = models.makeTwoTermMultiIndexedDisjunction() TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m.component("_pyomo_gdp_bigm_relaxation") + transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) # check that we have the lbub set on the transformation block @@ -797,7 +644,7 @@ def test_disjunct_and_constraint_maps(self): bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m) - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjBlock = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts oldblock = m.component("disjunct") # this test relies on the fact that the disjuncts are going to be @@ -811,46 +658,52 @@ def test_disjunct_and_constraint_maps(self): self.assertIs(transformedDisjunct, srcDisjunct.transformation_block()) - self.assertIs(bigm.get_transformed_constraint(srcDisjunct.c), - disjBlock[dest].component(srcDisjunct.c.name)) - + transformed = bigm.get_transformed_constraints(srcDisjunct.c) + if src[0]: + # equality + self.assertEqual(len(transformed), 2) + self.assertIsInstance(transformed[0], _ConstraintData) + self.assertIsInstance(transformed[1], _ConstraintData) + 
self.assertIs( + transformed[0], + disjBlock[dest].component(srcDisjunct.c.name)['lb']) + self.assertIs( + transformed[1], + disjBlock[dest].component(srcDisjunct.c.name)['ub']) + # check reverse maps from the _ConstraintDatas + self.assertIs(bigm.get_src_constraint( + disjBlock[dest].component(srcDisjunct.c.name)['lb']), + srcDisjunct.c) + self.assertIs(bigm.get_src_constraint( + disjBlock[dest].component(srcDisjunct.c.name)['ub']), + srcDisjunct.c) + else: + # >= + self.assertEqual(len(transformed), 1) + self.assertIsInstance(transformed[0], _ConstraintData) + self.assertIs( + transformed[0], + disjBlock[dest].component(srcDisjunct.c.name)['lb']) + self.assertIs(bigm.get_src_constraint( + disjBlock[dest].component(srcDisjunct.c.name)['lb']), + srcDisjunct.c) + # check reverse map from the container self.assertIs(bigm.get_src_constraint( disjBlock[dest].component(srcDisjunct.c.name)), srcDisjunct.c) def test_deactivated_disjuncts(self): - m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.bigm').apply_to(m, targets=(m,)) - # all the disjuncts got transformed, so all should be deactivated - for i in m.disjunct.index_set(): - self.assertFalse(m.disjunct[i].active) - self.assertFalse(m.disjunct.active) + ct.check_deactivated_disjuncts(self, 'bigm') def test_deactivated_disjunction(self): - m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.bigm').apply_to(m, targets=(m,)) - - # all the disjunctions got transformed, so they should be - # deactivated too - for i in m.disjunction.index_set(): - self.assertFalse(m.disjunction[i].active) - self.assertFalse(m.disjunction.active) + ct.check_deactivated_disjunctions(self, 'bigm') def test_create_using(self): m = models.makeTwoTermMultiIndexedDisjunction() self.diff_apply_to_and_create_using(m) def test_targets_with_container_as_arg(self): - m = models.makeTwoTermIndexedDisjunction() - TransformationFactory('gdp.bigm').apply_to(m.disjunction, - targets=(m.disjunction[2])) - 
transBlock = m._pyomo_gdp_bigm_relaxation - self.assertIsNone(m.disjunction[1].algebraic_constraint) - self.assertIsNone(m.disjunction[3].algebraic_constraint) - self.assertIs(m.disjunction[2].algebraic_constraint(), - transBlock.disjunction_xor[2]) - self.assertIs(m.disjunction._algebraic_constraint(), - transBlock.disjunction_xor) + ct.check_targets_with_container_as_arg(self, 'bigm') class DisjOnBlock(unittest.TestCase, CommonTests): # when the disjunction is on a block, we want all of the stuff created by @@ -858,87 +711,65 @@ class DisjOnBlock(unittest.TestCase, CommonTests): # maintains its meaning def test_xor_constraint_added(self): - m = models.makeTwoTermDisjOnBlock() - TransformationFactory('gdp.bigm').apply_to(m) - - self.assertIsInstance( - m.b._pyomo_gdp_bigm_relaxation.component('b.disjunction_xor'), - Constraint) + ct.check_xor_constraint_added(self, 'bigm') def test_trans_block_created(self): - m = models.makeTwoTermDisjOnBlock() - TransformationFactory('gdp.bigm').apply_to(m) - - # test that the transformation block go created on the model - transBlock = m.b.component('_pyomo_gdp_bigm_relaxation') - self.assertIsInstance(transBlock, Block) - disjBlock = transBlock.component("relaxedDisjuncts") - self.assertIsInstance(disjBlock, Block) - self.assertEqual(len(disjBlock), 2) - # and that it didn't get created on the model - self.assertIsNone(m.component('_pyomo_gdp_bigm_relaxation')) - - def add_disj_not_on_block(self, m): - def simpdisj_rule(disjunct): - m = disjunct.model() - disjunct.c = Constraint(expr=m.a >= 3) - m.simpledisj = Disjunct(rule=simpdisj_rule) - def simpledisj2_rule(disjunct): - m = disjunct.model() - disjunct.c = Constraint(expr=m.a <= 3.5) - m.simpledisj2 = Disjunct(rule=simpledisj2_rule) - m.disjunction2 = Disjunction(expr=[m.simpledisj, m.simpledisj2]) - return m + ct.check_trans_block_created(self, 'bigm') def checkFirstDisjMs(self, model, disj1c1lb, disj1c1ub, disj1c2): bigm = TransformationFactory('gdp.bigm') - c1 = 
bigm.get_transformed_constraint(model.b.disjunct[0].c) + c1 = bigm.get_transformed_constraints(model.b.disjunct[0].c) self.assertEqual(len(c1), 2) - repn = generate_standard_repn(c1['lb'].body) + lb = c1[0] + ub = c1[1] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1lb) - check_linear_coef( + ct.check_linear_coef( self, repn, model.b.disjunct[0].indicator_var, disj1c1lb) - repn = generate_standard_repn(c1['ub'].body) + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1ub) - check_linear_coef( + ct.check_linear_coef( self, repn, model.b.disjunct[0].indicator_var, disj1c1ub) - c2 = bigm.get_transformed_constraint(model.b.disjunct[1].c) + c2 = bigm.get_transformed_constraints(model.b.disjunct[1].c) self.assertEqual(len(c2), 1) - repn = generate_standard_repn(c2['ub'].body) + ub = c2[0] + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c2) - check_linear_coef( + ct.check_linear_coef( self, repn, model.b.disjunct[1].indicator_var, disj1c2) def checkMs(self, model, disj1c1lb, disj1c1ub, disj1c2, disj2c1, disj2c2): bigm = TransformationFactory('gdp.bigm') self.checkFirstDisjMs(model, disj1c1lb, disj1c1ub, disj1c2) - c = bigm.get_transformed_constraint(model.simpledisj.c) + c = bigm.get_transformed_constraints(model.simpledisj.c) self.assertEqual(len(c), 1) - repn = generate_standard_repn(c['lb'].body) + lb = c[0] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c1) - check_linear_coef( + ct.check_linear_coef( self, repn, model.simpledisj.indicator_var, disj2c1) - c = bigm.get_transformed_constraint(model.simpledisj2.c) + c = bigm.get_transformed_constraints(model.simpledisj2.c) self.assertEqual(len(c), 1) - repn = generate_standard_repn(c['ub'].body) + ub = c[0] + repn = generate_standard_repn(ub.body) 
self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c2) - check_linear_coef( + ct.check_linear_coef( self, repn, model.simpledisj2.indicator_var, disj2c2) def test_suffix_M_onBlock(self): m = models.makeTwoTermDisjOnBlock() # adding something that's not on the block so that I know that only # the stuff on the block was changed - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) m.b.BigM = Suffix(direction=Suffix.LOCAL) m.b.BigM[None] = 34 bigm = TransformationFactory('gdp.bigm') @@ -946,7 +777,7 @@ def test_suffix_M_onBlock(self): # check m values self.checkMs(m, -34, 34, 34, -3, 1.5) - + # check the source of the values (src, key) = bigm.get_m_value_src(m.simpledisj.c) self.assertEqual(src, -3) @@ -963,7 +794,7 @@ def test_suffix_M_onBlock(self): def test_block_M_arg(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) bigms = {m.b: 100, m.b.disjunct[1].c: 13} bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m, bigM=bigms) @@ -985,7 +816,7 @@ def test_block_M_arg(self): def test_disjunct_M_arg(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) bigm = TransformationFactory('gdp.bigm') bigms = {m.b: 100, m.b.disjunct[1]: 13} bigm.apply_to(m, bigM=bigms) @@ -1007,7 +838,7 @@ def test_disjunct_M_arg(self): def test_block_M_arg_with_default(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) bigm = TransformationFactory('gdp.bigm') bigms = {m.b: 100, m.b.disjunct[1].c: 13, None: 34} bigm.apply_to(m, bigM=bigms) @@ -1029,11 +860,11 @@ def test_block_M_arg_with_default(self): def test_model_M_arg(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): TransformationFactory('gdp.bigm').apply_to( - m, + m, 
bigM={m: 100, m.b.disjunct[1].c: 13}) self.checkMs(m, -100, 100, 13, -100, 100) @@ -1042,11 +873,11 @@ def test_model_M_arg(self): def test_model_M_arg_overrides_None(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): TransformationFactory('gdp.bigm').apply_to( - m, + m, bigM={m: 100, m.b.disjunct[1].c: 13, None: 34}) @@ -1058,7 +889,7 @@ def test_model_M_arg_overrides_None(self): def test_warning_for_crazy_bigm_args(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) out = StringIO() bigM = ComponentMap({m: 100, m.b.disjunct[1].c: 13}) # this is silly @@ -1073,7 +904,7 @@ def test_warning_for_crazy_bigm_args(self): def test_use_above_scope_m_value(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) bigM = ComponentMap({m: 100, m.b.disjunct[1].c: 13}) out = StringIO() # transform just the block. 
We expect to use the M value specified on @@ -1085,7 +916,7 @@ def test_use_above_scope_m_value(self): def test_unused_arguments_transform_block(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) m.BigM = Suffix(direction=Suffix.LOCAL) m.BigM[None] = 1e6 @@ -1094,12 +925,12 @@ def test_unused_arguments_transform_block(self): out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): - TransformationFactory('gdp.bigm').apply_to( - m.b, - bigM={m: 100, + TransformationFactory('gdp.bigm').apply_to( + m.b, + bigM={m: 100, m.b: 13, m.simpledisj2.c: 10}) - + self.checkFirstDisjMs(m, -13, 13, 13) # The order these get printed depends on a dictionary order, so test @@ -1113,7 +944,7 @@ def test_unused_arguments_transform_block(self): def test_suffix_M_simple_disj(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) m.simpledisj.BigM = Suffix(direction=Suffix.LOCAL) m.simpledisj.BigM[None] = 45 m.BigM = Suffix(direction=Suffix.LOCAL) @@ -1158,7 +989,7 @@ def test_suffix_M_constraintKeyOnModel(self): def test_suffix_M_constraintKeyOnSimpleDisj(self): m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) + m = models.add_disj_not_on_block(m) m.simpledisj.BigM = Suffix(direction=Suffix.LOCAL) m.simpledisj.BigM[None] = 45 m.simpledisj.BigM[m.simpledisj.c] = 87 @@ -1184,46 +1015,14 @@ def test_suffix_M_constraintKeyOnSimpleDisj(self): self.assertIsNone(key) def test_block_targets_inactive(self): - m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.b]) - - self.assertFalse(m.b.disjunct[0].active) - self.assertFalse(m.b.disjunct[1].active) - self.assertFalse(m.b.disjunct.active) - self.assertTrue(m.simpledisj.active) - self.assertTrue(m.simpledisj2.active) + ct.check_block_targets_inactive(self, 'bigm') def 
test_block_only_targets_transformed(self): - m = models.makeTwoTermDisjOnBlock() - m = self.add_disj_not_on_block(m) - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.b]) - - disjBlock = m.b._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("b.disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("b.disjunct[1].c"), - Constraint) - - # this relies on the disjuncts being transformed in the same order every - # time - pairs = [ - (0,0), - (1,1), - ] - for i, j in pairs: - self.assertIs(m.b.disjunct[i].transformation_block(), disjBlock[j]) - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), m.b.disjunct[i]) + ct.check_block_only_targets_transformed(self, 'bigm') def test_create_using(self): m = models.makeTwoTermDisjOnBlock() - self.diff_apply_to_and_create_using(m) + ct.diff_apply_to_and_create_using(self, m, 'gdp.bigm') class SimpleDisjIndexedConstraints(unittest.TestCase, CommonTests): @@ -1232,6 +1031,8 @@ def setUp(self): random.seed(666) def test_do_not_transform_deactivated_constraintDatas(self): + # ESJ: specific to how bigM transforms constraints (so not a common test + # with hull) m = models.makeTwoTermDisj_IndexedConstraints() m.BigM = Suffix(direction=Suffix.LOCAL) m.BigM[None] = 30 @@ -1239,56 +1040,73 @@ def test_do_not_transform_deactivated_constraintDatas(self): bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m) - indexedCons = bigm.get_transformed_constraint(m.b.simpledisj1.c) - self.assertEqual(len(indexedCons), 2) - self.assertIsInstance(indexedCons[2, 'lb'], - constraint._GeneralConstraintData) - self.assertIsInstance(indexedCons[2, 'ub'], - constraint._GeneralConstraintData) - - self.assertRaisesRegexp( - GDP_Error, - "Constraint b.simpledisj1.c\[1\] has not been transformed.", - bigm.get_transformed_constraint, - m.b.simpledisj1.c[1]) + # the real test: This wasn't transformed + log = StringIO() + with 
LoggingIntercept(log, 'pyomo.gdp', logging.ERROR): + self.assertRaisesRegexp( + KeyError, + ".*b.simpledisj1.c\[1\]", + bigm.get_transformed_constraints, + m.b.simpledisj1.c[1]) + self.assertRegexpMatches(log.getvalue(), + ".*Constraint 'b.simpledisj1.c\[1\]' " + "has not been transformed.") + + # and the rest of the container was transformed + cons_list = bigm.get_transformed_constraints(m.b.simpledisj1.c[2]) + self.assertEqual(len(cons_list), 2) + lb = cons_list[0] + ub = cons_list[1] + self.assertIsInstance(lb, constraint._GeneralConstraintData) + self.assertIsInstance(ub, constraint._GeneralConstraintData) def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, disj2c2ub): bigm = TransformationFactory('gdp.bigm') - c = bigm.get_transformed_constraint(m.b.simpledisj1.c) - self.assertEqual(len(c), 4) - repn = generate_standard_repn(c[1, 'lb'].body) + c = bigm.get_transformed_constraints(m.b.simpledisj1.c[1]) + self.assertEqual(len(c), 2) + lb = c[0] + ub = c[1] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1lb) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj1.indicator_var, disj1c1lb) - repn = generate_standard_repn(c[1, 'ub'].body) + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1ub) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj1.indicator_var, disj1c1ub) - repn = generate_standard_repn(c[2, 'lb'].body) + c = bigm.get_transformed_constraints(m.b.simpledisj1.c[2]) + self.assertEqual(len(c), 2) + lb = c[0] + ub = c[1] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c2lb) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj1.indicator_var, disj1c2lb) - repn = generate_standard_repn(c[2, 'ub'].body) + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) 
self.assertEqual(repn.constant, -disj1c2ub) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj1.indicator_var, disj1c2ub) - c = bigm.get_transformed_constraint(m.b.simpledisj2.c) - self.assertEqual(len(c), 2) - repn = generate_standard_repn(c[1, 'ub'].body) + c = bigm.get_transformed_constraints(m.b.simpledisj2.c[1]) + self.assertEqual(len(c), 1) + ub = c[0] + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c1ub) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj2.indicator_var, disj2c1ub) - repn = generate_standard_repn(c[2, 'ub'].body) + c = bigm.get_transformed_constraints(m.b.simpledisj2.c[2]) + self.assertEqual(len(c), 1) + ub = c[0] + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c2ub) - check_linear_coef( + ct.check_linear_coef( self, repn, m.b.simpledisj2.indicator_var, disj2c2ub) def test_suffix_M_constraintData_on_block(self): @@ -1334,8 +1152,8 @@ def test_unbounded_var_m_estimation_err(self): self.assertRaisesRegexp( GDP_Error, "Cannot estimate M for expressions with unbounded variables." - "\n\t\(found unbounded var a\[1\] while processing constraint " - "b.simpledisj1.c\)", + "\n\t\(found unbounded var 'a\[1\]' while processing constraint " + "'b.simpledisj1.c'\)", TransformationFactory('gdp.bigm').apply_to, m) @@ -1352,30 +1170,7 @@ def setUp(self): random.seed(666) def test_xor_constraint(self): - # check that the xor constraint has all the indicator variables... 
- m = models.makeThreeTermIndexedDisj() - TransformationFactory('gdp.bigm').apply_to(m) - - xor = m._pyomo_gdp_bigm_relaxation.component("disjunction_xor") - self.assertIsInstance(xor, Constraint) - self.assertEqual(xor[1].lower, 1) - self.assertEqual(xor[1].upper, 1) - self.assertEqual(xor[2].lower, 1) - self.assertEqual(xor[2].upper, 1) - - repn = generate_standard_repn(xor[1].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 3) - for i in range(3): - check_linear_coef(self, repn, m.disjunct[i,1].indicator_var, 1) - - repn = generate_standard_repn(xor[2].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 3) - for i in range(3): - check_linear_coef(self, repn, m.disjunct[i,2].indicator_var, 1) + ct.check_three_term_xor_constraint(self, 'bigm') def test_create_using(self): m = models.makeThreeTermIndexedDisj() @@ -1394,7 +1189,7 @@ def test_transformed_constraints_on_block(self): m = models.makeTwoTermDisj_IndexedConstraints_BoundedVars() TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m.component("_pyomo_gdp_bigm_relaxation") + transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) disjBlock = transBlock.component("relaxedDisjuncts") self.assertIsInstance(disjBlock, Block) @@ -1416,41 +1211,51 @@ def test_transformed_constraints_on_block(self): def checkMs(self, model, c11lb, c12lb, c21lb, c21ub, c22lb, c22ub): bigm = TransformationFactory('gdp.bigm') - c = bigm.get_transformed_constraint(model.disjunct[0].c) - self.assertEqual(len(c), 2) - repn = generate_standard_repn(c[1, 'lb'].body) + c = bigm.get_transformed_constraints(model.disjunct[0].c[1]) + self.assertEqual(len(c), 1) + lb = c[0] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c11lb) - check_linear_coef(self, 
repn, model.disjunct[0].indicator_var, c11lb) - repn = generate_standard_repn(c[2, 'lb'].body) + ct.check_linear_coef(self, repn, model.disjunct[0].indicator_var, c11lb) + c = bigm.get_transformed_constraints(model.disjunct[0].c[2]) + self.assertEqual(len(c), 1) + lb = c[0] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c12lb) - check_linear_coef(self, repn, model.disjunct[0].indicator_var, c12lb) + ct.check_linear_coef(self, repn, model.disjunct[0].indicator_var, c12lb) - c = bigm.get_transformed_constraint(model.disjunct[1].c) - self.assertEqual(len(c), 4) - repn = generate_standard_repn(c[1, 'lb'].body) + c = bigm.get_transformed_constraints(model.disjunct[1].c[1]) + self.assertEqual(len(c), 2) + lb = c[0] + ub = c[1] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c21lb) - check_linear_coef(self, repn, model.disjunct[1].indicator_var, c21lb) - repn = generate_standard_repn(c[1, 'ub'].body) + ct.check_linear_coef(self, repn, model.disjunct[1].indicator_var, c21lb) + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c21ub) - check_linear_coef(self, repn, model.disjunct[1].indicator_var, c21ub) - repn = generate_standard_repn(c[2, 'lb'].body) + ct.check_linear_coef(self, repn, model.disjunct[1].indicator_var, c21ub) + c = bigm.get_transformed_constraints(model.disjunct[1].c[2]) + self.assertEqual(len(c), 2) + lb = c[0] + ub = c[1] + repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c22lb) - check_linear_coef(self, repn, model.disjunct[1].indicator_var, c22lb) - repn = generate_standard_repn(c[2, 'ub'].body) + ct.check_linear_coef(self, repn, 
model.disjunct[1].indicator_var, c22lb) + repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, -c22ub) - check_linear_coef(self, repn, model.disjunct[1].indicator_var, c22ub) + ct.check_linear_coef(self, repn, model.disjunct[1].indicator_var, c22ub) def test_arg_M_constraintdata(self): m = models.makeTwoTermDisj_IndexedConstraints_BoundedVars() @@ -1533,83 +1338,21 @@ def test_create_using(self): class DisjunctInMultipleDisjunctions(unittest.TestCase, CommonTests): def test_error_for_same_disjunct_in_multiple_disjunctions(self): - m = models.makeDisjunctInMultipleDisjunctions() - self.assertRaisesRegexp( - GDP_Error, - "The disjunct disjunct1\[1\] has been transformed, " - "but a disjunction it appears in has not. Putting the same " - "disjunct in multiple disjunctions is not supported.", - TransformationFactory('gdp.bigm').apply_to, - m) + ct.check_error_for_same_disjunct_in_multiple_disjunctions(self, 'bigm') class TestTargets_SingleDisjunction(unittest.TestCase, CommonTests): def test_only_targets_inactive(self): - m = models.makeTwoSimpleDisjunctions() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.disjunction1]) - - self.assertFalse(m.disjunction1.active) - # disjunction2 still active - self.assertTrue(m.disjunction2.active) - - self.assertFalse(m.disjunct1[0].active) - self.assertFalse(m.disjunct1[1].active) - self.assertFalse(m.disjunct1.active) - self.assertTrue(m.disjunct2[0].active) - self.assertTrue(m.disjunct2[1].active) - self.assertTrue(m.disjunct2.active) + ct.check_only_targets_inactive(self, 'bigm') def test_only_targets_transformed(self): - m = models.makeTwoSimpleDisjunctions() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.disjunction1]) - - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - # only two disjuncts relaxed - self.assertEqual(len(disjBlock), 2) - 
self.assertIsInstance(disjBlock[0].component("disjunct1[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[1].c"), - Constraint) - - pairs = [ - (0, 0), - (1, 1) - ] - for i, j in pairs: - self.assertIs(disjBlock[i], m.disjunct1[j].transformation_block()) - self.assertIs(bigm.get_src_disjunct(disjBlock[i]), m.disjunct1[j]) - - self.assertIsNone(m.disjunct2[0].transformation_block) - self.assertIsNone(m.disjunct2[1].transformation_block) + ct.check_only_targets_get_transformed(self, 'bigm') def test_target_not_a_component_err(self): - decoy = ConcreteModel() - decoy.block = Block() - m = models.makeTwoSimpleDisjunctions() - self.assertRaisesRegexp( - GDP_Error, - "Target block is not a component on instance unknown!", - TransformationFactory('gdp.bigm').apply_to, - m, - targets=[decoy.block]) + ct.check_target_not_a_component_error(self, 'bigm') def test_targets_cannot_be_cuids(self): - m = models.makeTwoTermDisj() - self.assertRaisesRegexp( - ValueError, - "invalid value for configuration 'targets':\n" - "\tFailed casting \[disjunction\]\n" - "\tto target_list\n" - "\tError: Expected Component or list of Components." - "\n\tRecieved %s" % type(ComponentUID(m.disjunction)), - TransformationFactory('gdp.bigm').apply_to, - m, - targets=[ComponentUID(m.disjunction)]) + ct.check_targets_cannot_be_cuids(self, 'bigm') # [ESJ 09/14/2019] See my rant in #1072, but I think this is why we cannot # actually support this! 
@@ -1624,279 +1367,38 @@ def test_targets_cannot_be_cuids(self): class TestTargets_IndexedDisjunction(unittest.TestCase, CommonTests): def test_indexedDisj_targets_inactive(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.disjunction1]) - - self.assertFalse(m.disjunction1.active) - self.assertFalse(m.disjunction1[1].active) - self.assertFalse(m.disjunction1[2].active) - - self.assertFalse(m.disjunct1[1,0].active) - self.assertFalse(m.disjunct1[1,1].active) - self.assertFalse(m.disjunct1[2,0].active) - self.assertFalse(m.disjunct1[2,1].active) - self.assertFalse(m.disjunct1.active) - - self.assertTrue(m.b[0].disjunct[0].active) - self.assertTrue(m.b[0].disjunct[1].active) - self.assertTrue(m.b[1].disjunct0.active) - self.assertTrue(m.b[1].disjunct1.active) + ct.check_indexedDisj_targets_inactive(self, 'bigm') def test_indexedDisj_only_targets_transformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.disjunction1]) - - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 4) - self.assertIsInstance(disjBlock[0].component("disjunct1[1,0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[1,1].c"), - Constraint) - self.assertIsInstance(disjBlock[2].component("disjunct1[2,0].c"), - Constraint) - self.assertIsInstance(disjBlock[3].component("disjunct1[2,1].c"), - Constraint) - - # This relies on the disjunctions being transformed in the same order - # every time. These are the mappings between the indices of the original - # disjuncts and the indices on the indexed block on the transformation - # block. 
- pairs = [ - ((1,0), 0), - ((1,1), 1), - ((2,0), 2), - ((2,1), 3), - ] - for i, j in pairs: - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) - self.assertIs(disjBlock[j], m.disjunct1[i].transformation_block()) + ct.check_indexedDisj_only_targets_transformed(self, 'bigm') def test_warn_for_untransformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - def innerdisj_rule(d, flag): - m = d.model() - if flag: - d.c = Constraint(expr=m.a[1] <= 2) - else: - d.c = Constraint(expr=m.a[1] >= 65) - m.disjunct1[1,1].innerdisjunct = Disjunct([0,1], rule=innerdisj_rule) - m.disjunct1[1,1].innerdisjunction = Disjunction([0], - rule=lambda a,i: [m.disjunct1[1,1].innerdisjunct[0], - m.disjunct1[1,1].innerdisjunct[1]]) - # This test relies on the order that the component objects of - # the disjunct get considered. In this case, the disjunct - # causes the error, but in another world, it could be the - # disjunction, which is also active. - self.assertRaisesRegexp( - GDP_Error, - "Found active disjunct disjunct1\[1,1\].innerdisjunct\[0\] " - "in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.bigm').create_using, - m, - targets=[m.disjunction1[1]]) - # - # we will make that disjunction come first now... - # - tmp = m.disjunct1[1,1].innerdisjunct - m.disjunct1[1,1].del_component(tmp) - m.disjunct1[1,1].add_component('innerdisjunct', tmp) - self.assertRaisesRegexp( - GDP_Error, - "Found untransformed disjunction disjunct1\[1,1\]." 
- "innerdisjunction\[0\] in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.bigm').create_using, - m, - targets=[m.disjunction1[1]]) - # Deactivating the disjunction will allow us to get past it back - # to the Disjunct (after we realize there are no active - # DisjunctionData within the active Disjunction) - m.disjunct1[1,1].innerdisjunction[0].deactivate() - self.assertRaisesRegexp( - GDP_Error, - "Found active disjunct disjunct1\[1,1\].innerdisjunct\[0\] " - "in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.bigm').create_using, - m, - targets=[m.disjunction1[1]]) + ct.check_warn_for_untransformed(self, 'bigm') def test_disjData_targets_inactive(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.disjunction1[2]]) - - self.assertFalse(m.disjunction1[2].active) - - self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertTrue(m.disjunct1[1,1].active) - self.assertFalse(m.disjunct1[2,0].active) - self.assertFalse(m.disjunct1[2,1].active) - - self.assertTrue(m.b[0].disjunct.active) - self.assertTrue(m.b[0].disjunct[0].active) - self.assertTrue(m.b[0].disjunct[1].active) - self.assertTrue(m.b[1].disjunct0.active) - self.assertTrue(m.b[1].disjunct1.active) + ct.check_disjData_targets_inactive(self, 'bigm') def test_disjData_only_targets_transformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.disjunction1[2]]) - - disjBlock = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("disjunct1[2,0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[2,1].c"), - Constraint) - - # This relies on the disjunctions being transformed in the same order - # every time. 
These are the mappings between the indices of the original - # disjuncts and the indices on the indexed block on the transformation - # block. - pairs = [ - ((2,0), 0), - ((2,1), 1), - ] - for i, j in pairs: - self.assertIs(m.disjunct1[i].transformation_block(), disjBlock[j]) - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) + ct.check_disjData_only_targets_transformed(self, 'bigm') def test_indexedBlock_targets_inactive(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.b]) - - self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertTrue(m.disjunct1[1,1].active) - self.assertTrue(m.disjunct1[2,0].active) - self.assertTrue(m.disjunct1[2,1].active) - - self.assertFalse(m.b[0].disjunct.active) - self.assertFalse(m.b[0].disjunct[0].active) - self.assertFalse(m.b[0].disjunct[1].active) - self.assertFalse(m.b[1].disjunct0.active) - self.assertFalse(m.b[1].disjunct1.active) + ct.check_indexedBlock_targets_inactive(self, 'bigm') def test_indexedBlock_only_targets_transformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.b]) - - disjBlock1 = m.b[0]._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock1), 2) - self.assertIsInstance(disjBlock1[0].component("b[0].disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock1[1].component("b[0].disjunct[1].c"), - Constraint) - disjBlock2 = m.b[1]._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock2), 2) - self.assertIsInstance(disjBlock2[0].component("b[1].disjunct0.c"), - Constraint) - self.assertIsInstance(disjBlock2[1].component("b[1].disjunct1.c"), - Constraint) - - # This relies on the disjunctions being transformed in the same order - # every time. 
This dictionary maps the block index to the list of - # pairs of (originalDisjunctIndex, transBlockIndex) - pairs = { - 0: - [ - ('disjunct',0,0), - ('disjunct',1,1), - ], - 1: - [ - ('disjunct0',None,0), - ('disjunct1',None,1), - ] - } - - for blocknum, lst in iteritems(pairs): - for comp, i, j in lst: - original = m.b[blocknum].component(comp) - if blocknum == 0: - disjBlock = disjBlock1 - if blocknum == 1: - disjBlock = disjBlock2 - self.assertIs(original[i].transformation_block(), disjBlock[j]) - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), original[i]) - - def checkb0TargetsInactive(self, m): - self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertTrue(m.disjunct1[1,1].active) - self.assertTrue(m.disjunct1[2,0].active) - self.assertTrue(m.disjunct1[2,1].active) - - self.assertFalse(m.b[0].disjunct.active) - self.assertFalse(m.b[0].disjunct[0].active) - self.assertFalse(m.b[0].disjunct[1].active) - self.assertTrue(m.b[1].disjunct0.active) - self.assertTrue(m.b[1].disjunct1.active) - - def checkb0TargetsTransformed(self, m): - bigm = TransformationFactory('gdp.bigm') - disjBlock = m.b[0]._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("b[0].disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("b[0].disjunct[1].c"), - Constraint) - - # This relies on the disjunctions being transformed in the same order - # every time. 
This dictionary maps the block index to the list of - # pairs of (originalDisjunctIndex, transBlockIndex) - pairs = [ - (0,0), - (1,1), - ] - for i, j in pairs: - self.assertIs(m.b[0].disjunct[i].transformation_block(), - disjBlock[j]) - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), - m.b[0].disjunct[i]) + ct.check_indexedBlock_only_targets_transformed(self, 'bigm') def test_blockData_targets_inactive(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.b[0]]) - - self.checkb0TargetsInactive(m) + ct.check_blockData_targets_inactive(self, 'bigm') def test_blockData_only_targets_transformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.b[0]]) - self.checkb0TargetsTransformed(m) + ct.check_blockData_only_targets_transformed(self, 'bigm') def test_do_not_transform_deactivated_targets(self): - m = models.makeDisjunctionsOnIndexedBlock() - m.b[1].deactivate() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.b[0], m.b[1]]) - - self.checkb0TargetsInactive(m) - self.checkb0TargetsTransformed(m) + ct.check_do_not_transform_deactivated_targets(self, 'bigm') def test_create_using(self): m = models.makeDisjunctionsOnIndexedBlock() - self.diff_apply_to_and_create_using(m) + ct.diff_apply_to_and_create_using(self, m, 'gdp.bigm') class DisjunctionInDisjunct(unittest.TestCase, CommonTests): @@ -1905,20 +1407,16 @@ def setUp(self): random.seed(666) def test_disjuncts_inactive(self): - m = models.makeNestedDisjunctions() - TransformationFactory('gdp.bigm').apply_to(m, targets=(m,)) + ct.check_disjuncts_inactive_nested(self, 'bigm') - self.assertFalse(m.disjunction.active) - self.assertFalse(m.simpledisjunct.active) - self.assertFalse(m.disjunct[0].active) - self.assertFalse(m.disjunct[1].active) - self.assertFalse(m.disjunct.active) + def test_deactivated_disjunct_leaves_nested_disjuncts_active(self): + 
ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, 'bigm') def test_transformation_block_structure(self): m = models.makeNestedDisjunctions() TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation self.assertIsInstance(transBlock, Block) # check that we have the lbub set on the transformation block @@ -1936,12 +1434,12 @@ def test_transformation_block_structure(self): # All the outer and inner disjuncts should be on Block: self.assertEqual(len(disjBlock), 7) pairs = [ - (0, ["simpledisjunct._pyomo_gdp_bigm_relaxation.simpledisjunct." + (0, ["simpledisjunct._pyomo_gdp_bigm_reformulation.simpledisjunct." "innerdisjunction_xor"]), (1, ["simpledisjunct.innerdisjunct0.c"]), (2, ["simpledisjunct.innerdisjunct1.c"]), (3, ["disjunct[0].c"]), - (4, ["disjunct[1]._pyomo_gdp_bigm_relaxation.disjunct[1]." + (4, ["disjunct[1]._pyomo_gdp_bigm_reformulation.disjunct[1]." "innerdisjunction_xor", "disjunct[1].c"]), (5, ["disjunct[1].innerdisjunct[0].c"]), @@ -1956,43 +1454,26 @@ def test_transformation_block_structure(self): disjBlock[i].component(nm), Constraint) - def test_transformation_block_not_on_disjunct_anymore(self): + def test_transformation_block_on_disjunct_empty(self): m = models.makeNestedDisjunctions() TransformationFactory('gdp.bigm').apply_to(m) + self.assertEqual(len(m.disjunct[1]._pyomo_gdp_bigm_reformulation.\ + component("relaxedDisjuncts")), 0) + self.assertEqual(len(m.simpledisjunct._pyomo_gdp_bigm_reformulation.\ + component("relaxedDisjuncts")), 0) - self.assertIsNone(m.disjunct[1]._pyomo_gdp_bigm_relaxation.\ - component("relaxedDisjuncts")) - self.assertIsNone(m.simpledisjunct._pyomo_gdp_bigm_relaxation.\ - component("relaxedDisjuncts")) - def test_mappings_between_disjunctions_and_xors(self): - m = models.makeNestedDisjunctions() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m) - - transBlock = m._pyomo_gdp_bigm_relaxation - - disjunctionPairs = 
[ - (m.disjunction, transBlock.disjunction_xor), - (m.disjunct[1].innerdisjunction[0], - m.disjunct[1]._pyomo_gdp_bigm_relaxation.component( - "disjunct[1].innerdisjunction_xor")[0]), - (m.simpledisjunct.innerdisjunction, - m.simpledisjunct._pyomo_gdp_bigm_relaxation.component( - "simpledisjunct.innerdisjunction_xor")) - ] - - # check disjunction mappings - for disjunction, xor in disjunctionPairs: - self.assertIs(disjunction.algebraic_constraint(), xor) - self.assertIs(bigm.get_src_disjunction(xor), disjunction) + # Note this test actually checks that the inner disjunction maps to its + # original xor (which will be transformed again by the outer + # disjunction.) + ct.check_mappings_between_disjunctions_and_xors(self, 'bigm') def test_disjunct_mappings(self): m = models.makeNestedDisjunctions() bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m) - disjunctBlocks = m._pyomo_gdp_bigm_relaxation.relaxedDisjuncts + disjunctBlocks = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # I want to check that I correctly updated the pointers to the # transformation blocks on the inner Disjuncts. 
@@ -2051,25 +1532,25 @@ def check_bigM_constraint(self, cons, variable, M, indicator_var): self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -M) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, variable, 1) - check_linear_coef(self, repn, indicator_var, M) + ct.check_linear_coef(self, repn, variable, 1) + ct.check_linear_coef(self, repn, indicator_var, M) def check_xor_relaxation(self, cons, indvar1, indvar2, indvar3, lb): repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 3) - check_linear_coef(self, repn, indvar1, 1) - check_linear_coef(self, repn, indvar2, 1) + ct.check_linear_coef(self, repn, indvar1, 1) + ct.check_linear_coef(self, repn, indvar2, 1) if not lb: self.assertEqual(cons.upper, 1) self.assertIsNone(cons.lower) self.assertEqual(repn.constant, -1) - check_linear_coef(self, repn, indvar3, 1) + ct.check_linear_coef(self, repn, indvar3, 1) else: self.assertEqual(cons.lower, 1) self.assertIsNone(cons.upper) self.assertEqual(repn.constant, 1) - check_linear_coef(self, repn, indvar3, -1) + ct.check_linear_coef(self, repn, indvar3, -1) def test_transformed_constraints(self): # We'll check all the transformed constraints to make sure @@ -2116,7 +1597,7 @@ def test_transformed_constraints(self): # Here we check that the xor constraint from # simpledisjunct.innerdisjunction is transformed. cons5 = m.simpledisjunct.transformation_block().component( - "simpledisjunct._pyomo_gdp_bigm_relaxation.simpledisjunct." + "simpledisjunct._pyomo_gdp_bigm_reformulation.simpledisjunct." "innerdisjunction_xor") cons5lb = cons5['lb'] self.check_xor_relaxation( @@ -2148,7 +1629,7 @@ def test_transformed_constraints(self): # disjunct[1].innerdisjunction gets transformed alongside the # other constraint in disjunct[1]. cons7 = m.disjunct[1].transformation_block().component( - "disjunct[1]._pyomo_gdp_bigm_relaxation.disjunct[1]." 
+ "disjunct[1]._pyomo_gdp_bigm_reformulation.disjunct[1]." "innerdisjunction_xor") cons7lb = cons7[0,'lb'] self.check_xor_relaxation( @@ -2172,164 +1653,69 @@ def test_transformed_constraints(self): self.check_bigM_constraint(cons8, m.a, 21, m.disjunct[1].indicator_var) def test_disjunct_targets_inactive(self): - m = models.makeNestedDisjunctions() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.simpledisjunct]) - - self.assertTrue(m.disjunct.active) - self.assertTrue(m.disjunct[0].active) - self.assertTrue(m.disjunct[1].active) - self.assertTrue(m.disjunct[1].innerdisjunct.active) - self.assertTrue(m.disjunct[1].innerdisjunct[0].active) - self.assertTrue(m.disjunct[1].innerdisjunct[1].active) - - # We basically just treated simpledisjunct as a block. It - # itself has not been transformed and should not be - # deactivated. We just transformed everything in it. - self.assertTrue(m.simpledisjunct.active) - self.assertFalse(m.simpledisjunct.innerdisjunct0.active) - self.assertFalse(m.simpledisjunct.innerdisjunct1.active) + ct.check_disjunct_targets_inactive(self, 'bigm') def test_disjunct_only_targets_transformed(self): - m = models.makeNestedDisjunctions() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.simpledisjunct]) - - disjBlock = m.simpledisjunct._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance( - disjBlock[0].component("simpledisjunct.innerdisjunct0.c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("simpledisjunct.innerdisjunct1.c"), - Constraint) - - # This also relies on the disjuncts being transformed in the same - # order every time. 
- pairs = [ - (0,0), - (1,1), - ] - for i, j in pairs: - self.assertIs(m.simpledisjunct.component('innerdisjunct%d'%i), - bigm.get_src_disjunct(disjBlock[j])) - self.assertIs(disjBlock[j], - m.simpledisjunct.component( - 'innerdisjunct%d'%i).transformation_block()) + ct.check_disjunct_only_targets_transformed(self, 'bigm') def test_disjunctData_targets_inactive(self): - m = models.makeNestedDisjunctions() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.disjunct[1]]) - - self.assertTrue(m.disjunct[0].active) - self.assertTrue(m.disjunct[1].active) - self.assertTrue(m.disjunct.active) - self.assertFalse(m.disjunct[1].innerdisjunct[0].active) - self.assertFalse(m.disjunct[1].innerdisjunct[1].active) - self.assertFalse(m.disjunct[1].innerdisjunct.active) - - self.assertTrue(m.simpledisjunct.active) - self.assertTrue(m.simpledisjunct.innerdisjunct0.active) - self.assertTrue(m.simpledisjunct.innerdisjunct1.active) + ct.check_disjunctData_targets_inactive(self, 'bigm') def test_disjunctData_only_targets_transformed(self): - m = models.makeNestedDisjunctions() - # This is so convoluted, but you can treat a disjunct like a block: - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to( - m, - targets=[m.disjunct[1]]) - - disjBlock = m.disjunct[1]._pyomo_gdp_bigm_relaxation.relaxedDisjuncts - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance( - disjBlock[0].component("disjunct[1].innerdisjunct[0].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("disjunct[1].innerdisjunct[1].c"), - Constraint) - - # This also relies on the disjuncts being transformed in the same - # order every time. 
- pairs = [ - (0,0), - (1,1), - ] - for i, j in pairs: - self.assertIs(bigm.get_src_disjunct(disjBlock[j]), - m.disjunct[1].innerdisjunct[i]) - self.assertIs(m.disjunct[1].innerdisjunct[i].transformation_block(), - disjBlock[j]) + ct.check_disjunctData_only_targets_transformed(self, 'bigm') def test_disjunction_target_err(self): - m = models.makeNestedDisjunctions() - self.assertRaisesRegexp( - GDP_Error, - "Found active disjunct simpledisjunct.innerdisjunct0 in " - "disjunct simpledisjunct!.*", - TransformationFactory('gdp.bigm').apply_to, - m, - - targets=[m.disjunction]) + ct.check_disjunction_target_err(self, 'bigm') def test_create_using(self): m = models.makeNestedDisjunctions() self.diff_apply_to_and_create_using(m) + def test_indexed_nested_disjunction(self): + # When we have a nested disjunction inside of a disjunct, we need to + # make sure that we don't delete the relaxedDisjuncts container because + # we will end up moving things out of it in two different steps. If that + # were to happen, this would throw an error when it can't find the block + # the second time. 
+ m = ConcreteModel() + m.d1 = Disjunct() + m.d1.indexedDisjunct1 = Disjunct([0,1]) + m.d1.indexedDisjunct2 = Disjunct([0,1]) + @m.d1.Disjunction([0,1]) + def innerIndexed(d, i): + return [d.indexedDisjunct1[i], d.indexedDisjunct2[i]] + m.d2 = Disjunct() + m.outer = Disjunction(expr=[m.d1, m.d2]) + + TransformationFactory('gdp.bigm').apply_to(m) + + # we check that they all ended up on the same Block in the end (I don't + # really care in what order for this test) + disjuncts = [m.d1, m.d2, m.d1.indexedDisjunct1[0], + m.d1.indexedDisjunct1[1], m.d1.indexedDisjunct2[0], + m.d1.indexedDisjunct2[1]] + for disjunct in disjuncts: + self.assertIs(disjunct.transformation_block().parent_component(), + m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts) + + # and we check that nothing remains on original transformation block + self.assertEqual(len(m.d1._pyomo_gdp_bigm_reformulation.relaxedDisjuncts), + 0) class IndexedDisjunction(unittest.TestCase): # this tests that if the targets are a subset of the # _DisjunctDatas in an IndexedDisjunction that the xor constraint # created on the parent block will still be indexed as expected. 
def test_xor_constraint(self): - m = models.makeTwoTermIndexedDisjunction_BoundedVars() - TransformationFactory('gdp.bigm').apply_to( - m, - targets=[m.disjunction[1], - m.disjunction[3]]) - - xorC = m.disjunction[1].algebraic_constraint().parent_component() - self.assertIsInstance(xorC, Constraint) - self.assertEqual(len(xorC), 2) - - # check the constraints - for i in [1,3]: - self.assertEqual(xorC[i].lower, 1) - self.assertEqual(xorC[i].upper, 1) - repn = generate_standard_repn(xorC[i].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - check_linear_coef(self, repn, m.disjunct[i, 0].indicator_var, 1) - check_linear_coef(self, repn, m.disjunct[i, 1].indicator_var, 1) + ct.check_indexed_xor_constraints_with_targets(self, 'bigm') def test_partial_deactivate_indexed_disjunction(self): - """Test for partial deactivation of an indexed disjunction.""" - m = ConcreteModel() - m.x = Var(bounds=(0, 10)) - @m.Disjunction([0, 1]) - def disj(m, i): - if i == 0: - return [m.x >= 1, m.x >= 2] - else: - return [m.x >= 3, m.x >= 4] - - m.disj[0].disjuncts[0].indicator_var.fix(1) - m.disj[0].disjuncts[1].indicator_var.fix(1) - m.disj[0].deactivate() - TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation - self.assertEqual( - len(transBlock.disj_xor), 1, - "There should only be one XOR constraint generated. Found %s." % - len(transBlock.disj_xor)) + ct.check_partial_deactivate_indexed_disjunction(self, 'bigm') class BlocksOnDisjuncts(unittest.TestCase): + # ESJ: All of these tests are specific to bigm because they check how much + # stuff is on the transformation blocks. 
def setUp(self): # set seed so we can test name collisions predictably random.seed(666) @@ -2338,7 +1724,7 @@ def test_transformed_constraint_nameConflicts(self): m = models.makeTwoTermDisj_BlockOnDisj() TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) @@ -2361,7 +1747,7 @@ def test_do_not_transform_deactivated_constraint(self): TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) @@ -2381,7 +1767,7 @@ def test_do_not_transform_deactivated_block(self): TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m._pyomo_gdp_bigm_relaxation + transBlock = m._pyomo_gdp_bigm_reformulation disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) @@ -2395,84 +1781,89 @@ def test_do_not_transform_deactivated_block(self): self.assertIsInstance( disjBlock[1].component("evil[1].b.c_4"), Constraint) + def test_pick_up_bigm_suffix_on_block(self): + m = models.makeTwoTermDisj_BlockOnDisj() + m.evil[1].b.BigM = Suffix(direction=Suffix.LOCAL) + m.evil[1].b.BigM[m.evil[1].b.c] = 2000 + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + + # check that the m value got used + cons_list = bigm.get_transformed_constraints(m.evil[1].b.c) + ub = cons_list[1] + self.assertEqual(ub.index(), 'ub') + self.assertEqual(ub.upper, 0) + self.assertIsNone(ub.lower) + repn = generate_standard_repn(ub.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, -2000) + self.assertEqual(len(repn.linear_vars), 2) + self.assertIs(repn.linear_vars[0], m.x) + self.assertEqual(repn.linear_coefs[0], 1) + self.assertIs(repn.linear_vars[1], m.evil[1].indicator_var) + self.assertEqual(repn.linear_coefs[1], 2000) + + def 
test_use_correct_none_suffix(self): + m = ConcreteModel() + m.x = Var(bounds=(-100, 111)) + m.b = Block() + m.b.d = Disjunct() + m.b.d.foo = Block() + + m.b.d.c = Constraint(expr=m.x>=9) + + m.b.BigM = Suffix() + m.b.BigM[None] = 10 + m.b.d.foo.BigM = Suffix() + m.b.d.foo.BigM[None] = 1 + + m.d = Disjunct() + m.disj = Disjunction(expr=[m.d, m.b.d]) + + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + + # we should have picked up 10 for m.b.d.c + cons_list = bigm.get_transformed_constraints(m.b.d.c) + lb = cons_list[0] + self.assertEqual(lb.index(), 'lb') + self.assertEqual(lb.lower, 9) + self.assertIsNone(lb.upper) + repn = generate_standard_repn(lb.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 10) + self.assertEqual(len(repn.linear_vars), 2) + self.assertIs(repn.linear_vars[0], m.x) + self.assertEqual(repn.linear_coefs[0], 1) + self.assertIs(repn.linear_vars[1], m.b.d.indicator_var) + self.assertEqual(repn.linear_coefs[1], -10) class InnerDisjunctionSharedDisjuncts(unittest.TestCase): def test_activeInnerDisjunction_err(self): - m = models.makeDuplicatedNestedDisjunction() - self.assertRaisesRegexp( - GDP_Error, - "Found untransformed disjunction " - "outerdisjunct\[1\].duplicateddisjunction in disjunct " - "outerdisjunct\[1\]! The disjunction must be transformed before " - "the disjunct. 
If you are using targets, put the disjunction " - "before the disjunct in the list.*", - TransformationFactory('gdp.bigm').apply_to, - m, - targets=[m.outerdisjunct[1].innerdisjunction, - m.disjunction]) + ct.check_activeInnerDisjunction_err(self, 'bigm') - -class RangeSetOnDisjunct(unittest.TestCase): +class UntransformableObjectsOnDisjunct(unittest.TestCase): def test_RangeSet(self): - m = models.makeDisjunctWithRangeSet() - TransformationFactory('gdp.bigm').apply_to(m) - self.assertIsInstance(m.d1.s, RangeSet) + ct.check_RangeSet(self, 'bigm') + def test_Expression(self): + ct.check_Expression(self, 'bigm') class TransformABlock(unittest.TestCase): - # If you transform a block as if it is a model, the transformation should - # only modify the block you passed it, else when you solve the block, you - # are missing the disjunction you thought was on there. def test_transformation_simple_block(self): - m = models.makeTwoTermDisjOnBlock() - TransformationFactory('gdp.bigm').apply_to(m.b) - - # transformation block not on m - self.assertIsNone(m.component("_pyomo_gdp_bigm_relaxation")) - - # transformation block on m.b - self.assertIsInstance(m.b.component("_pyomo_gdp_bigm_relaxation"), Block) + ct.check_transformation_simple_block(self, 'bigm') def test_transform_block_data(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to(m.b[0]) - - self.assertIsNone(m.component("_pyomo_gdp_bigm_relaxation")) - - self.assertIsInstance(m.b[0].component("_pyomo_gdp_bigm_relaxation"), - Block) + ct.check_transform_block_data(self, 'bigm') def test_simple_block_target(self): - m = models.makeTwoTermDisjOnBlock() - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.b]) - - # transformation block not on m - self.assertIsNone(m.component("_pyomo_gdp_bigm_relaxation")) - - # transformation block on m.b - self.assertIsInstance(m.b.component("_pyomo_gdp_bigm_relaxation"), Block) + ct.check_simple_block_target(self, 'bigm') def 
test_block_data_target(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.b[0]]) - - self.assertIsNone(m.component("_pyomo_gdp_bigm_relaxation")) - - self.assertIsInstance(m.b[0].component("_pyomo_gdp_bigm_relaxation"), - Block) + ct.check_block_data_target(self, 'bigm') def test_indexed_block_target(self): - m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.b]) - - # We expect the transformation block on each of the BlockDatas. Because - # it is always going on the parent block of the disjunction. - - self.assertIsNone(m.component("_pyomo_gdp_bigm_relaxation")) - - for i in [0,1]: - self.assertIsInstance(m.b[i].component("_pyomo_gdp_bigm_relaxation"), - Block) + ct.check_indexed_block_target(self, 'bigm') class IndexedDisjunctions(unittest.TestCase): def setUp(self): @@ -2480,53 +1871,17 @@ def setUp(self): random.seed(666) def test_disjunction_data_target(self): - m = models.makeThreeTermIndexedDisj() - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.disjunction[2]]) - - # we got a transformation block on the model - transBlock = m.component("_pyomo_gdp_bigm_relaxation") - self.assertIsInstance(transBlock, Block) - self.assertIsInstance(transBlock.component( "disjunction_xor"), - Constraint) - self.assertIsInstance(transBlock.disjunction_xor[2], - constraint._GeneralConstraintData) - self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 3) - - # suppose we transform the next one separately - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.disjunction[1]]) - self.assertIsInstance(transBlock.disjunction_xor[1], - constraint._GeneralConstraintData) - self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 6) - - def check_relaxation_block(self, m, name, numDisjuncts): - transBlock = m.component(name) 
- self.assertIsInstance(transBlock, Block) - self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), numDisjuncts) + ct.check_disjunction_data_target(self, 'bigm') def test_disjunction_data_target_any_index(self): - m = ConcreteModel() - m.x = Var(bounds=(-100, 100)) - m.disjunct3 = Disjunct(Any) - m.disjunct4 = Disjunct(Any) - m.disjunction2=Disjunction(Any) - for i in range(2): - m.disjunct3[i].cons = Constraint(expr=m.x == 2) - m.disjunct4[i].cons = Constraint(expr=m.x <= 3) - m.disjunction2[i] = [m.disjunct3[i], m.disjunct4[i]] - - TransformationFactory('gdp.bigm').apply_to( - m, targets=[m.disjunction2[i]]) - - if i == 0: - self.check_relaxation_block(m, "_pyomo_gdp_bigm_relaxation", 2) - if i == 2: - self.check_relaxation_block(m, "_pyomo_gdp_bigm_relaxation", 4) + ct.check_disjunction_data_target_any_index(self, 'bigm') + # ESJ: This and the following tests are *very* similar to those in hull, + # but I actually bothered to check the additional transformed objects in + # hull (disaggregated variables, bounds constraints...), so they are + # reproduced independently there. 
def check_trans_block_disjunctions_of_disjunct_datas(self, m): - transBlock1 = m.component("_pyomo_gdp_bigm_relaxation") + transBlock1 = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock1, Block) self.assertIsInstance(transBlock1.component("relaxedDisjuncts"), Block) # We end up with a transformation block for every SimpleDisjunction or @@ -2540,7 +1895,7 @@ def check_trans_block_disjunctions_of_disjunct_datas(self, m): "secondTerm[1].cons"), Constraint) self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( "secondTerm[1].cons")), 1) - transBlock2 = m.component("_pyomo_gdp_bigm_relaxation_4") + transBlock2 = m.component("_pyomo_gdp_bigm_reformulation_4") self.assertIsInstance(transBlock2, Block) self.assertIsInstance(transBlock2.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock2.relaxedDisjuncts), 2) @@ -2552,28 +1907,15 @@ def check_trans_block_disjunctions_of_disjunct_datas(self, m): "secondTerm[2].cons"), Constraint) self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( "secondTerm[2].cons")), 1) - - def test_simple_disjunction_of_disjunct_datas(self): - # This is actually a reasonable use case if you are generating - # disjunctions with the same structure. So you might have Disjuncts - # indexed by Any and disjunctions indexed by Any and be adding a - # disjunction of two of the DisjunctDatas in every iteration. 
- m = models.makeDisjunctionOfDisjunctDatas() - TransformationFactory('gdp.bigm').apply_to(m) - self.check_trans_block_disjunctions_of_disjunct_datas(m) - transBlock = m._pyomo_gdp_bigm_relaxation - self.assertIsInstance( transBlock.component("disjunction_xor"), - Constraint) - transBlock2 = m._pyomo_gdp_bigm_relaxation_4 - self.assertIsInstance( transBlock2.component("disjunction2_xor"), - Constraint) + def test_simple_disjunction_of_disjunct_datas(self): + ct.check_simple_disjunction_of_disjunct_datas(self, 'bigm') def test_any_indexed_disjunction_of_disjunct_datas(self): m = models.makeAnyIndexedDisjunctionOfDisjunctDatas() TransformationFactory('gdp.bigm').apply_to(m) - transBlock = m.component("_pyomo_gdp_bigm_relaxation") + transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 4) @@ -2598,7 +1940,7 @@ def test_any_indexed_disjunction_of_disjunct_datas(self): self.assertEqual( len(transBlock.component("disjunction_xor")), 2) def check_first_iteration(self, model): - transBlock = model.component("_pyomo_gdp_bigm_relaxation") + transBlock = model.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertIsInstance( transBlock.component("disjunctionList_xor"), @@ -2608,25 +1950,7 @@ def check_first_iteration(self, model): self.assertFalse(model.disjunctionList[0].active) def check_second_iteration(self, model): - transBlock = model.component("_pyomo_gdp_bigm_relaxation") - self.assertIsInstance(transBlock, Block) - self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 4) - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - "firstTerm1.cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - "firstTerm1.cons")), 2) - 
self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - "secondTerm1.cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - "secondTerm1.cons")), 1) - self.assertEqual( - len(model._pyomo_gdp_bigm_relaxation.disjunctionList_xor), 2) - self.assertFalse(model.disjunctionList[1].active) - self.assertFalse(model.disjunctionList[0].active) - - def check_second_iteration_any_index(self, model): - transBlock = model.component("_pyomo_gdp_bigm_relaxation") + transBlock = model.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 4) @@ -2639,202 +1963,40 @@ def check_second_iteration_any_index(self, model): self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( "secondTerm[1].cons")), 1) self.assertEqual( - len(model._pyomo_gdp_bigm_relaxation.disjunctionList_xor), 2) + len(model._pyomo_gdp_bigm_reformulation.disjunctionList_xor), 2) self.assertFalse(model.disjunctionList[1].active) self.assertFalse(model.disjunctionList[0].active) def test_disjunction_and_disjuncts_indexed_by_any(self): - model = ConcreteModel() - model.x = Var(bounds=(-100, 100)) - - model.firstTerm = Disjunct(Any) - model.secondTerm = Disjunct(Any) - model.disjunctionList = Disjunction(Any) - - model.obj = Objective(expr=model.x) - - for i in range(2): - model.firstTerm[i].cons = Constraint(expr=model.x == 2*i) - model.secondTerm[i].cons = Constraint(expr=model.x >= i + 2) - model.disjunctionList[i] = [model.firstTerm[i], model.secondTerm[i]] - - TransformationFactory('gdp.bigm').apply_to(model) - - if i == 0: - self.check_first_iteration(model) - - if i == 1: - self.check_second_iteration_any_index(model) + ct.check_disjunction_and_disjuncts_indexed_by_any(self, 'bigm') def test_iteratively_adding_disjunctions_transform_container(self): - # If you are iteratively adding Disjunctions to an 
IndexedDisjunction, - # then if you are lazy about what you transform, you might shoot - # yourself in the foot because if the whole IndexedDisjunction gets - # deactivated by the first transformation, the new DisjunctionDatas - # don't get transformed. Interestingly, this isn't what happens. We - # deactivate the container and then still transform what's inside. I - # don't think we should deactivate the container at all, maybe? - model = ConcreteModel() - model.x = Var(bounds=(-100, 100)) - model.disjunctionList = Disjunction(Any) - model.obj = Objective(expr=model.x) - for i in range(2): - firstTermName = "firstTerm%s" % i - model.add_component(firstTermName, Disjunct()) - model.component(firstTermName).cons = Constraint( - expr=model.x == 2*i) - secondTermName = "secondTerm%s" % i - model.add_component(secondTermName, Disjunct()) - model.component(secondTermName).cons = Constraint( - expr=model.x >= i + 2) - model.disjunctionList[i] = [model.component(firstTermName), - model.component(secondTermName)] - - # we're lazy and we just transform the disjunctionList (and in - # theory we are transforming at every iteration because we are - # solving at every iteration) - TransformationFactory('gdp.bigm').apply_to( - model, targets=[model.disjunctionList]) - if i == 0: - self.check_first_iteration(model) - - if i == 1: - self.check_second_iteration(model) + ct.check_iteratively_adding_disjunctions_transform_container(self, + 'bigm') def test_iteratively_adding_disjunctions_transform_model(self): - # Same as above, but transforming whole model in every iteration - model = ConcreteModel() - model.x = Var(bounds=(-100, 100)) - model.disjunctionList = Disjunction(Any) - model.obj = Objective(expr=model.x) - for i in range(2): - firstTermName = "firstTerm%s" % i - model.add_component(firstTermName, Disjunct()) - model.component(firstTermName).cons = Constraint( - expr=model.x == 2*i) - secondTermName = "secondTerm%s" % i - model.add_component(secondTermName, Disjunct()) - 
model.component(secondTermName).cons = Constraint( - expr=model.x >= i + 2) - model.disjunctionList[i] = [model.component(firstTermName), - model.component(secondTermName)] - - # we're lazy and we just transform the model (and in - # theory we are transforming at every iteration because we are - # solving at every iteration) - TransformationFactory('gdp.bigm').apply_to(model) - if i == 0: - self.check_first_iteration(model) - - if i == 1: - self.check_second_iteration(model) + ct.check_iteratively_adding_disjunctions_transform_model(self, 'bigm') def test_iteratively_adding_to_indexed_disjunction_on_block(self): - m = ConcreteModel() - m.b = Block() - m.b.x = Var(bounds=(-100, 100)) - m.b.firstTerm = Disjunct([1,2]) - m.b.firstTerm[1].cons = Constraint(expr=m.b.x == 0) - m.b.firstTerm[2].cons = Constraint(expr=m.b.x == 2) - m.b.secondTerm = Disjunct([1,2]) - m.b.secondTerm[1].cons = Constraint(expr=m.b.x >= 2) - m.b.secondTerm[2].cons = Constraint(expr=m.b.x >= 3) - m.b.disjunctionList = Disjunction(Any) - - m.b.obj = Objective(expr=m.b.x) - - for i in range(1,3): - m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] - - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.b]) - m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] - - TransformationFactory('gdp.bigm').apply_to(m, targets=[m.b]) - - if i == 1: - self.check_relaxation_block(m.b, "_pyomo_gdp_bigm_relaxation", 2) - if i == 2: - self.check_relaxation_block(m.b, "_pyomo_gdp_bigm_relaxation", 4) + ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, + 'bigm') class TestErrors(unittest.TestCase): def test_transform_empty_disjunction(self): - m = ConcreteModel() - m.empty = Disjunction(expr=[]) - - self.assertRaisesRegexp( - GDP_Error, - "Disjunction empty is empty. 
This is likely indicative of a " - "modeling error.*", - TransformationFactory('gdp.bigm').apply_to, - m) + ct.check_transform_empty_disjunction(self, 'bigm') def test_deactivated_disjunct_nonzero_indicator_var(self): - m = ConcreteModel() - m.x = Var(bounds=(0,8)) - m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) - - m.disjunction.disjuncts[0].deactivate() - m.disjunction.disjuncts[0].indicator_var.fix(1) - - self.assertRaisesRegexp( - GDP_Error, - "The disjunct disjunction_disjuncts\[0\] is deactivated, but the " - "indicator_var is fixed to 1. This makes no sense.", - TransformationFactory('gdp.bigm').apply_to, - m) + ct.check_deactivated_disjunct_nonzero_indicator_var(self, + 'bigm') def test_deactivated_disjunct_unfixed_indicator_var(self): - m = ConcreteModel() - m.x = Var(bounds=(0,8)) - m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) - - m.disjunction.disjuncts[0].deactivate() - m.disjunction.disjuncts[0].indicator_var.fixed = False - - self.assertRaisesRegexp( - GDP_Error, - "The disjunct disjunction_disjuncts\[0\] is deactivated, but the " - "indicator_var is not fixed and the disjunct does not " - "appear to have been relaxed. This makes no sense. " - "\(If the intent is to deactivate the disjunct, fix its " - "indicator_var to 0.\)", - TransformationFactory('gdp.bigm').apply_to, - m) + ct.check_deactivated_disjunct_unfixed_indicator_var(self, 'bigm') def test_infeasible_xor_because_all_disjuncts_deactivated(self): - m = ConcreteModel() - m.x = Var(bounds=(0,8)) - m.y = Var(bounds=(0,7)) - m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) - m.disjunction_disjuncts[0].nestedDisjunction = Disjunction( - expr=[m.y == 6, m.y <= 1]) - # Note that this fixes the indicator variables to 0, but since the - # disjunction is still active, the XOR constraint will be created. 
So we - # will have to land in the second disjunct of m.disjunction - m.disjunction.disjuncts[0].nestedDisjunction.disjuncts[0].deactivate() - m.disjunction.disjuncts[0].nestedDisjunction.disjuncts[1].deactivate() - # This should create a 0 = 1 XOR constraint, actually... - TransformationFactory('gdp.bigm').apply_to( - m, - targets=m.disjunction.disjuncts[0].nestedDisjunction) - - # check that our XOR is the bad thing it should be. - transBlock = m.disjunction.disjuncts[0].component( - "_pyomo_gdp_bigm_relaxation") - xor = transBlock.component( - "disjunction_disjuncts[0].nestedDisjunction_xor") - self.assertIsInstance(xor, Constraint) - self.assertEqual(value(xor.lower), 1) - self.assertEqual(value(xor.upper), 1) - repn = generate_standard_repn(xor.body) - for v in repn.linear_vars: - self.assertTrue(v.is_fixed()) - self.assertEqual(value(v), 0) - - # make sure when we transform the outer thing, all is well - TransformationFactory('gdp.bigm').apply_to(m) + m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, + 'bigm') - transBlock = m.component("_pyomo_gdp_bigm_relaxation") + transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 2) self.assertIsInstance(transBlock.component("disjunction_xor"), @@ -2842,7 +2004,7 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): disjunct1 = transBlock.relaxedDisjuncts[0] # longest constraint name EVER... relaxed_xor = disjunct1.component( - "disjunction_disjuncts[0]._pyomo_gdp_bigm_relaxation." + "disjunction_disjuncts[0]._pyomo_gdp_bigm_reformulation." "disjunction_disjuncts[0].nestedDisjunction_xor") self.assertIsInstance(relaxed_xor, Constraint) repn = generate_standard_repn(relaxed_xor['lb'].body) @@ -2850,19 +2012,15 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): self.assertIsNone(relaxed_xor['lb'].upper) # the other variables got eaten in the constant because they are fixed. 
self.assertEqual(len(repn.linear_vars), 1) - check_linear_coef( - self, repn, - m.disjunction.disjuncts[0].indicator_var, - -1) + ct.check_linear_coef( self, repn, + m.disjunction.disjuncts[0].indicator_var, -1) self.assertEqual(repn.constant, 1) repn = generate_standard_repn(relaxed_xor['ub'].body) self.assertIsNone(relaxed_xor['ub'].lower) self.assertEqual(value(relaxed_xor['ub'].upper), 1) self.assertEqual(len(repn.linear_vars), 1) - check_linear_coef( - self, repn, - m.disjunction.disjuncts[0].indicator_var, - -1) + ct.check_linear_coef( self, repn, + m.disjunction.disjuncts[0].indicator_var, 1) # and last check that the other constraints here look fine x0 = disjunct1.component("disjunction_disjuncts[0].constraint") @@ -2873,7 +2031,7 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): repn = generate_standard_repn(lb.body) self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 1) - check_linear_coef(self, repn, m.x, 1) + ct.check_linear_coef(self, repn, m.x, 1) ub = x0[(1, 'ub')] self.assertIsNone(ub.lower) @@ -2881,75 +2039,59 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): repn = generate_standard_repn(ub.body) self.assertEqual(repn.constant, -8) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, m.x, 1) - check_linear_coef(self, repn, m.disjunction_disjuncts[0].indicator_var, - 8) + ct.check_linear_coef(self, repn, m.x, 1) + ct.check_linear_coef(self, repn, + m.disjunction_disjuncts[0].indicator_var, 8) def test_retrieving_nondisjunctive_components(self): - m = models.makeTwoTermDisj() - m.b = Block() - m.b.global_cons = Constraint(expr=m.a + m.x >= 8) - m.another_global_cons = Constraint(expr=m.a + m.x <= 11) - - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m) - - self.assertRaisesRegexp( - GDP_Error, - "Constraint b.global_cons is not on a disjunct and so was not " - "transformed", - bigm.get_transformed_constraint, - m.b.global_cons) - - self.assertRaisesRegexp( - 
GDP_Error, - "Constraint b.global_cons is not a transformed constraint", - bigm.get_src_constraint, - m.b.global_cons) - - self.assertRaisesRegexp( - GDP_Error, - "Constraint another_global_cons is not a transformed constraint", - bigm.get_src_constraint, - m.another_global_cons) - - self.assertRaisesRegexp( - GDP_Error, - "Block b doesn't appear to be a transformation block for a " - "disjunct. No source disjunct found.*", - bigm.get_src_disjunct, - m.b) - - self.assertRaisesRegexp( - GDP_Error, - "It appears that another_global_cons is not an XOR or OR" - " constraint resulting from transforming a Disjunction.", - bigm.get_src_disjunction, - m.another_global_cons) + ct.check_retrieving_nondisjunctive_components(self, 'bigm') def test_ask_for_transformed_constraint_from_untransformed_disjunct(self): - m = models.makeTwoTermIndexedDisjunction() - bigm = TransformationFactory('gdp.bigm') - bigm.apply_to(m, targets=m.disjunction[1]) - - self.assertRaisesRegexp( - GDP_Error, - "Constraint disjunct\[2,b\].cons_b is on a disjunct which has " - "not been transformed", - bigm.get_transformed_constraint, - m.disjunct[2, 'b'].cons_b) + ct.check_ask_for_transformed_constraint_from_untransformed_disjunct( + self, 'bigm') def test_silly_target(self): - m = models.makeTwoTermDisj() - self.assertRaisesRegexp( - GDP_Error, - "Target d\[1\].c1 was not a Block, Disjunct, or Disjunction. 
" - "It was of type " - " and " - "can't be transformed.", - TransformationFactory('gdp.bigm').apply_to, - m, - targets=[m.d[1].c1]) + ct.check_silly_target(self, 'bigm') + +class EstimatingMwithFixedVars(unittest.TestCase): + def test_tighter_Ms_when_vars_fixed_forever(self): + m = ConcreteModel() + m.x = Var(bounds=(0, 10)) + m.y = Var(bounds=(0, 70)) + m.d = Disjunct() + m.d.c = Constraint(expr=m.x + m.y <= 13) + m.d2 = Disjunct() + m.d2.c = Constraint(expr=m.x >= 7) + m.disj = Disjunction(expr=[m.d, m.d2]) + m.y.fix(10) + bigm = TransformationFactory('gdp.bigm') + promise = bigm.create_using(m, assume_fixed_vars_permanent=True) + bigm.apply_to(m, assume_fixed_vars_permanent=False) + + # check the M values in both cases + # first where y might be unfixed: + xformed = bigm.get_transformed_constraints(m.d.c) + self.assertEqual(len(xformed), 1) + cons = xformed[0] + self.assertEqual(cons.upper, 13) + self.assertIsNone(cons.lower) + repn = generate_standard_repn(cons.body) + self.assertEqual(repn.constant, -57) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, m.x, 1) + ct.check_linear_coef(self, repn, m.d.indicator_var, 67) + + # then where it won't + xformed = bigm.get_transformed_constraints(promise.d.c) + self.assertEqual(len(xformed), 1) + cons = xformed[0] + self.assertEqual(cons.upper, 13) + self.assertIsNone(cons.lower) + repn = generate_standard_repn(cons.body) + self.assertEqual(repn.constant, 3) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, promise.x, 1) + ct.check_linear_coef(self, repn, promise.d.indicator_var, 7) if __name__ == '__main__': unittest.main() diff --git a/pyomo/gdp/tests/test_chull.py b/pyomo/gdp/tests/test_chull.py deleted file mode 100644 index 824947a1bf4..00000000000 --- a/pyomo/gdp/tests/test_chull.py +++ /dev/null @@ -1,788 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright 
2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import pyutilib.th as unittest - -from pyomo.environ import * -from pyomo.repn import generate_standard_repn - -from pyomo.gdp import * -import pyomo.gdp.tests.models as models - -import pyomo.opt -linear_solvers = pyomo.opt.check_available_solvers( - 'glpk','cbc','gurobi','cplex') - -import random -from six import iteritems, iterkeys - - -EPS = TransformationFactory('gdp.chull').CONFIG.EPS - -def check_linear_coef(self, repn, var, coef): - var_id = None - for i,v in enumerate(repn.linear_vars): - if v is var: - var_id = i - self.assertIsNotNone(var_id) - self.assertEqual(repn.linear_coefs[var_id], coef) - - -class TwoTermDisj(unittest.TestCase): - def setUp(self): - # set seed to test unique namer - random.seed(666) - - def test_transformation_block(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - transBlock = m._pyomo_gdp_chull_relaxation - self.assertIsInstance(transBlock, Block) - lbub = transBlock.lbub - self.assertIsInstance(lbub, Set) - self.assertEqual(lbub, ['lb', 'ub', 'eq']) - - disjBlock = transBlock.relaxedDisjuncts - self.assertIsInstance(disjBlock, Block) - self.assertEqual(len(disjBlock), 2) - - def test_transformation_block_name_collision(self): - m = models.makeTwoTermDisj_Nonlinear() - # add block with the name we are about to try to use - m._pyomo_gdp_chull_relaxation = Block(Any) - TransformationFactory('gdp.chull').apply_to(m) - - # check that we got a uniquely named block - transBlock = m.component("_pyomo_gdp_chull_relaxation_4") - self.assertIsInstance(transBlock, Block) - - # check that the relaxed 
disjuncts really are here. - disjBlock = transBlock.relaxedDisjuncts - self.assertIsInstance(disjBlock, Block) - self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("d[0].c"), Constraint) - self.assertIsInstance(disjBlock[1].component("d[1].c1"), Constraint) - self.assertIsInstance(disjBlock[1].component("d[1].c2"), Constraint) - - # we didn't add to the block that wasn't ours - self.assertEqual(len(m._pyomo_gdp_chull_relaxation), 0) - - def test_info_dict_name_collision(self): - m = models.makeTwoTermDisj_Nonlinear() - # we never have a way to know if the dictionary we made was ours. But we - # should yell if there is a non-dictionary component of the same name. - m._gdp_transformation_info = Block() - self.assertRaisesRegexp( - GDP_Error, - "Component unknown contains an attribute named " - "_gdp_transformation_info. The transformation requires that it can " - "create this attribute!*", - TransformationFactory('gdp.chull').apply_to, - m) - - def test_indicator_vars_still_active(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - self.assertIsInstance(m.d[0].indicator_var, Var) - self.assertTrue(m.d[0].indicator_var.active) - self.assertTrue(m.d[0].indicator_var.is_binary()) - self.assertIsInstance(m.d[1].indicator_var, Var) - self.assertTrue(m.d[1].indicator_var.active) - self.assertTrue(m.d[1].indicator_var.is_binary()) - - def test_disaggregated_vars(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - # same on both disjuncts - for i in [0,1]: - relaxationBlock = disjBlock[i] - w = relaxationBlock.w - x = relaxationBlock.x - y = relaxationBlock.y - # variables created - self.assertIsInstance(w, Var) - self.assertIsInstance(x, Var) - self.assertIsInstance(y, Var) - # the are in reals - self.assertIsInstance(w.domain, RealSet) - self.assertIsInstance(x.domain, RealSet) - 
self.assertIsInstance(y.domain, RealSet) - # they don't have bounds - self.assertEqual(w.lb, 0) - self.assertEqual(w.ub, 7) - self.assertEqual(x.lb, 0) - self.assertEqual(x.ub, 8) - self.assertEqual(y.lb, -10) - self.assertEqual(y.ub, 0) - - def check_furman_et_al_denominator(self, expr, ind_var): - self.assertEqual(expr._const, EPS) - self.assertEqual(len(expr._args), 1) - self.assertEqual(len(expr._coef), 1) - self.assertEqual(expr._coef[0], 1 - EPS) - self.assertIs(expr._args[0], ind_var) - - def test_transformed_constraint_nonlinear(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - # the only constraint on the first block is the non-linear one - disj1c = disjBlock[0].component("d[0].c") - self.assertIsInstance(disj1c, Constraint) - # we only have an upper bound - self.assertEqual(len(disj1c), 1) - cons = disj1c['ub'] - self.assertIsNone(cons.lower) - self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertFalse(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 1) - # This is a weak test, but as good as any to ensure that the - # substitution was done correctly - EPS_1 = 1-EPS - self.assertEqual( - str(cons.body), - "(%s*d[0].indicator_var + %s)*(" - "_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].x" - "/(%s*d[0].indicator_var + %s) + " - "(_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].y/" - "(%s*d[0].indicator_var + %s))**2) - " - "%s*(0.0 + 0.0**2)*(1 - d[0].indicator_var) " - "- 14.0*d[0].indicator_var" - % (EPS_1, EPS, EPS_1, EPS, EPS_1, EPS, EPS)) - - def test_transformed_constraints_linear(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - # the only constraint on the first block is the non-linear one - c1 = disjBlock[1].component("d[1].c1") - # has only lb - self.assertEqual(len(c1), 1) - 
cons = c1['lb'] - self.assertIsNone(cons.lower) - self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, disjBlock[1].x, -1) - check_linear_coef(self, repn, m.d[1].indicator_var, 2) - self.assertEqual(repn.constant, 0) - self.assertEqual(disjBlock[1].x.lb, 0) - self.assertEqual(disjBlock[1].x.ub, 8) - - c2 = disjBlock[1].component("d[1].c2") - # 'eq' is preserved - self.assertEqual(len(c2), 1) - cons = c2['eq'] - self.assertEqual(cons.lower, 0) - self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, disjBlock[1].w, 1) - check_linear_coef(self, repn, m.d[1].indicator_var, -3) - self.assertEqual(repn.constant, 0) - self.assertEqual(disjBlock[1].w.lb, 0) - self.assertEqual(disjBlock[1].w.ub, 7) - - c3 = disjBlock[1].component("d[1].c3") - # bounded inequality is split - self.assertEqual(len(c3), 2) - cons = c3['lb'] - self.assertIsNone(cons.lower) - self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, disjBlock[1].x, -1) - check_linear_coef(self, repn, m.d[1].indicator_var, 1) - self.assertEqual(repn.constant, 0) - - cons = c3['ub'] - self.assertIsNone(cons.lower) - self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, disjBlock[1].x, 1) - check_linear_coef(self, repn, m.d[1].indicator_var, -3) - self.assertEqual(repn.constant, 0) - - def check_bound_constraints(self, cons, disvar, indvar, lb, ub): - self.assertIsInstance(cons, Constraint) - # both lb and ub - self.assertEqual(len(cons), 2) - varlb = cons['lb'] - 
self.assertIsNone(varlb.lower) - self.assertEqual(varlb.upper, 0) - repn = generate_standard_repn(varlb.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, indvar, lb) - check_linear_coef(self, repn, disvar, -1) - - varub = cons['ub'] - self.assertIsNone(varub.lower) - self.assertEqual(varub.upper, 0) - repn = generate_standard_repn(varub.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, indvar, -ub) - check_linear_coef(self, repn, disvar, 1) - - def test_disaggregatedVar_bounds(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - for i in [0,1]: - # check bounds constraints for each variable on each of the two - # disjuncts. - self.check_bound_constraints(disjBlock[i].w_bounds, disjBlock[i].w, - m.d[i].indicator_var, 2, 7) - self.check_bound_constraints(disjBlock[i].x_bounds, disjBlock[i].x, - m.d[i].indicator_var, 1, 8) - self.check_bound_constraints(disjBlock[i].y_bounds, disjBlock[i].y, - m.d[i].indicator_var, -10, -3) - - def test_xor_constraint(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - xorC = m._gdp_chull_relaxation_disjunction_xor - self.assertIsInstance(xorC, Constraint) - self.assertEqual(len(xorC), 1) - - repn = generate_standard_repn(xorC.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef(self, repn, m.d[0].indicator_var, 1) - check_linear_coef(self, repn, m.d[1].indicator_var, 1) - - def test_error_for_or(self): - m = models.makeTwoTermDisj_Nonlinear() - m.disjunction.xor = False - - self.assertRaisesRegexp( - GDP_Error, - "Cannot do convex hull transformation for disjunction disjunction " - 
"with or constraint. Must be an xor!*", - TransformationFactory('gdp.chull').apply_to, - m) - - def check_disaggregation_constraint(self, cons, var, disvar1, disvar2): - repn = generate_standard_repn(cons.body) - self.assertEqual(cons.lower, 0) - self.assertEqual(cons.upper, 0) - self.assertEqual(len(repn.linear_vars), 3) - check_linear_coef(self, repn, var, 1) - check_linear_coef(self, repn, disvar1, -1) - check_linear_coef(self, repn, disvar2, -1) - - def test_disaggregation_constraint(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - disCons = m._gdp_chull_relaxation_disjunction_disaggregation - self.assertIsInstance(disCons, Constraint) - # one for each of the variables - self.assertEqual(len(disCons), 3) - self.check_disaggregation_constraint(disCons[2], m.w, disjBlock[0].w, - disjBlock[1].w) - self.check_disaggregation_constraint(disCons[0], m.x, disjBlock[0].x, - disjBlock[1].x) - self.check_disaggregation_constraint(disCons[1], m.y, disjBlock[0].y, - disjBlock[1].y) - - def test_original_disjuncts_deactivated(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m, targets=(m,)) - - self.assertFalse(m.d.active) - self.assertFalse(m.d[0].active) - self.assertFalse(m.d[1].active) - # COnstraints aren't deactived: only disjuncts - self.assertTrue(m.d[0].c.active) - self.assertTrue(m.d[1].c1.active) - self.assertTrue(m.d[1].c2.active) - - def test_transformed_disjunct_mappings(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - # the disjuncts will always be transformed in the same order, - # and d[0] goes first, so we can check in a loop. 
- for i in [0,1]: - infodict = disjBlock[i]._gdp_transformation_info - self.assertIsInstance(infodict, dict) - self.assertEqual(len(infodict), 4) - self.assertIs(infodict['src'], m.d[i]) - self.assertIsInstance(infodict['srcConstraints'], ComponentMap) - self.assertIsInstance(infodict['srcVars'], ComponentMap) - self.assertIsInstance( - infodict['boundConstraintToSrcVar'], ComponentMap) - - disjDict = m.d[i]._gdp_transformation_info - self.assertIsInstance(disjDict, dict) - self.assertEqual(sorted(iterkeys(disjDict)), ['chull','relaxed']) - self.assertTrue(disjDict['relaxed']) - self.assertIs(disjDict['chull']['relaxationBlock'], disjBlock[i]) - disaggregatedVars = disjDict['chull']['disaggregatedVars'] - self.assertIsInstance(disaggregatedVars, ComponentMap) - bigmConstraints = disjDict['chull']['bigmConstraints'] - self.assertIsInstance(bigmConstraints, ComponentMap) - relaxedConstraints = disjDict['chull']['relaxedConstraints'] - self.assertIsInstance(relaxedConstraints, ComponentMap) - - def test_transformed_constraint_mappings(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - # first disjunct - srcConsdict = disjBlock[0]._gdp_transformation_info['srcConstraints'] - transConsdict = m.d[0]._gdp_transformation_info['chull'][ - 'relaxedConstraints'] - - self.assertEqual(len(srcConsdict), 1) - self.assertEqual(len(transConsdict), 1) - orig1 = m.d[0].c - trans1 = disjBlock[0].component("d[0].c") - self.assertIs(srcConsdict[trans1], orig1) - self.assertIs(transConsdict[orig1], trans1) - - # second disjunct - srcConsdict = disjBlock[1]._gdp_transformation_info['srcConstraints'] - transConsdict = m.d[1]._gdp_transformation_info['chull'][ - 'relaxedConstraints'] - - self.assertEqual(len(srcConsdict), 3) - self.assertEqual(len(transConsdict), 3) - # first constraint - orig1 = m.d[1].c1 - trans1 = disjBlock[1].component("d[1].c1") - 
self.assertIs(srcConsdict[trans1], orig1) - self.assertIs(transConsdict[orig1], trans1) - # second constraint - orig2 = m.d[1].c2 - trans2 = disjBlock[1].component("d[1].c2") - self.assertIs(srcConsdict[trans2], orig2) - self.assertIs(transConsdict[orig2], trans2) - # third constraint - orig3 = m.d[1].c3 - trans3 = disjBlock[1].component("d[1].c3") - self.assertIs(srcConsdict[trans3], orig3) - self.assertIs(transConsdict[orig3], trans3) - - def test_disaggregatedVar_mappings(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - for i in [0,1]: - srcVars = disjBlock[i]._gdp_transformation_info['srcVars'] - disVars = m.d[i]._gdp_transformation_info['chull'][ - 'disaggregatedVars'] - self.assertEqual(len(srcVars), 3) - self.assertEqual(len(disVars), 3) - # TODO: there has got to be better syntax for this?? - mappings = ComponentMap() - mappings[m.w] = disjBlock[i].w - mappings[m.y] = disjBlock[i].y - mappings[m.x] = disjBlock[i].x - for orig, disagg in iteritems(mappings): - self.assertIs(srcVars[disagg], orig) - self.assertIs(disVars[orig], disagg) - - def test_bigMConstraint_mappings(self): - m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.chull').apply_to(m) - - disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - - for i in [0,1]: - srcBigm = disjBlock[i]._gdp_transformation_info[ - 'boundConstraintToSrcVar'] - bigm = m.d[i]._gdp_transformation_info['chull']['bigmConstraints'] - self.assertEqual(len(srcBigm), 3) - self.assertEqual(len(bigm), 3) - # TODO: this too... 
- mappings = ComponentMap() - mappings[m.w] = disjBlock[i].w_bounds - mappings[m.y] = disjBlock[i].y_bounds - mappings[m.x] = disjBlock[i].x_bounds - for var, cons in iteritems(mappings): - self.assertIs(srcBigm[cons], var) - self.assertIs(bigm[var], cons) - - def test_target_not_a_component_err(self): - decoy = ConcreteModel() - decoy.block = Block() - m = models.makeTwoSimpleDisjunctions() - self.assertRaisesRegexp( - GDP_Error, - "Target block is not a component on instance unknown!", - TransformationFactory('gdp.chull').apply_to, - m, - targets=[decoy.block]) - - def test_do_not_transform_user_deactivated_disjuncts(self): - # TODO - pass - - def test_unbounded_var_error(self): - m = models.makeTwoTermDisj_Nonlinear() - # no bounds - m.w.setlb(None) - m.w.setub(None) - self.assertRaisesRegexp( - GDP_Error, - "Variables that appear in disjuncts must be " - "bounded in order to use the chull " - "transformation! Missing bound for w.*", - TransformationFactory('gdp.chull').apply_to, - m) - - def test_indexed_constraints_in_disjunct(self): - m = models.makeThreeTermDisj_IndexedConstraints() - - TransformationFactory('gdp.chull').apply_to(m) - transBlock = m._pyomo_gdp_chull_relaxation - - # 2 blocks: the original Disjunct and the transformation block - self.assertEqual( - len(list(m.component_objects(Block, descend_into=False))), 2) - self.assertEqual( - len(list(m.component_objects(Disjunct))), 0) - - # Each relaxed disjunct should have 3 vars, but i "d[i].c" - # Constraints - for i in [1,2,3]: - relaxed = transBlock.relaxedDisjuncts[i-1] - self.assertEqual(len(list(relaxed.component_objects(Var))), 3) - self.assertEqual(len(list(relaxed.component_data_objects(Var))), 3) - self.assertEqual( - len(list(relaxed.component_objects(Constraint))), 4) - # Note: m.x LB == 0, so only 3 bounds constriants (not 6) - self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), 3+i) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) - - def 
test_virtual_indexed_constraints_in_disjunct(self): - m = ConcreteModel() - m.I = [1,2,3] - m.x = Var(m.I, bounds=(-1,10)) - def d_rule(d,j): - m = d.model() - d.c = Constraint(Any) - for k in range(j): - d.c[k+1] = m.x[k+1] >= k+1 - m.d = Disjunct(m.I, rule=d_rule) - m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) - - TransformationFactory('gdp.chull').apply_to(m) - transBlock = m._pyomo_gdp_chull_relaxation - - # 2 blocks: the original Disjunct and the transformation block - self.assertEqual( - len(list(m.component_objects(Block, descend_into=False))), 2) - self.assertEqual( - len(list(m.component_objects(Disjunct))), 0) - - # Each relaxed disjunct should have 3 vars, but i "d[i].c" - # Constraints - for i in [1,2,3]: - relaxed = transBlock.relaxedDisjuncts[i-1] - self.assertEqual(len(list(relaxed.component_objects(Var))), 3) - self.assertEqual(len(list(relaxed.component_data_objects(Var))), 3) - self.assertEqual( - len(list(relaxed.component_objects(Constraint))), 4) - self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), 3*2+i) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) - - -class IndexedDisjunction(unittest.TestCase): - - def test_disaggregation_constraints(self): - m = models.makeTwoTermIndexedDisjunction() - TransformationFactory('gdp.chull').apply_to(m) - - disaggregationCons = m._gdp_chull_relaxation_disjunction_disaggregation - relaxedDisjuncts = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts - self.assertIsInstance(disaggregationCons, Constraint) - self.assertEqual(len(disaggregationCons), 3) - - disaggregatedVars = { - (1, 0): [relaxedDisjuncts[0].component('x[1]'), - relaxedDisjuncts[1].component('x[1]')], - (2, 0): [relaxedDisjuncts[2].component('x[2]'), - relaxedDisjuncts[3].component('x[2]')], - (3, 0): [relaxedDisjuncts[4].component('x[3]'), - relaxedDisjuncts[5].component('x[3]')], - } - - for i, disVars in iteritems(disaggregatedVars): - cons = disaggregationCons[i] - self.assertEqual(cons.lower, 0) - 
self.assertEqual(cons.upper, 0) - repn = generate_standard_repn(cons.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 3) - check_linear_coef(self, repn, m.x[i[0]], 1) - check_linear_coef(self, repn, disVars[0], -1) - check_linear_coef(self, repn, disVars[1], -1) - - # TODO: also test disaggregation constraints for when we have a disjunction - # where the indices are tuples. (This is to test that when we combine the - # indices and the constraint name we get what we expect in both cases.) - -class DisaggregatedVarNamingConflict(unittest.TestCase): - @staticmethod - def makeModel(): - m = ConcreteModel() - m.b = Block() - m.b.x = Var(bounds=(0, 10)) - m.add_component("b.x", Var(bounds=(-9, 9))) - def disjunct_rule(d, i): - m = d.model() - if i: - d.cons_block = Constraint(expr=m.b.x >= 5) - d.cons_model = Constraint(expr=m.component("b.x")==0) - else: - d.cons_model = Constraint(expr=m.component("b.x") <= -5) - m.disjunct = Disjunct([0,1], rule=disjunct_rule) - m.disjunction = Disjunction(expr=[m.disjunct[0], m.disjunct[1]]) - - return m - - def test_disaggregation_constraints(self): - m = self.makeModel() - TransformationFactory('gdp.chull').apply_to(m) - - disCons = m._gdp_chull_relaxation_disjunction_disaggregation - self.assertIsInstance(disCons, Constraint) - self.assertEqual(len(disCons), 2) - # TODO: the above thing fails because the index gets overwritten. I - # don't know how to keep them unique at the moment. When I do, I also - # need to test that the indices are actually what we expect. 
- -class NestedDisjunction(unittest.TestCase): - - def test_deactivated_disjunct_leaves_nested_disjuncts_active(self): - m = models.makeNestedDisjunctions_FlatDisjuncts() - m.d1.deactivate() - # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier - # transformation of Disjuncts to Blocks - TransformationFactory('gdp.chull').apply_to(m, targets=[m]) - - self.assertFalse(m.d1.active) - self.assertTrue(m.d1.indicator_var.fixed) - self.assertEqual(m.d1.indicator_var.value, 0) - - self.assertFalse(m.d2.active) - self.assertFalse(m.d2.indicator_var.fixed) - - self.assertTrue(m.d3.active) - self.assertFalse(m.d3.indicator_var.fixed) - - self.assertTrue(m.d4.active) - self.assertFalse(m.d4.indicator_var.fixed) - - m = models.makeNestedDisjunctions_NestedDisjuncts() - m.d1.deactivate() - # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier - # transformation of Disjuncts to Blocks - TransformationFactory('gdp.chull').apply_to(m, targets=[m]) - - self.assertFalse(m.d1.active) - self.assertTrue(m.d1.indicator_var.fixed) - self.assertEqual(m.d1.indicator_var.value, 0) - - self.assertFalse(m.d2.active) - self.assertFalse(m.d2.indicator_var.fixed) - - self.assertTrue(m.d1.d3.active) - self.assertFalse(m.d1.d3.indicator_var.fixed) - - self.assertTrue(m.d1.d4.active) - self.assertFalse(m.d1.d4.indicator_var.fixed) - - @unittest.skipIf(not linear_solvers, "No linear solver available") - def test_relaxation_feasibility(self): - m = models.makeNestedDisjunctions_FlatDisjuncts() - TransformationFactory('gdp.chull').apply_to(m) - - solver = SolverFactory(linear_solvers[0]) - - cases = [ - (1,1,1,1,None), - (0,0,0,0,None), - (1,0,0,0,None), - (0,1,0,0,1.1), - (0,0,1,0,None), - (0,0,0,1,None), - (1,1,0,0,None), - (1,0,1,0,1.2), - (1,0,0,1,1.3), - (1,0,1,1,None), - ] - for case in cases: - m.d1.indicator_var.fix(case[0]) - m.d2.indicator_var.fix(case[1]) - m.d3.indicator_var.fix(case[2]) - m.d4.indicator_var.fix(case[3]) - results = solver.solve(m) - print(case, 
results.solver) - if case[4] is None: - self.assertEqual(results.solver.termination_condition, - pyomo.opt.TerminationCondition.infeasible) - else: - self.assertEqual(results.solver.termination_condition, - pyomo.opt.TerminationCondition.optimal) - self.assertEqual(value(m.obj), case[4]) - - -class TestSpecialCases(unittest.TestCase): - def test_warn_for_untransformed(self): - m = models.makeDisjunctionsOnIndexedBlock() - def innerdisj_rule(d, flag): - m = d.model() - if flag: - d.c = Constraint(expr=m.a[1] <= 2) - else: - d.c = Constraint(expr=m.a[1] >= 65) - m.disjunct1[1,1].innerdisjunct = Disjunct([0,1], rule=innerdisj_rule) - m.disjunct1[1,1].innerdisjunction = Disjunction([0], - rule=lambda a,i: [m.disjunct1[1,1].innerdisjunct[0], - m.disjunct1[1,1].innerdisjunct[1]]) - # This test relies on the order that the component objects of - # the disjunct get considered. In this case, the disjunct - # causes the error, but in another world, it could be the - # disjunction, which is also active. - self.assertRaisesRegexp( - GDP_Error, - "Found active disjunct disjunct1\[1,1\].innerdisjunct\[0\] " - "in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.chull').create_using, - m, - targets=[m.disjunction1[1]]) - # - # we will make that disjunction come first now... - # - tmp = m.disjunct1[1,1].innerdisjunct - m.disjunct1[1,1].del_component(tmp) - m.disjunct1[1,1].add_component('innerdisjunct', tmp) - self.assertRaisesRegexp( - GDP_Error, - "Found untransformed disjunction disjunct1\[1,1\]." 
- "innerdisjunction\[0\] in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.chull').create_using, - m, - targets=[m.disjunction1[1]]) - # Deactivating the disjunction will allow us to get past it back - # to the Disjunct (after we realize there are no active - # DisjunctionData within the active Disjunction) - m.disjunct1[1,1].innerdisjunction[0].deactivate() - self.assertRaisesRegexp( - GDP_Error, - "Found active disjunct disjunct1\[1,1\].innerdisjunct\[0\] " - "in disjunct disjunct1\[1,1\]!.*", - TransformationFactory('gdp.chull').create_using, - m, - targets=[m.disjunction1[1]]) - - def test_local_vars(self): - m = ConcreteModel() - m.x = Var(bounds=(5,100)) - m.y = Var(bounds=(0,100)) - m.d1 = Disjunct() - m.d1.c = Constraint(expr=m.y >= m.x) - m.d2 = Disjunct() - m.d2.z = Var() - m.d2.c = Constraint(expr=m.y >= m.d2.z) - m.disj = Disjunction(expr=[m.d1, m.d2]) - - self.assertRaisesRegexp( - GDP_Error, - ".*Missing bound for d2.z.*", - TransformationFactory('gdp.chull').create_using, - m) - m.d2.z.setlb(7) - self.assertRaisesRegexp( - GDP_Error, - ".*Missing bound for d2.z.*", - TransformationFactory('gdp.chull').create_using, - m) - m.d2.z.setub(9) - - i = TransformationFactory('gdp.chull').create_using(m) - rd = i._pyomo_gdp_chull_relaxation.relaxedDisjuncts[1] - self.assertEqual(sorted(rd.component_map(Var)), ['x','y']) - self.assertEqual(len(rd.component_map(Constraint)), 4) - self.assertEqual(i.d2.z.bounds, (0,9)) - self.assertEqual(len(rd.z_bounds), 2) - self.assertEqual(rd.z_bounds['lb'].lower, None) - self.assertEqual(rd.z_bounds['lb'].upper, 0) - self.assertEqual(rd.z_bounds['ub'].lower, None) - self.assertEqual(rd.z_bounds['ub'].upper, 0) - i.d2.indicator_var = 1 - i.d2.z = 2 - self.assertEqual(rd.z_bounds['lb'].body(), 5) - self.assertEqual(rd.z_bounds['ub'].body(), -7) - - m.d2.z.setlb(-9) - m.d2.z.setub(-7) - i = TransformationFactory('gdp.chull').create_using(m) - rd = i._pyomo_gdp_chull_relaxation.relaxedDisjuncts[1] - 
self.assertEqual(sorted(rd.component_map(Var)), ['x','y']) - self.assertEqual(len(rd.component_map(Constraint)), 4) - self.assertEqual(i.d2.z.bounds, (-9,0)) - self.assertEqual(len(rd.z_bounds), 2) - self.assertEqual(rd.z_bounds['lb'].lower, None) - self.assertEqual(rd.z_bounds['lb'].upper, 0) - self.assertEqual(rd.z_bounds['ub'].lower, None) - self.assertEqual(rd.z_bounds['ub'].upper, 0) - i.d2.indicator_var = 1 - i.d2.z = 2 - self.assertEqual(rd.z_bounds['lb'].body(), -11) - self.assertEqual(rd.z_bounds['ub'].body(), 9) - - -class RangeSetOnDisjunct(unittest.TestCase): - def test_RangeSet(self): - m = models.makeDisjunctWithRangeSet() - TransformationFactory('gdp.chull').apply_to(m) - self.assertIsInstance(m.d1.s, RangeSet) - - -# TODO (based on coverage): - -# test targets of all flavors -# test container deactivation -# test something with multiple indices diff --git a/pyomo/gdp/tests/test_gdp.py b/pyomo/gdp/tests/test_gdp.py index f9dbdb15437..86c4b4b7c89 100644 --- a/pyomo/gdp/tests/test_gdp.py +++ b/pyomo/gdp/tests/test_gdp.py @@ -142,17 +142,17 @@ def test_bigm_jobshop_large(self): # preprocess='bigm', solver='cplex') # self.check( 'constrained_layout', 'bigm') - def test_chull_jobshop_small(self): - self.problem='test_chull_jobshop_small' - # Run the small jobshop example using the CHull transformation - self.pyomo('jobshop-small.dat', preprocess='chull') - self.check( 'jobshop_small', 'chull' ) - - def test_chull_jobshop_large(self): - self.problem='test_chull_jobshop_large' - # Run the large jobshop example using the CHull transformation - self.pyomo('jobshop.dat', preprocess='chull') - self.check( 'jobshop_large', 'chull' ) + def test_hull_jobshop_small(self): + self.problem='test_hull_jobshop_small' + # Run the small jobshop example using the Hull transformation + self.pyomo('jobshop-small.dat', preprocess='hull') + self.check( 'jobshop_small', 'hull' ) + + def test_hull_jobshop_large(self): + self.problem='test_hull_jobshop_large' + # Run the large 
jobshop example using the Hull transformation + self.pyomo('jobshop.dat', preprocess='hull') + self.check( 'jobshop_large', 'hull' ) @unittest.skip("cutting plane LP file tests are too fragile") @unittest.skipIf('gurobi' not in solvers, 'Gurobi solver not available') @@ -188,6 +188,8 @@ def referenceFile(self, problem, solver): def check(self, problem, solver): self.assertFileEqualsBaseline( join(currdir,self.problem+'_result.lp'), self.referenceFile(problem,solver) ) + if os.path.exists(join(currdir,self.problem+'_result.lp')): + os.remove(join(currdir,self.problem+'_result.lp')) class Solver(unittest.TestCase): @@ -208,6 +210,9 @@ def check(self, problem, solver): ansObj[i].get(key,{}).get('Value', None), 6 ) + # Clean up test files + if os.path.exists(join(currdir,self.problem+'_result.lp')): + os.remove(join(currdir,self.problem+'_result.lp')) @unittest.skipIf(not yaml_available, "YAML is not available") diff --git a/pyomo/gdp/tests/test_hull.py b/pyomo/gdp/tests/test_hull.py new file mode 100644 index 00000000000..92d1bb70c45 --- /dev/null +++ b/pyomo/gdp/tests/test_hull.py @@ -0,0 +1,1923 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +import pyutilib.th as unittest +from pyomo.common.log import LoggingIntercept +import logging + +from pyomo.environ import * +from pyomo.core.base import constraint +from pyomo.repn import generate_standard_repn + +from pyomo.gdp import * +import pyomo.gdp.tests.models as models +import pyomo.gdp.tests.common_tests as ct + +import pyomo.opt +linear_solvers = pyomo.opt.check_available_solvers( + 'glpk','cbc','gurobi','cplex') + +import random +from six import iteritems, iterkeys, StringIO + +EPS = TransformationFactory('gdp.hull').CONFIG.EPS + +class CommonTests: + def setUp(self): + # set seed so we can test name collisions predictably + random.seed(666) + + def diff_apply_to_and_create_using(self, model): + ct.diff_apply_to_and_create_using(self, model, 'gdp.hull') + +class TwoTermDisj(unittest.TestCase, CommonTests): + def setUp(self): + # set seed to test unique namer + random.seed(666) + + def test_transformation_block(self): + m = models.makeTwoTermDisj_Nonlinear() + TransformationFactory('gdp.hull').apply_to(m) + + transBlock = m._pyomo_gdp_hull_reformulation + self.assertIsInstance(transBlock, Block) + lbub = transBlock.lbub + self.assertIsInstance(lbub, Set) + self.assertEqual(lbub, ['lb', 'ub', 'eq']) + + disjBlock = transBlock.relaxedDisjuncts + self.assertIsInstance(disjBlock, Block) + self.assertEqual(len(disjBlock), 2) + + def test_transformation_block_name_collision(self): + ct.check_transformation_block_name_collision(self, 'hull') + + def test_disaggregated_vars(self): + m = models.makeTwoTermDisj_Nonlinear() + TransformationFactory('gdp.hull').apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + # same on both disjuncts + for i in [0,1]: + relaxationBlock = disjBlock[i] + w = relaxationBlock.w + x = relaxationBlock.x + y = relaxationBlock.y + # variables created + self.assertIsInstance(w, Var) + self.assertIsInstance(x, Var) + 
self.assertIsInstance(y, Var) + # the are in reals + self.assertIsInstance(w.domain, RealSet) + self.assertIsInstance(x.domain, RealSet) + self.assertIsInstance(y.domain, RealSet) + # they don't have bounds + self.assertEqual(w.lb, 0) + self.assertEqual(w.ub, 7) + self.assertEqual(x.lb, 0) + self.assertEqual(x.ub, 8) + self.assertEqual(y.lb, -10) + self.assertEqual(y.ub, 0) + + def check_furman_et_al_denominator(self, expr, ind_var): + self.assertEqual(expr._const, EPS) + self.assertEqual(len(expr._args), 1) + self.assertEqual(len(expr._coef), 1) + self.assertEqual(expr._coef[0], 1 - EPS) + self.assertIs(expr._args[0], ind_var) + + def test_transformed_constraint_nonlinear(self): + m = models.makeTwoTermDisj_Nonlinear() + TransformationFactory('gdp.hull').apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + # the only constraint on the first block is the non-linear one + disj1c = disjBlock[0].component("d[0].c") + self.assertIsInstance(disj1c, Constraint) + # we only have an upper bound + self.assertEqual(len(disj1c), 1) + cons = disj1c['ub'] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertFalse(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 1) + # This is a weak test, but as good as any to ensure that the + # substitution was done correctly + EPS_1 = 1-EPS + self.assertEqual( + str(cons.body), + "(%s*d[0].indicator_var + %s)*(" + "_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].x" + "/(%s*d[0].indicator_var + %s) + " + "(_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].y/" + "(%s*d[0].indicator_var + %s))**2) - " + "%s*(0.0 + 0.0**2)*(1 - d[0].indicator_var) " + "- 14.0*d[0].indicator_var" + % (EPS_1, EPS, EPS_1, EPS, EPS_1, EPS, EPS)) + + def test_transformed_constraints_linear(self): + m = models.makeTwoTermDisj_Nonlinear() + TransformationFactory('gdp.hull').apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + # the 
only constraint on the first block is the non-linear one + c1 = disjBlock[1].component("d[1].c1") + # has only lb + self.assertEqual(len(c1), 1) + cons = c1['lb'] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, disjBlock[1].x, -1) + ct.check_linear_coef(self, repn, m.d[1].indicator_var, 2) + self.assertEqual(repn.constant, 0) + self.assertEqual(disjBlock[1].x.lb, 0) + self.assertEqual(disjBlock[1].x.ub, 8) + + c2 = disjBlock[1].component("d[1].c2") + # 'eq' is preserved + self.assertEqual(len(c2), 1) + cons = c2['eq'] + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, disjBlock[1].w, 1) + ct.check_linear_coef(self, repn, m.d[1].indicator_var, -3) + self.assertEqual(repn.constant, 0) + self.assertEqual(disjBlock[1].w.lb, 0) + self.assertEqual(disjBlock[1].w.ub, 7) + + c3 = disjBlock[1].component("d[1].c3") + # bounded inequality is split + self.assertEqual(len(c3), 2) + cons = c3['lb'] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, disjBlock[1].x, -1) + ct.check_linear_coef(self, repn, m.d[1].indicator_var, 1) + self.assertEqual(repn.constant, 0) + + cons = c3['ub'] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, disjBlock[1].x, 1) + ct.check_linear_coef(self, repn, m.d[1].indicator_var, -3) + self.assertEqual(repn.constant, 0) + + def check_bound_constraints(self, cons, 
disvar, indvar, lb, ub): + self.assertIsInstance(cons, Constraint) + # both lb and ub + self.assertEqual(len(cons), 2) + varlb = cons['lb'] + self.assertIsNone(varlb.lower) + self.assertEqual(varlb.upper, 0) + repn = generate_standard_repn(varlb.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, indvar, lb) + ct.check_linear_coef(self, repn, disvar, -1) + + varub = cons['ub'] + self.assertIsNone(varub.lower) + self.assertEqual(varub.upper, 0) + repn = generate_standard_repn(varub.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, indvar, -ub) + ct.check_linear_coef(self, repn, disvar, 1) + + def test_disaggregatedVar_bounds(self): + m = models.makeTwoTermDisj_Nonlinear() + TransformationFactory('gdp.hull').apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + for i in [0,1]: + # check bounds constraints for each variable on each of the two + # disjuncts. + self.check_bound_constraints(disjBlock[i].w_bounds, disjBlock[i].w, + m.d[i].indicator_var, 2, 7) + self.check_bound_constraints(disjBlock[i].x_bounds, disjBlock[i].x, + m.d[i].indicator_var, 1, 8) + self.check_bound_constraints(disjBlock[i].y_bounds, disjBlock[i].y, + m.d[i].indicator_var, -10, -3) + + def test_error_for_or(self): + m = models.makeTwoTermDisj_Nonlinear() + m.disjunction.xor = False + + self.assertRaisesRegexp( + GDP_Error, + "Cannot do hull reformulation for Disjunction " + "'disjunction' with OR constraint. 
Must be an XOR!*", + TransformationFactory('gdp.hull').apply_to, + m) + + def check_disaggregation_constraint(self, cons, var, disvar1, disvar2): + repn = generate_standard_repn(cons.body) + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) + self.assertEqual(len(repn.linear_vars), 3) + ct.check_linear_coef(self, repn, var, 1) + ct.check_linear_coef(self, repn, disvar1, -1) + ct.check_linear_coef(self, repn, disvar2, -1) + + def test_disaggregation_constraint(self): + m = models.makeTwoTermDisj_Nonlinear() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + self.check_disaggregation_constraint( + hull.get_disaggregation_constraint(m.w, m.disjunction), m.w, + disjBlock[0].w, disjBlock[1].w) + self.check_disaggregation_constraint( + hull.get_disaggregation_constraint(m.x, m.disjunction), m.x, + disjBlock[0].x, disjBlock[1].x) + self.check_disaggregation_constraint( + hull.get_disaggregation_constraint(m.y, m.disjunction), m.y, + disjBlock[0].y, disjBlock[1].y) + + def test_xor_constraint_mapping(self): + ct.check_xor_constraint_mapping(self, 'hull') + + def test_xor_constraint_mapping_two_disjunctions(self): + ct.check_xor_constraint_mapping_two_disjunctions(self, 'hull') + + def test_transformed_disjunct_mappings(self): + ct.check_disjunct_mapping(self, 'hull') + + def test_transformed_constraint_mappings(self): + # ESJ: Letting bigm and hull test their own constraint mappings + # because, though the paradigm is the same, hull doesn't always create + # a transformed constraint when it can instead accomplish an x == 0 + # constraint by fixing the disaggregated variable. 
+ m = models.makeTwoTermDisj_Nonlinear() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + # first disjunct + orig1 = m.d[0].c + trans1 = disjBlock[0].component("d[0].c") + self.assertIs(hull.get_src_constraint(trans1), orig1) + self.assertIs(hull.get_src_constraint(trans1['ub']), orig1) + trans_list = hull.get_transformed_constraints(orig1) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], trans1['ub']) + + # second disjunct + + # first constraint + orig1 = m.d[1].c1 + trans1 = disjBlock[1].component("d[1].c1") + self.assertIs(hull.get_src_constraint(trans1), orig1) + self.assertIs(hull.get_src_constraint(trans1['lb']), orig1) + trans_list = hull.get_transformed_constraints(orig1) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], trans1['lb']) + + # second constraint + orig2 = m.d[1].c2 + trans2 = disjBlock[1].component("d[1].c2") + self.assertIs(hull.get_src_constraint(trans2), orig2) + self.assertIs(hull.get_src_constraint(trans2['eq']), orig2) + trans_list = hull.get_transformed_constraints(orig2) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], trans2['eq']) + + # third constraint + orig3 = m.d[1].c3 + trans3 = disjBlock[1].component("d[1].c3") + self.assertIs(hull.get_src_constraint(trans3), orig3) + self.assertIs(hull.get_src_constraint(trans3['lb']), orig3) + self.assertIs(hull.get_src_constraint(trans3['ub']), orig3) + trans_list = hull.get_transformed_constraints(orig3) + self.assertEqual(len(trans_list), 2) + self.assertIs(trans_list[0], trans3['lb']) + self.assertIs(trans_list[1], trans3['ub']) + + def test_disaggregatedVar_mappings(self): + m = models.makeTwoTermDisj_Nonlinear() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + for i in [0,1]: + mappings = ComponentMap() + mappings[m.w] = disjBlock[i].w + mappings[m.y] = 
disjBlock[i].y + mappings[m.x] = disjBlock[i].x + + for orig, disagg in iteritems(mappings): + self.assertIs(hull.get_src_var(disagg), orig) + self.assertIs(hull.get_disaggregated_var(orig, m.d[i]), disagg) + + def test_bigMConstraint_mappings(self): + m = models.makeTwoTermDisj_Nonlinear() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + for i in [0,1]: + mappings = ComponentMap() + # [ESJ 11/05/2019] I think this test was useless before... I think + # this *map* was useless before. It should be disaggregated variable + # to the constraints, not the original variable? Why did this even + # work?? + mappings[disjBlock[i].w] = disjBlock[i].w_bounds + mappings[disjBlock[i].y] = disjBlock[i].y_bounds + mappings[disjBlock[i].x] = disjBlock[i].x_bounds + for var, cons in iteritems(mappings): + self.assertIs(hull.get_var_bounds_constraint(var), cons) + + def test_create_using_nonlinear(self): + m = models.makeTwoTermDisj_Nonlinear() + self.diff_apply_to_and_create_using(m) + + # [ESJ 02/14/2020] In order to match bigm and the (unfortunate) expectation + # we have established, we never decide something is local based on where it + # is declared. We treat variables declared on Disjuncts as if they are + # declared globally. We need to use the bounds as if they are global and + # also disaggregate the variable + def test_locally_declared_var_bounds_used_globally(self): + m = models.localVar() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + # check that we used the bounds on the local variable as if they are + # global. Which means checking the bounds constraints... 
+ y_disagg = m.disj2.transformation_block().y + cons = hull.get_var_bounds_constraint(y_disagg) + lb = cons['lb'] + self.assertIsNone(lb.lower) + self.assertEqual(value(lb.upper), 0) + repn = generate_standard_repn(lb.body) + self.assertTrue(repn.is_linear()) + ct.check_linear_coef(self, repn, m.disj2.indicator_var, 1) + ct.check_linear_coef(self, repn, y_disagg, -1) + + ub = cons['ub'] + self.assertIsNone(ub.lower) + self.assertEqual(value(ub.upper), 0) + repn = generate_standard_repn(ub.body) + self.assertTrue(repn.is_linear()) + ct.check_linear_coef(self, repn, y_disagg, 1) + ct.check_linear_coef(self, repn, m.disj2.indicator_var, -3) + + def test_locally_declared_variables_disaggregated(self): + m = models.localVar() + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + # two birds one stone: test the mappings too + disj1y = hull.get_disaggregated_var(m.disj2.y, m.disj1) + disj2y = hull.get_disaggregated_var(m.disj2.y, m.disj2) + self.assertIs(disj1y, m.disj1._transformation_block().y) + self.assertIs(disj2y, m.disj2._transformation_block().y) + self.assertIs(hull.get_src_var(disj1y), m.disj2.y) + self.assertIs(hull.get_src_var(disj2y), m.disj2.y) + + def test_global_vars_local_to_a_disjunction_disaggregated(self): + # The point of this is that where a variable is declared has absolutely + # nothing to do with whether or not it should be disaggregated. With the + # only exception being that we can tell disaggregated variables and we + # know they are really and truly local to only one disjunct (EVER, in the + # whole model) because we declared them. + + # So here, for some perverse reason, we declare the variables on disj1, + # but we use them in disj2. Both of them need to be disaggregated in + # both disjunctions though: Neither is local. (And, unless we want to do + # a search of the whole model (or disallow this kind of insanity) we + # can't be smarter because what if you transformed this one disjunction + # at a time? 
You can never assume a variable isn't used elsewhere in the + # model, and if it is, you must disaggregate it.) + m = ConcreteModel() + m.disj1 = Disjunct() + m.disj1.x = Var(bounds=(1, 10)) + m.disj1.y = Var(bounds=(2, 11)) + m.disj1.cons1 = Constraint(expr=m.disj1.x + m.disj1.y <= 5) + m.disj2 = Disjunct() + m.disj2.cons = Constraint(expr=m.disj1.y >= 8) + m.disjunction1 = Disjunction(expr=[m.disj1, m.disj2]) + + m.disj3 = Disjunct() + m.disj3.cons = Constraint(expr=m.disj1.x >= 7) + m.disj4 = Disjunct() + m.disj4.cons = Constraint(expr=m.disj1.y == 3) + m.disjunction2 = Disjunction(expr=[m.disj3, m.disj4]) + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + # check that all the variables are disaggregated + for disj in [m.disj1, m.disj2, m.disj3, m.disj4]: + transBlock = disj.transformation_block() + self.assertEqual(len([v for v in + transBlock.component_data_objects(Var)]), 2) + x = transBlock.component("x") + y = transBlock.component("y") + self.assertIsInstance(x, Var) + self.assertIsInstance(y, Var) + self.assertIs(hull.get_disaggregated_var(m.disj1.x, disj), x) + self.assertIs(hull.get_src_var(x), m.disj1.x) + self.assertIs(hull.get_disaggregated_var(m.disj1.y, disj), y) + self.assertIs(hull.get_src_var(y), m.disj1.y) + + def check_name_collision_disaggregated_vars(self, m, disj, name): + hull = TransformationFactory('gdp.hull') + transBlock = disj.transformation_block() + self.assertEqual(len([v for v in + transBlock.component_data_objects(Var)]), 2) + x = transBlock.component("x") + x2 = transBlock.component(name) + self.assertIsInstance(x, Var) + self.assertIsInstance(x2, Var) + self.assertIs(hull.get_disaggregated_var(m.disj1.x, disj), x) + self.assertIs(hull.get_src_var(x), m.disj1.x) + self.assertIs(hull.get_disaggregated_var(m.x, disj), x2) + self.assertIs(hull.get_src_var(x2), m.x) + + def test_disaggregated_var_name_collision(self): + # same model as the test above, but now I am putting what was disj1.y + # as m.x, just to invite 
disaster. + m = ConcreteModel() + m.x = Var(bounds=(2, 11)) + m.disj1 = Disjunct() + m.disj1.x = Var(bounds=(1, 10)) + m.disj1.cons1 = Constraint(expr=m.disj1.x + m.x <= 5) + m.disj2 = Disjunct() + m.disj2.cons = Constraint(expr=m.x >= 8) + m.disjunction1 = Disjunction(expr=[m.disj1, m.disj2]) + + m.disj3 = Disjunct() + m.disj3.cons = Constraint(expr=m.disj1.x >= 7) + m.disj4 = Disjunct() + m.disj4.cons = Constraint(expr=m.x == 3) + m.disjunction2 = Disjunction(expr=[m.disj3, m.disj4]) + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + for disj, nm in ((m.disj1, "x_4"), (m.disj2, "x_9"), + (m.disj3, "x_5"), (m.disj4, "x_8")): + self.check_name_collision_disaggregated_vars(m, disj, nm) + + def test_do_not_transform_user_deactivated_disjuncts(self): + ct.check_user_deactivated_disjuncts(self, 'hull') + + def test_improperly_deactivated_disjuncts(self): + ct.check_improperly_deactivated_disjuncts(self, 'hull') + + def test_do_not_transform_userDeactivated_IndexedDisjunction(self): + ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, + 'hull') + + def test_disjunction_deactivated(self): + ct.check_disjunction_deactivated(self, 'hull') + + def test_disjunctDatas_deactivated(self): + ct.check_disjunctDatas_deactivated(self, 'hull') + + def test_deactivated_constraints(self): + ct.check_deactivated_constraints(self, 'hull') + + def check_no_double_transformation(self): + ct.check_do_not_transform_twice_if_disjunction_reactivated(self, + 'hull') + + def test_indicator_vars(self): + ct.check_indicator_vars(self, 'hull') + + def test_xor_constraints(self): + ct.check_xor_constraint(self, 'hull') + + def test_unbounded_var_error(self): + m = models.makeTwoTermDisj_Nonlinear() + # no bounds + m.w.setlb(None) + m.w.setub(None) + self.assertRaisesRegexp( + GDP_Error, + "Variables that appear in disjuncts must be " + "bounded in order to use the hull " + "transformation! 
Missing bound for w.*", + TransformationFactory('gdp.hull').apply_to, + m) + + def test_indexed_constraints_in_disjunct(self): + m = models.makeThreeTermDisj_IndexedConstraints() + + TransformationFactory('gdp.hull').apply_to(m) + transBlock = m._pyomo_gdp_hull_reformulation + + # 2 blocks: the original Disjunct and the transformation block + self.assertEqual( + len(list(m.component_objects(Block, descend_into=False))), 2) + self.assertEqual( + len(list(m.component_objects(Disjunct))), 0) + + # Each relaxed disjunct should have 3 vars, but i "d[i].c" + # Constraints + for i in [1,2,3]: + relaxed = transBlock.relaxedDisjuncts[i-1] + self.assertEqual(len(list(relaxed.component_objects(Var))), 3) + self.assertEqual(len(list(relaxed.component_data_objects(Var))), 3) + self.assertEqual( + len(list(relaxed.component_objects(Constraint))), 4) + # Note: m.x LB == 0, so only 3 bounds constriants (not 6) + self.assertEqual( + len(list(relaxed.component_data_objects(Constraint))), 3+i) + self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + + def test_virtual_indexed_constraints_in_disjunct(self): + m = ConcreteModel() + m.I = [1,2,3] + m.x = Var(m.I, bounds=(-1,10)) + def d_rule(d,j): + m = d.model() + d.c = Constraint(Any) + for k in range(j): + d.c[k+1] = m.x[k+1] >= k+1 + m.d = Disjunct(m.I, rule=d_rule) + m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) + + TransformationFactory('gdp.hull').apply_to(m) + transBlock = m._pyomo_gdp_hull_reformulation + + # 2 blocks: the original Disjunct and the transformation block + self.assertEqual( + len(list(m.component_objects(Block, descend_into=False))), 2) + self.assertEqual( + len(list(m.component_objects(Disjunct))), 0) + + # Each relaxed disjunct should have 3 vars, but i "d[i].c" + # Constraints + for i in [1,2,3]: + relaxed = transBlock.relaxedDisjuncts[i-1] + self.assertEqual(len(list(relaxed.component_objects(Var))), 3) + self.assertEqual(len(list(relaxed.component_data_objects(Var))), 3) + self.assertEqual( + 
len(list(relaxed.component_objects(Constraint))), 4) + self.assertEqual( + len(list(relaxed.component_data_objects(Constraint))), 3*2+i) + self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + + def test_do_not_transform_deactivated_constraintDatas(self): + m = models.makeTwoTermDisj_IndexedConstraints() + m.a[1].setlb(0) + m.a[1].setub(100) + m.a[2].setlb(0) + m.a[2].setub(100) + m.b.simpledisj1.c[1].deactivate() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + # can't ask for simpledisj1.c[1]: it wasn't transformed + log = StringIO() + with LoggingIntercept(log, 'pyomo.gdp', logging.ERROR): + self.assertRaisesRegexp( + KeyError, + ".*b.simpledisj1.c\[1\]", + hull.get_transformed_constraints, + m.b.simpledisj1.c[1]) + self.assertRegexpMatches(log.getvalue(), + ".*Constraint 'b.simpledisj1.c\[1\]' has not " + "been transformed.") + + # this fixes a[2] to 0, so we should get the disggregated var + transformed = hull.get_transformed_constraints(m.b.simpledisj1.c[2]) + self.assertEqual(len(transformed), 1) + disaggregated_a2 = hull.get_disaggregated_var(m.a[2], m.b.simpledisj1) + self.assertIs(transformed[0], disaggregated_a2) + self.assertIsInstance(disaggregated_a2, Var) + self.assertTrue(disaggregated_a2.is_fixed()) + self.assertEqual(value(disaggregated_a2), 0) + + transformed = hull.get_transformed_constraints(m.b.simpledisj2.c[1]) + # simpledisj2.c[1] is a <= constraint + self.assertEqual(len(transformed), 1) + self.assertIs(transformed[0], + m.b.simpledisj2.transformation_block().\ + component("b.simpledisj2.c")[(1,'ub')]) + + transformed = hull.get_transformed_constraints(m.b.simpledisj2.c[2]) + # simpledisj2.c[2] is a <= constraint + self.assertEqual(len(transformed), 1) + self.assertIs(transformed[0], + m.b.simpledisj2.transformation_block().\ + component("b.simpledisj2.c")[(2,'ub')]) + + +class MultiTermDisj(unittest.TestCase, CommonTests): + def test_xor_constraint(self): + ct.check_three_term_xor_constraint(self, 'hull') + + def 
test_create_using(self): + m = models.makeThreeTermIndexedDisj() + self.diff_apply_to_and_create_using(m) + +class IndexedDisjunction(unittest.TestCase, CommonTests): + def setUp(self): + # set seed so we can test name collisions predictably + random.seed(666) + + def test_disaggregation_constraints(self): + m = models.makeTwoTermIndexedDisjunction() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + relaxedDisjuncts = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + disaggregatedVars = { + 1: [relaxedDisjuncts[0].component('x[1]'), + relaxedDisjuncts[1].component('x[1]')], + 2: [relaxedDisjuncts[2].component('x[2]'), + relaxedDisjuncts[3].component('x[2]')], + 3: [relaxedDisjuncts[4].component('x[3]'), + relaxedDisjuncts[5].component('x[3]')], + } + + for i, disVars in iteritems(disaggregatedVars): + cons = hull.get_disaggregation_constraint(m.x[i], + m.disjunction[i]) + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 3) + ct.check_linear_coef(self, repn, m.x[i], 1) + ct.check_linear_coef(self, repn, disVars[0], -1) + ct.check_linear_coef(self, repn, disVars[1], -1) + + def test_disaggregation_constraints_tuple_indices(self): + m = models.makeTwoTermMultiIndexedDisjunction() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + relaxedDisjuncts = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts + + disaggregatedVars = { + (1,'A'): [relaxedDisjuncts[0].component('a[1,A]'), + relaxedDisjuncts[1].component('a[1,A]')], + (1,'B'): [relaxedDisjuncts[2].component('a[1,B]'), + relaxedDisjuncts[3].component('a[1,B]')], + (2,'A'): [relaxedDisjuncts[4].component('a[2,A]'), + relaxedDisjuncts[5].component('a[2,A]')], + (2,'B'): [relaxedDisjuncts[6].component('a[2,B]'), + relaxedDisjuncts[7].component('a[2,B]')], + } + + for i, disVars in iteritems(disaggregatedVars): + cons = 
hull.get_disaggregation_constraint(m.a[i], + m.disjunction[i]) + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) + # NOTE: fixed variables are evaluated here. + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + # The flag=1 disjunct disaggregated variable is fixed to 0, so the + # below is actually correct: + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, m.a[i], 1) + ct.check_linear_coef(self, repn, disVars[0], -1) + self.assertTrue(disVars[1].is_fixed()) + self.assertEqual(value(disVars[1]), 0) + + def test_xor_constraints(self): + ct.check_indexed_xor_constraints(self, 'hull') + + def test_xor_constraints_with_targets(self): + ct.check_indexed_xor_constraints_with_targets(self, 'hull') + + def test_create_using(self): + m = models.makeTwoTermMultiIndexedDisjunction() + ct.diff_apply_to_and_create_using(self, m, 'gdp.hull') + + def test_deactivated_constraints(self): + ct.check_constraints_deactivated_indexedDisjunction(self, 'hull') + + def test_deactivated_disjuncts(self): + ct.check_deactivated_disjuncts(self, 'hull') + + def test_deactivated_disjunctions(self): + ct.check_deactivated_disjunctions(self, 'hull') + + def test_partial_deactivate_indexed_disjunction(self): + ct.check_partial_deactivate_indexed_disjunction(self, 'hull') + + def test_disjunction_data_target(self): + ct.check_disjunction_data_target(self, 'hull') + + def test_disjunction_data_target_any_index(self): + ct.check_disjunction_data_target_any_index(self, 'hull') + + def test_targets_with_container_as_arg(self): + ct.check_targets_with_container_as_arg(self, 'hull') + + def check_trans_block_disjunctions_of_disjunct_datas(self, m): + transBlock1 = m.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance(transBlock1, Block) + self.assertIsInstance(transBlock1.component("relaxedDisjuncts"), Block) + # We end up with a transformation block for every SimpleDisjunction 
or + # IndexedDisjunction. + self.assertEqual(len(transBlock1.relaxedDisjuncts), 2) + self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component("x"), + Var) + self.assertTrue(transBlock1.relaxedDisjuncts[0].x.is_fixed()) + self.assertEqual(value(transBlock1.relaxedDisjuncts[0].x), 0) + self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component( + "firstTerm[1].cons"), Constraint) + # No constraint becuase disaggregated variable fixed to 0 + self.assertEqual(len(transBlock1.relaxedDisjuncts[0].component( + "firstTerm[1].cons")), 0) + self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock1.relaxedDisjuncts[0].component( + "x_bounds")), 2) + + self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component("x"), + Var) + self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component( + "secondTerm[1].cons"), Constraint) + self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( + "secondTerm[1].cons")), 1) + self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( + "x_bounds")), 2) + + transBlock2 = m.component("_pyomo_gdp_hull_reformulation_4") + self.assertIsInstance(transBlock2, Block) + self.assertIsInstance(transBlock2.component("relaxedDisjuncts"), Block) + self.assertEqual(len(transBlock2.relaxedDisjuncts), 2) + self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component("x"), + Var) + self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component( + "firstTerm[2].cons"), Constraint) + # we have an equality constraint + self.assertEqual(len(transBlock2.relaxedDisjuncts[0].component( + "firstTerm[2].cons")), 1) + self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock2.relaxedDisjuncts[0].component( + "x_bounds")), 2) + + self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component("x"), + Var) + 
self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component( + "secondTerm[2].cons"), Constraint) + self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( + "secondTerm[2].cons")), 1) + self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( + "x_bounds")), 2) + + def test_simple_disjunction_of_disjunct_datas(self): + ct.check_simple_disjunction_of_disjunct_datas(self, 'hull') + + def test_any_indexed_disjunction_of_disjunct_datas(self): + m = models.makeAnyIndexedDisjunctionOfDisjunctDatas() + TransformationFactory('gdp.hull').apply_to(m) + + transBlock = m.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance(transBlock, Block) + self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 4) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].component("x"), + Var) + self.assertTrue(transBlock.relaxedDisjuncts[0].x.is_fixed()) + self.assertEqual(value(transBlock.relaxedDisjuncts[0].x), 0) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( + "firstTerm[1].cons"), Constraint) + # No constraint becuase disaggregated variable fixed to 0 + self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( + "firstTerm[1].cons")), 0) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( + "x_bounds")), 2) + + self.assertIsInstance(transBlock.relaxedDisjuncts[1].component("x"), + Var) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( + "secondTerm[1].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( + "secondTerm[1].cons")), 1) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( + "x_bounds")), 2) + + 
self.assertIsInstance(transBlock.relaxedDisjuncts[2].component("x"), + Var) + self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( + "firstTerm[2].cons"), Constraint) + # we have an equality constraint + self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( + "firstTerm[2].cons")), 1) + self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( + "x_bounds")), 2) + + self.assertIsInstance(transBlock.relaxedDisjuncts[3].component("x"), + Var) + self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( + "secondTerm[2].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( + "secondTerm[2].cons")), 1) + self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( + "x_bounds"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( + "x_bounds")), 2) + + self.assertIsInstance(transBlock.component("disjunction_xor"), + Constraint) + self.assertEqual(len(transBlock.component("disjunction_xor")), 2) + + def check_first_iteration(self, model): + transBlock = model.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance(transBlock, Block) + self.assertIsInstance( + transBlock.component("disjunctionList_xor"), Constraint) + self.assertEqual(len(transBlock.disjunctionList_xor), 1) + self.assertFalse(model.disjunctionList[0].active) + + self.assertIsInstance(transBlock.relaxedDisjuncts, Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 2) + + self.assertIsInstance(transBlock.relaxedDisjuncts[0].x, Var) + self.assertTrue(transBlock.relaxedDisjuncts[0].x.is_fixed()) + self.assertEqual(value(transBlock.relaxedDisjuncts[0].x), 0) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( + "firstTerm[0].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( + "firstTerm[0].cons")), 0) + 
self.assertIsInstance(transBlock.relaxedDisjuncts[0].x_bounds, + Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[0].x_bounds), 2) + + self.assertIsInstance(transBlock.relaxedDisjuncts[1].x, Var) + self.assertFalse(transBlock.relaxedDisjuncts[1].x.is_fixed()) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( + "secondTerm[0].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( + "secondTerm[0].cons")), 1) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].x_bounds, + Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[1].x_bounds), 2) + + def check_second_iteration(self, model): + transBlock = model.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance(transBlock, Block) + self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 4) + self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( + "firstTerm[1].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( + "firstTerm[1].cons")), 1) + self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( + "secondTerm[1].cons"), Constraint) + self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( + "secondTerm[1].cons")), 1) + self.assertEqual( + len(transBlock.disjunctionList_xor), 2) + self.assertFalse(model.disjunctionList[1].active) + self.assertFalse(model.disjunctionList[0].active) + + def test_disjunction_and_disjuncts_indexed_by_any(self): + ct.check_disjunction_and_disjuncts_indexed_by_any(self, 'hull') + + def test_iteratively_adding_disjunctions_transform_container(self): + ct.check_iteratively_adding_disjunctions_transform_container(self, + 'hull') + + def test_iteratively_adding_disjunctions_transform_model(self): + ct.check_iteratively_adding_disjunctions_transform_model(self, 'hull') + + def test_iteratively_adding_to_indexed_disjunction_on_block(self): + 
ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, + 'hull') + +class TestTargets_SingleDisjunction(unittest.TestCase, CommonTests): + def test_only_targets_inactive(self): + ct.check_only_targets_inactive(self, 'hull') + + def test_only_targets_transformed(self): + ct.check_only_targets_get_transformed(self, 'hull') + + def test_target_not_a_component_err(self): + ct.check_target_not_a_component_error(self, 'hull') + + def test_targets_cannot_be_cuids(self): + ct.check_targets_cannot_be_cuids(self, 'hull') + +class TestTargets_IndexedDisjunction(unittest.TestCase, CommonTests): + # There are a couple tests for targets above, but since I had the patience + # to make all these for bigm also, I may as well reap the benefits here too. + def test_indexedDisj_targets_inactive(self): + ct.check_indexedDisj_targets_inactive(self, 'hull') + + def test_indexedDisj_only_targets_transformed(self): + ct.check_indexedDisj_only_targets_transformed(self, 'hull') + + def test_warn_for_untransformed(self): + ct.check_warn_for_untransformed(self, 'hull') + + def test_disjData_targets_inactive(self): + ct.check_disjData_targets_inactive(self, 'hull') + m = models.makeDisjunctionsOnIndexedBlock() + + def test_disjData_only_targets_transformed(self): + ct.check_disjData_only_targets_transformed(self, 'hull') + + def test_indexedBlock_targets_inactive(self): + ct.check_indexedBlock_targets_inactive(self, 'hull') + + def test_indexedBlock_only_targets_transformed(self): + ct.check_indexedBlock_only_targets_transformed(self, 'hull') + + def test_blockData_targets_inactive(self): + ct.check_blockData_targets_inactive(self, 'hull') + + def test_blockData_only_targets_transformed(self): + ct.check_blockData_only_targets_transformed(self, 'hull') + + def test_do_not_transform_deactivated_targets(self): + ct.check_do_not_transform_deactivated_targets(self, 'hull') + + def test_create_using(self): + m = models.makeDisjunctionsOnIndexedBlock() + 
ct.diff_apply_to_and_create_using(self, m, 'gdp.hull') + +class DisaggregatedVarNamingConflict(unittest.TestCase): + @staticmethod + def makeModel(): + m = ConcreteModel() + m.b = Block() + m.b.x = Var(bounds=(0, 10)) + m.add_component("b.x", Var(bounds=(-9, 9))) + def disjunct_rule(d, i): + m = d.model() + if i: + d.cons_block = Constraint(expr=m.b.x >= 5) + d.cons_model = Constraint(expr=m.component("b.x")==0) + else: + d.cons_model = Constraint(expr=m.component("b.x") <= -5) + m.disjunct = Disjunct([0,1], rule=disjunct_rule) + m.disjunction = Disjunction(expr=[m.disjunct[0], m.disjunct[1]]) + + return m + + def test_disaggregation_constraints(self): + m = self.makeModel() + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + disaggregationConstraints = m._pyomo_gdp_hull_reformulation.\ + disaggregationConstraints + disaggregationConstraints.pprint() + consmap = [ + (m.component("b.x"), disaggregationConstraints[(0, None)]), + (m.b.x, disaggregationConstraints[(1, None)]) + ] + + for v, cons in consmap: + disCons = hull.get_disaggregation_constraint(v, m.disjunction) + self.assertIs(disCons, cons) + +class DisjunctInMultipleDisjunctions(unittest.TestCase, CommonTests): + def test_error_for_same_disjunct_in_multiple_disjunctions(self): + ct.check_error_for_same_disjunct_in_multiple_disjunctions(self, 'hull') + +class NestedDisjunction(unittest.TestCase, CommonTests): + def setUp(self): + # set seed so we can test name collisions predictably + random.seed(666) + + def test_disjuncts_inactive(self): + ct.check_disjuncts_inactive_nested(self, 'hull') + + def test_deactivated_disjunct_leaves_nested_disjuncts_active(self): + ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, + 'hull') + + def test_mappings_between_disjunctions_and_xors(self): + # For the sake of not second-guessing anyone, we will let the inner + # disjunction point to its original XOR constraint. 
This constraint + # itself will be transformed by the outer disjunction, so if you want to + # find what it became you will have to follow its map to the transformed + # version. (But this behaves the same as bigm) + ct.check_mappings_between_disjunctions_and_xors(self, 'hull') + + def test_disjunct_targets_inactive(self): + ct.check_disjunct_targets_inactive(self, 'hull') + + def test_disjunct_only_targets_transformed(self): + ct.check_disjunct_only_targets_transformed(self, 'hull') + + def test_disjunctData_targets_inactive(self): + ct.check_disjunctData_targets_inactive(self, 'hull') + + def test_disjunctData_only_targets_transformed(self): + ct.check_disjunctData_only_targets_transformed(self, 'hull') + + def test_disjunction_target_err(self): + ct.check_disjunction_target_err(self, 'hull') + + @unittest.skipIf(not linear_solvers, "No linear solver available") + def test_relaxation_feasibility(self): + m = models.makeNestedDisjunctions_FlatDisjuncts() + TransformationFactory('gdp.hull').apply_to(m) + + solver = SolverFactory(linear_solvers[0]) + + cases = [ + (1,1,1,1,None), + (0,0,0,0,None), + (1,0,0,0,None), + (0,1,0,0,1.1), + (0,0,1,0,None), + (0,0,0,1,None), + (1,1,0,0,None), + (1,0,1,0,1.2), + (1,0,0,1,1.3), + (1,0,1,1,None), + ] + for case in cases: + m.d1.indicator_var.fix(case[0]) + m.d2.indicator_var.fix(case[1]) + m.d3.indicator_var.fix(case[2]) + m.d4.indicator_var.fix(case[3]) + results = solver.solve(m) + if case[4] is None: + self.assertEqual(results.solver.termination_condition, + pyomo.opt.TerminationCondition.infeasible) + else: + self.assertEqual(results.solver.termination_condition, + pyomo.opt.TerminationCondition.optimal) + self.assertEqual(value(m.obj), case[4]) + + def test_create_using(self): + m = models.makeNestedDisjunctions_FlatDisjuncts() + self.diff_apply_to_and_create_using(m) + + # TODO: test disjunct mappings: This is not the same as bigm because you + # don't move these blocks around in hull the way you do in bigm. 
+ + # And I think it is worth it to go through a full test case for this and + # actually make sure of the transformed constraints too. + + def check_outer_disaggregation_constraint(self, cons, var, disj1, disj2): + hull = TransformationFactory('gdp.hull') + self.assertTrue(cons.active) + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef(self, repn, var, 1) + ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj1), + -1) + ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj2), + -1) + + def check_bounds_constraint_ub(self, constraint, ub, dis_var, ind_var): + hull = TransformationFactory('gdp.hull') + self.assertIsInstance(constraint, Constraint) + self.assertTrue(constraint.active) + self.assertEqual(len(constraint), 1) + self.assertTrue(constraint['ub'].active) + self.assertEqual(constraint['ub'].upper, 0) + self.assertIsNone(constraint['ub'].lower) + repn = generate_standard_repn(constraint['ub'].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, dis_var, 1) + ct.check_linear_coef(self, repn, ind_var, -ub) + self.assertIs(constraint, hull.get_var_bounds_constraint(dis_var)) + + def check_inner_disaggregated_var_bounds(self, cons, dis, ind_var, + original_cons): + hull = TransformationFactory('gdp.hull') + self.assertIsInstance(cons, Constraint) + self.assertTrue(cons.active) + self.assertEqual(len(cons), 1) + self.assertTrue(cons[('ub', 'ub')].active) + self.assertIsNone(cons[('ub', 'ub')].lower) + self.assertEqual(cons[('ub', 'ub')].upper, 0) + repn = generate_standard_repn(cons[('ub', 'ub')].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, dis, 1) + 
ct.check_linear_coef(self, repn, ind_var, -2) + + self.assertIs(hull.get_var_bounds_constraint(dis), original_cons) + transformed_list = hull.get_transformed_constraints(original_cons['ub']) + self.assertEqual(len(transformed_list), 1) + self.assertIs(transformed_list[0], cons[('ub', 'ub')]) + + def check_inner_transformed_constraint(self, cons, dis, lb, ind_var, + first_transformed, original): + hull = TransformationFactory('gdp.hull') + self.assertIsInstance(cons, Constraint) + self.assertTrue(cons.active) + self.assertEqual(len(cons), 1) + # Ha, this really isn't lovely, but its just chance that it's ub the + # second time. + self.assertTrue(cons[('lb', 'ub')].active) + self.assertIsNone(cons[('lb', 'ub')].lower) + self.assertEqual(cons[('lb', 'ub')].upper, 0) + repn = generate_standard_repn(cons[('lb', 'ub')].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, dis, -1) + ct.check_linear_coef(self, repn, ind_var, lb) + + self.assertIs(hull.get_src_constraint(first_transformed), + original) + trans_list = hull.get_transformed_constraints(original) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], first_transformed['lb']) + self.assertIs(hull.get_src_constraint(first_transformed['lb']), + original) + self.assertIs(hull.get_src_constraint(cons), first_transformed) + trans_list = hull.get_transformed_constraints(first_transformed['lb']) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], cons[('lb', 'ub')]) + self.assertIs(hull.get_src_constraint(cons[('lb', 'ub')]), + first_transformed['lb']) + + def check_outer_transformed_constraint(self, cons, dis, lb, ind_var): + hull = TransformationFactory('gdp.hull') + self.assertIsInstance(cons, Constraint) + self.assertTrue(cons.active) + self.assertEqual(len(cons), 1) + self.assertTrue(cons['lb'].active) + self.assertIsNone(cons['lb'].lower) + self.assertEqual(cons['lb'].upper, 
0) + repn = generate_standard_repn(cons['lb'].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 2) + ct.check_linear_coef(self, repn, dis, -1) + ct.check_linear_coef(self, repn, ind_var, lb) + + orig = ind_var.parent_block().c + self.assertIs(hull.get_src_constraint(cons), orig) + trans_list = hull.get_transformed_constraints(orig) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], cons['lb']) + + def test_transformed_model_nestedDisjuncts(self): + # This test tests *everything* for a simple nested disjunction case. + m = models.makeNestedDisjunctions_NestedDisjuncts() + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + transBlock = m._pyomo_gdp_hull_reformulation + self.assertTrue(transBlock.active) + + # outer xor should be on this block + xor = transBlock.disj_xor + self.assertIsInstance(xor, Constraint) + self.assertTrue(xor.active) + self.assertEqual(xor.lower, 1) + self.assertEqual(xor.upper, 1) + repn = generate_standard_repn(xor.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef(self, repn, m.d1.indicator_var, 1) + ct.check_linear_coef(self, repn, m.d2.indicator_var, 1) + self.assertIs(xor, m.disj.algebraic_constraint()) + self.assertIs(m.disj, hull.get_src_disjunction(xor)) + + # so should the outer disaggregation constraint + dis = transBlock.disaggregationConstraints + self.assertIsInstance(dis, Constraint) + self.assertTrue(dis.active) + self.assertEqual(len(dis), 3) + self.check_outer_disaggregation_constraint(dis[0,None], m.x, m.d1, + m.d2) + self.assertIs(hull.get_disaggregation_constraint(m.x, m.disj), + dis[0, None]) + self.check_outer_disaggregation_constraint(dis[1,None], + m.d1.d3.indicator_var, m.d1, + m.d2) + self.assertIs(hull.get_disaggregation_constraint(m.d1.d3.indicator_var, + m.disj), dis[1,None]) + self.check_outer_disaggregation_constraint(dis[2,None], + m.d1.d4.indicator_var, 
m.d1, + m.d2) + self.assertIs(hull.get_disaggregation_constraint(m.d1.d4.indicator_var, + m.disj), dis[2,None]) + + # we should have two disjunct transformation blocks + disjBlocks = transBlock.relaxedDisjuncts + self.assertTrue(disjBlocks.active) + self.assertEqual(len(disjBlocks), 2) + + disj1 = disjBlocks[0] + self.assertTrue(disj1.active) + self.assertIs(disj1, m.d1.transformation_block()) + self.assertIs(m.d1, hull.get_src_disjunct(disj1)) + + # check the disaggregated vars are here + self.assertIsInstance(disj1.x, Var) + self.assertEqual(disj1.x.lb, 0) + self.assertEqual(disj1.x.ub, 2) + self.assertIs(disj1.x, hull.get_disaggregated_var(m.x, m.d1)) + self.assertIs(m.x, hull.get_src_var(disj1.x)) + d3 = disj1.component("indicator_var") + self.assertEqual(d3.lb, 0) + self.assertEqual(d3.ub, 1) + self.assertIsInstance(d3, Var) + self.assertIs(d3, hull.get_disaggregated_var(m.d1.d3.indicator_var, + m.d1)) + self.assertIs(m.d1.d3.indicator_var, hull.get_src_var(d3)) + d4 = disj1.component("indicator_var_4") + self.assertIsInstance(d4, Var) + self.assertEqual(d4.lb, 0) + self.assertEqual(d4.ub, 1) + self.assertIs(d4, hull.get_disaggregated_var(m.d1.d4.indicator_var, + m.d1)) + self.assertIs(m.d1.d4.indicator_var, hull.get_src_var(d4)) + + # check inner disjunction disaggregated vars + x3 = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].x + self.assertIsInstance(x3, Var) + self.assertEqual(x3.lb, 0) + self.assertEqual(x3.ub, 2) + self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d3), x3) + self.assertIs(hull.get_src_var(x3), m.x) + + x4 = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].x + self.assertIsInstance(x4, Var) + self.assertEqual(x4.lb, 0) + self.assertEqual(x4.ub, 2) + self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d4), x4) + self.assertIs(hull.get_src_var(x4), m.x) + + # check the bounds constraints + self.check_bounds_constraint_ub(disj1.x_bounds, 2, disj1.x, + m.d1.indicator_var) + 
self.check_bounds_constraint_ub(disj1.indicator_var_bounds, 1, + disj1.indicator_var, + m.d1.indicator_var) + self.check_bounds_constraint_ub(disj1.indicator_var_4_bounds, 1, + disj1.indicator_var_4, + m.d1.indicator_var) + + # check the transformed constraints + + # transformed xor + xor = disj1.component("d1._pyomo_gdp_hull_reformulation.d1.disj2_xor") + self.assertIsInstance(xor, Constraint) + self.assertTrue(xor.active) + self.assertEqual(len(xor), 1) + self.assertTrue(xor['eq'].active) + self.assertEqual(xor['eq'].lower, 0) + self.assertEqual(xor['eq'].upper, 0) + repn = generate_standard_repn(xor['eq'].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 3) + ct.check_linear_coef(self, repn, disj1.indicator_var, 1) + ct.check_linear_coef(self, repn, disj1.indicator_var_4, 1) + ct.check_linear_coef(self, repn, m.d1.indicator_var, -1) + + # inner disjunction disaggregation constraint + dis_cons_inner_disjunction = disj1.component( + "d1._pyomo_gdp_hull_reformulation.disaggregationConstraints") + self.assertIsInstance(dis_cons_inner_disjunction, Constraint) + self.assertTrue(dis_cons_inner_disjunction.active) + self.assertEqual(len(dis_cons_inner_disjunction), 1) + dis_cons_inner_disjunction.pprint() + self.assertTrue(dis_cons_inner_disjunction[(0,None,'eq')].active) + self.assertEqual(dis_cons_inner_disjunction[(0,None,'eq')].lower, 0) + self.assertEqual(dis_cons_inner_disjunction[(0,None,'eq')].upper, 0) + repn = generate_standard_repn(dis_cons_inner_disjunction[(0, None, + 'eq')].body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 3) + ct.check_linear_coef(self, repn, x3, -1) + ct.check_linear_coef(self, repn, x4, -1) + ct.check_linear_coef(self, repn, disj1.x, 1) + + # disaggregated d3.x bounds constraints + x3_bounds = disj1.component( + "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].x_bounds") + original_cons = 
m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].\ + x_bounds + self.check_inner_disaggregated_var_bounds(x3_bounds, x3, + disj1.indicator_var, + original_cons) + + + # disaggregated d4.x bounds constraints + x4_bounds = disj1.component( + "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].x_bounds") + original_cons = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].\ + x_bounds + self.check_inner_disaggregated_var_bounds(x4_bounds, x4, + disj1.indicator_var_4, + original_cons) + + # transformed x >= 1.2 + cons = disj1.component( + "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].d1.d3.c") + first_transformed = m.d1._pyomo_gdp_hull_reformulation.\ + relaxedDisjuncts[0].component("d1.d3.c") + original = m.d1.d3.c + self.check_inner_transformed_constraint(cons, x3, 1.2, + disj1.indicator_var, + first_transformed, original) + + # transformed x >= 1.3 + cons = disj1.component( + "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].d1.d4.c") + first_transformed = m.d1._pyomo_gdp_hull_reformulation.\ + relaxedDisjuncts[1].component("d1.d4.c") + original = m.d1.d4.c + self.check_inner_transformed_constraint(cons, x4, 1.3, + disj1.indicator_var_4, + first_transformed, original) + + # outer disjunction transformed constraint + cons = disj1.component("d1.c") + self.check_outer_transformed_constraint(cons, disj1.x, 1, + m.d1.indicator_var) + + # and last, check the second transformed outer disjunct + disj2 = disjBlocks[1] + self.assertTrue(disj2.active) + self.assertIs(disj2, m.d2.transformation_block()) + self.assertIs(m.d2, hull.get_src_disjunct(disj2)) + + # disaggregated var + x2 = disj2.x + self.assertIsInstance(x2, Var) + self.assertEqual(x2.lb, 0) + self.assertEqual(x2.ub, 2) + self.assertIs(hull.get_disaggregated_var(m.x, m.d2), x2) + self.assertIs(hull.get_src_var(x2), m.x) + + # bounds constraint + x_bounds = disj2.x_bounds + self.check_bounds_constraint_ub(x_bounds, 2, x2, m.d2.indicator_var) + + # transformed constraint x >= 1.1 + cons = 
disj2.component("d2.c") + self.check_outer_transformed_constraint(cons, x2, 1.1, + m.d2.indicator_var) + + # check inner xor mapping: Note that this maps to a now deactivated + # (transformed again) constraint, but that it is possible to go full + # circle, like so: + orig_inner_xor = m.d1._pyomo_gdp_hull_reformulation.component( + "d1.disj2_xor") + self.assertIs(m.d1.disj2.algebraic_constraint(), orig_inner_xor) + self.assertFalse(orig_inner_xor.active) + trans_list = hull.get_transformed_constraints(orig_inner_xor) + self.assertEqual(len(trans_list), 1) + self.assertIs(trans_list[0], xor['eq']) + self.assertIs(hull.get_src_constraint(xor), orig_inner_xor) + self.assertIs(hull.get_src_disjunction(orig_inner_xor), m.d1.disj2) + + # the same goes for the disaggregation constraint + orig_dis_container = m.d1._pyomo_gdp_hull_reformulation.\ + disaggregationConstraints + orig_dis = orig_dis_container[0,None] + self.assertIs(hull.get_disaggregation_constraint(m.x, m.d1.disj2), + orig_dis) + self.assertFalse(orig_dis.active) + transformedList = hull.get_transformed_constraints(orig_dis) + self.assertEqual(len(transformedList), 1) + self.assertIs(transformedList[0], dis_cons_inner_disjunction[(0, None, + 'eq')]) + + self.assertIs(hull.get_src_constraint( + dis_cons_inner_disjunction[(0, None, 'eq')]), orig_dis) + self.assertIs(hull.get_src_constraint( dis_cons_inner_disjunction), + orig_dis_container) + # though we don't have a map back from the disaggregation constraint to + # the variable because I'm not sure why you would... The variable is in + # the constraint. 
+ + # check the inner disjunct mappings + self.assertIs(m.d1.d3.transformation_block(), + m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]) + self.assertIs(hull.get_src_disjunct( + m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]), m.d1.d3) + self.assertIs(m.d1.d4.transformation_block(), + m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]) + self.assertIs(hull.get_src_disjunct( + m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]), m.d1.d4) + +class TestSpecialCases(unittest.TestCase): + def test_local_vars(self): + """ checks that if nothing is marked as local, we assume it is all + global. We disaggregate everything to be safe.""" + m = ConcreteModel() + m.x = Var(bounds=(5,100)) + m.y = Var(bounds=(0,100)) + m.d1 = Disjunct() + m.d1.c = Constraint(expr=m.y >= m.x) + m.d2 = Disjunct() + m.d2.z = Var() + m.d2.c = Constraint(expr=m.y >= m.d2.z) + m.disj = Disjunction(expr=[m.d1, m.d2]) + + self.assertRaisesRegexp( + GDP_Error, + ".*Missing bound for d2.z.*", + TransformationFactory('gdp.hull').create_using, + m) + m.d2.z.setlb(7) + self.assertRaisesRegexp( + GDP_Error, + ".*Missing bound for d2.z.*", + TransformationFactory('gdp.hull').create_using, + m) + m.d2.z.setub(9) + + i = TransformationFactory('gdp.hull').create_using(m) + rd = i._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1] + # z should be disaggregated becuase we can't be sure it's not somewhere + # else on the model + self.assertEqual(sorted(rd.component_map(Var)), ['x','y','z']) + self.assertEqual(len(rd.component_map(Constraint)), 4) + # bounds haven't changed on original + self.assertEqual(i.d2.z.bounds, (7,9)) + # check disaggregated variable + self.assertIsInstance(rd.component("z"), Var) + self.assertEqual(rd.z.bounds, (0,9)) + self.assertEqual(len(rd.z_bounds), 2) + self.assertEqual(rd.z_bounds['lb'].lower, None) + self.assertEqual(rd.z_bounds['lb'].upper, 0) + self.assertEqual(rd.z_bounds['ub'].lower, None) + self.assertEqual(rd.z_bounds['ub'].upper, 0) + 
i.d2.indicator_var = 1 + rd.z = 2 + self.assertEqual(rd.z_bounds['lb'].body(), 5) + self.assertEqual(rd.z_bounds['ub'].body(), -7) + + m.d2.z.setlb(-9) + m.d2.z.setub(-7) + i = TransformationFactory('gdp.hull').create_using(m) + rd = i._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1] + self.assertEqual(sorted(rd.component_map(Var)), ['x','y','z']) + self.assertEqual(len(rd.component_map(Constraint)), 4) + # original bounds unchanged + self.assertEqual(i.d2.z.bounds, (-9,-7)) + # check disaggregated variable + self.assertIsInstance(rd.component("z"), Var) + self.assertEqual(rd.z.bounds, (-9,0)) + self.assertEqual(len(rd.z_bounds), 2) + self.assertEqual(rd.z_bounds['lb'].lower, None) + self.assertEqual(rd.z_bounds['lb'].upper, 0) + self.assertEqual(rd.z_bounds['ub'].lower, None) + self.assertEqual(rd.z_bounds['ub'].upper, 0) + i.d2.indicator_var = 1 + rd.z = 2 + self.assertEqual(rd.z_bounds['lb'].body(), -11) + self.assertEqual(rd.z_bounds['ub'].body(), 9) + + def test_local_var_suffix(self): + hull = TransformationFactory('gdp.hull') + + model = ConcreteModel() + model.x = Var(bounds=(5,100)) + model.y = Var(bounds=(0,100)) + model.d1 = Disjunct() + model.d1.c = Constraint(expr=model.y >= model.x) + model.d2 = Disjunct() + model.d2.z = Var(bounds=(-9, -7)) + model.d2.c = Constraint(expr=model.y >= model.d2.z) + model.disj = Disjunction(expr=[model.d1, model.d2]) + + # we don't declare z local + m = hull.create_using(model) + self.assertEqual(m.d2.z.lb, -9) + self.assertEqual(m.d2.z.ub, -7) + self.assertIsInstance(m.d2.transformation_block().component("z"), Var) + self.assertIs(m.d2.transformation_block().z, + hull.get_disaggregated_var(m.d2.z, m.d2)) + + # we do declare z local + model.d2.LocalVars = Suffix(direction=Suffix.LOCAL) + model.d2.LocalVars[model.d2] = [model.d2.z] + + m = hull.create_using(model) + + # make sure we did not disaggregate z + self.assertEqual(m.d2.z.lb, -9) + self.assertEqual(m.d2.z.ub, 0) + # it is its own disaggregated variable + 
self.assertIs(hull.get_disaggregated_var(m.d2.z, m.d2), m.d2.z) + # it does not exist on the transformation block + self.assertIsNone(m.d2.transformation_block().component("z")) + +class UntransformableObjectsOnDisjunct(unittest.TestCase): + def test_RangeSet(self): + ct.check_RangeSet(self, 'hull') + + def test_Expression(self): + ct.check_Expression(self, 'hull') + +class TransformABlock(unittest.TestCase, CommonTests): + def test_transformation_simple_block(self): + ct.check_transformation_simple_block(self, 'hull') + + def test_transform_block_data(self): + ct.check_transform_block_data(self, 'hull') + + def test_simple_block_target(self): + ct.check_simple_block_target(self, 'hull') + + def test_block_data_target(self): + ct.check_block_data_target(self, 'hull') + + def test_indexed_block_target(self): + ct.check_indexed_block_target(self, 'hull') + + def test_block_targets_inactive(self): + ct.check_block_targets_inactive(self, 'hull') + + def test_block_only_targets_transformed(self): + ct.check_block_only_targets_transformed(self, 'hull') + + def test_create_using(self): + m = models.makeTwoTermDisjOnBlock() + ct.diff_apply_to_and_create_using(self, m, 'gdp.hull') + +class DisjOnBlock(unittest.TestCase, CommonTests): + # when the disjunction is on a block, we want all of the stuff created by + # the transformation to go on that block also so that solving the block + # maintains its meaning + + def test_xor_constraint_added(self): + ct.check_xor_constraint_added(self, 'hull') + + def test_trans_block_created(self): + ct.check_trans_block_created(self, 'hull') + +class TestErrors(unittest.TestCase): + def setUp(self): + # set seed so we can test name collisions predictably + random.seed(666) + + def test_ask_for_transformed_constraint_from_untransformed_disjunct(self): + ct.check_ask_for_transformed_constraint_from_untransformed_disjunct( + self, 'hull') + + def test_silly_target(self): + ct.check_silly_target(self, 'hull') + + def 
test_retrieving_nondisjunctive_components(self): + ct.check_retrieving_nondisjunctive_components(self, 'hull') + + def test_transform_empty_disjunction(self): + ct.check_transform_empty_disjunction(self, 'hull') + + def test_deactivated_disjunct_nonzero_indicator_var(self): + ct.check_deactivated_disjunct_nonzero_indicator_var(self, + 'hull') + + def test_deactivated_disjunct_unfixed_indicator_var(self): + ct.check_deactivated_disjunct_unfixed_indicator_var(self, 'hull') + + def test_infeasible_xor_because_all_disjuncts_deactivated(self): + m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, + 'hull') + hull = TransformationFactory('gdp.hull') + transBlock = m.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance(transBlock, Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 2) + self.assertIsInstance(transBlock.component("disjunction_xor"), + Constraint) + disjunct1 = transBlock.relaxedDisjuncts[0] + # we disaggregated the (deactivated) indicator variables + d3_ind = m.disjunction_disjuncts[0].nestedDisjunction_disjuncts[0].\ + indicator_var + d4_ind = m.disjunction_disjuncts[0].nestedDisjunction_disjuncts[1].\ + indicator_var + self.assertIs(hull.get_disaggregated_var(d3_ind, + m.disjunction_disjuncts[0]), + disjunct1.indicator_var) + self.assertIs(hull.get_src_var(disjunct1.indicator_var), d3_ind) + self.assertIs(hull.get_disaggregated_var(d4_ind, + m.disjunction_disjuncts[0]), + disjunct1.indicator_var_4) + self.assertIs(hull.get_src_var(disjunct1.indicator_var_4), d4_ind) + + relaxed_xor = disjunct1.component( + "disjunction_disjuncts[0]._pyomo_gdp_hull_reformulation." 
+ "disjunction_disjuncts[0].nestedDisjunction_xor") + self.assertIsInstance(relaxed_xor, Constraint) + self.assertEqual(len(relaxed_xor), 1) + repn = generate_standard_repn(relaxed_xor['eq'].body) + self.assertEqual(relaxed_xor['eq'].lower, 0) + self.assertEqual(relaxed_xor['eq'].upper, 0) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 3) + # constraint says that the disaggregated indicator variables of the + # nested disjuncts sum to the indicator variable of the outer disjunct. + ct.check_linear_coef( self, repn, + m.disjunction.disjuncts[0].indicator_var, -1) + ct.check_linear_coef(self, repn, disjunct1.indicator_var, 1) + ct.check_linear_coef(self, repn, disjunct1.indicator_var_4, 1) + self.assertEqual(repn.constant, 0) + + # but the disaggregation constraints are going to force them to 0 (which + # will in turn force the outer disjunct indicator variable to 0, which + # is what we want) + d3_ind_dis = transBlock.disaggregationConstraints[1, None] + self.assertEqual(d3_ind_dis.lower, 0) + self.assertEqual(d3_ind_dis.upper, 0) + repn = generate_standard_repn(d3_ind_dis.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef(self, repn, disjunct1.indicator_var, -1) + ct.check_linear_coef(self, repn, + transBlock.relaxedDisjuncts[1].indicator_var, -1) + d4_ind_dis = transBlock.disaggregationConstraints[2, None] + self.assertEqual(d4_ind_dis.lower, 0) + self.assertEqual(d4_ind_dis.upper, 0) + repn = generate_standard_repn(d4_ind_dis.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 2) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef(self, repn, disjunct1.indicator_var_4, -1) + ct.check_linear_coef(self, repn, + transBlock.relaxedDisjuncts[1].indicator_var_9, -1) + + def test_mapping_method_errors(self): + m = models.makeTwoTermDisj_Nonlinear() + hull = TransformationFactory('gdp.hull') + 
hull.apply_to(m) + + log = StringIO() + with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): + self.assertRaisesRegexp( + AttributeError, + "'ConcreteModel' object has no attribute '_bigMConstraintMap'", + hull.get_var_bounds_constraint, + m.w) + self.assertRegexpMatches( + log.getvalue(), + ".*Either 'w' is not a disaggregated variable, " + "or the disjunction that disaggregates it has " + "not been properly transformed.") + + log = StringIO() + with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): + self.assertRaisesRegexp( + KeyError, + ".*_pyomo_gdp_hull_reformulation.relaxedDisjuncts\[1\].w", + hull.get_disaggregation_constraint, + m.d[1].transformation_block().w, + m.disjunction) + self.assertRegexpMatches(log.getvalue(), ".*It doesn't appear that " + "'_pyomo_gdp_hull_reformulation." + "relaxedDisjuncts\[1\].w' is a " + "variable that was disaggregated by " + "Disjunction 'disjunction'") + + log = StringIO() + with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): + self.assertRaisesRegexp( + AttributeError, + "'ConcreteModel' object has no attribute '_disaggregatedVarMap'", + hull.get_src_var, + m.w) + self.assertRegexpMatches( + log.getvalue(), + ".*'w' does not appear to be a disaggregated variable") + + log = StringIO() + with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): + self.assertRaisesRegexp( + KeyError, + ".*_pyomo_gdp_hull_reformulation.relaxedDisjuncts\[1\].w", + hull.get_disaggregated_var, + m.d[1].transformation_block().w, + m.d[1]) + self.assertRegexpMatches(log.getvalue(), + ".*It does not appear " + "'_pyomo_gdp_hull_reformulation." 
+ "relaxedDisjuncts\[1\].w' is a " + "variable which appears in disjunct 'd\[1\]'") + + m.random_disjunction = Disjunction(expr=[m.w == 2, m.w >= 7]) + self.assertRaisesRegexp( + GDP_Error, + "Disjunction 'random_disjunction' has not been properly " + "transformed: None of its disjuncts are transformed.", + hull.get_disaggregation_constraint, + m.w, + m.random_disjunction) + + self.assertRaisesRegexp( + GDP_Error, + "Disjunct 'random_disjunction_disjuncts\[0\]' has not been " + "transformed", + hull.get_disaggregated_var, + m.w, + m.random_disjunction.disjuncts[0]) + +class InnerDisjunctionSharedDisjuncts(unittest.TestCase): + def test_activeInnerDisjunction_err(self): + ct.check_activeInnerDisjunction_err(self, 'hull') + +class BlocksOnDisjuncts(unittest.TestCase): + def setUp(self): + # set seed so we can test name collisions predictably + random.seed(666) + + def makeModel(self): + # I'm going to multi-task and also check some types of constraints + # whose expressions need to be tested + m = ConcreteModel() + m.x = Var(bounds=(1, 5)) + m.y = Var(bounds=(0, 9)) + m.disj1 = Disjunct() + m.disj1.add_component("b.any_index", Constraint(expr=m.x >= 1.5)) + m.disj1.b = Block() + m.disj1.b.any_index = Constraint(Any) + m.disj1.b.any_index['local'] = m.x <= 2 + m.disj1.b.LocalVars = Suffix(direction=Suffix.LOCAL) + m.disj1.b.LocalVars[m.disj1] = [m.x] + m.disj1.b.any_index['nonlin-ub'] = m.y**2 <= 4 + m.disj2 = Disjunct() + m.disj2.non_lin_lb = Constraint(expr=log(1 + m.y) >= 1) + m.disjunction = Disjunction(expr=[m.disj1, m.disj2]) + return m + + def test_transformed_constraint_name_conflict(self): + m = self.makeModel() + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + transBlock = m.disj1.transformation_block() + self.assertIsInstance(transBlock.component("disj1.b.any_index"), + Constraint) + self.assertIsInstance(transBlock.component("disj1.b.any_index_4"), + Constraint) + xformed = hull.get_transformed_constraints( + 
m.disj1.component("b.any_index")) + self.assertEqual(len(xformed), 1) + self.assertIs(xformed[0], + transBlock.component("disj1.b.any_index")['lb']) + + xformed = hull.get_transformed_constraints(m.disj1.b.any_index['local']) + self.assertEqual(len(xformed), 1) + self.assertIs(xformed[0], + transBlock.component("disj1.b.any_index_4")[ + ('local','ub')]) + xformed = hull.get_transformed_constraints( + m.disj1.b.any_index['nonlin-ub']) + self.assertEqual(len(xformed), 1) + self.assertIs(xformed[0], + transBlock.component("disj1.b.any_index_4")[ + ('nonlin-ub','ub')]) + + def test_local_var_handled_correctly(self): + m = self.makeModel() + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + # test the local variable was handled correctly. + self.assertIs(hull.get_disaggregated_var(m.x, m.disj1), m.x) + self.assertEqual(m.x.lb, 0) + self.assertEqual(m.x.ub, 5) + self.assertIsNone(m.disj1.transformation_block().component("x")) + self.assertIsInstance(m.disj1.transformation_block().component("y"), + Var) + + # this doesn't require the block, I'm just coopting this test to make sure + # of some nonlinear expressions. 
+ def test_transformed_constraints(self): + m = self.makeModel() + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + + # test the transformed nonlinear constraints + nonlin_ub_list = hull.get_transformed_constraints( + m.disj1.b.any_index['nonlin-ub']) + self.assertEqual(len(nonlin_ub_list), 1) + cons = nonlin_ub_list[0] + self.assertEqual(cons.index(), ('nonlin-ub', 'ub')) + self.assertIs(cons.ctype, Constraint) + self.assertIsNone(cons.lower) + self.assertEqual(value(cons.upper), 0) + repn = generate_standard_repn(cons.body) + self.assertEqual(str(repn.nonlinear_expr), + "(0.9999*disj1.indicator_var + 0.0001)*" + "(_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].y/" + "(0.9999*disj1.indicator_var + 0.0001))**2") + self.assertEqual(len(repn.nonlinear_vars), 2) + self.assertIs(repn.nonlinear_vars[0], m.disj1.indicator_var) + self.assertIs(repn.nonlinear_vars[1], + hull.get_disaggregated_var(m.y, m.disj1)) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 1) + self.assertIs(repn.linear_vars[0], m.disj1.indicator_var) + self.assertEqual(repn.linear_coefs[0], -4) + + nonlin_lb_list = hull.get_transformed_constraints(m.disj2.non_lin_lb) + self.assertEqual(len(nonlin_lb_list), 1) + cons = nonlin_lb_list[0] + self.assertEqual(cons.index(), 'lb') + self.assertIs(cons.ctype, Constraint) + self.assertIsNone(cons.lower) + self.assertEqual(value(cons.upper), 0) + repn = generate_standard_repn(cons.body) + self.assertEqual(str(repn.nonlinear_expr), + "- ((0.9999*disj2.indicator_var + 0.0001)*" + "log(1 + " + "_pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].y/" + "(0.9999*disj2.indicator_var + 0.0001)))") + self.assertEqual(len(repn.nonlinear_vars), 2) + self.assertIs(repn.nonlinear_vars[0], m.disj2.indicator_var) + self.assertIs(repn.nonlinear_vars[1], + hull.get_disaggregated_var(m.y, m.disj2)) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear_vars), 1) + self.assertIs(repn.linear_vars[0], 
m.disj2.indicator_var) + self.assertEqual(repn.linear_coefs[0], 1) + +class DisaggregatingFixedVars(unittest.TestCase): + def test_disaggregate_fixed_variables(self): + m = models.makeTwoTermDisj() + m.x.fix(6) + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) + # check that we did indeed disaggregate x + transBlock = m.d[1]._transformation_block() + self.assertIsInstance(transBlock.component("x"), Var) + self.assertIs(hull.get_disaggregated_var(m.x, m.d[1]), transBlock.x) + self.assertIs(hull.get_src_var(transBlock.x), m.x) + + def test_do_not_disaggregate_fixed_variables(self): + m = models.makeTwoTermDisj() + m.x.fix(6) + hull = TransformationFactory('gdp.hull') + hull.apply_to(m, assume_fixed_vars_permanent=True) + # check that we didn't disaggregate x + transBlock = m.d[1]._transformation_block() + self.assertIsNone(transBlock.component("x")) + + +class NameDeprecationTest(unittest.TestCase): + def test_name_deprecated(self): + m = models.makeTwoTermDisj() + output = StringIO() + with LoggingIntercept(output, 'pyomo.gdp', logging.WARNING): + TransformationFactory('gdp.chull').apply_to(m) + self.assertIn("DEPRECATED: The 'gdp.chull' name is deprecated. 
" + "Please use the more apt 'gdp.hull' instead.", + output.getvalue().replace('\n', ' ')) + + def test_hull_chull_equivalent(self): + m = models.makeTwoTermDisj() + out1 = StringIO() + out2 = StringIO() + m1 = TransformationFactory('gdp.hull').create_using(m) + m2 = TransformationFactory('gdp.chull').create_using(m) + m1.pprint(ostream=out1) + m2.pprint(ostream=out2) + self.assertMultiLineEqual(out1.getvalue(), out2.getvalue()) diff --git a/pyomo/gdp/util.py b/pyomo/gdp/util.py index af8a9fce883..8f49f812ea6 100644 --- a/pyomo/gdp/util.py +++ b/pyomo/gdp/util.py @@ -12,13 +12,19 @@ import pyomo.core.expr.current as EXPR from pyomo.core.expr.numvalue import nonpyomo_leaf_types, native_numeric_types -from pyomo.gdp import GDP_Error +from pyomo.gdp import GDP_Error, Disjunction +from pyomo.gdp.disjunct import _DisjunctData from copy import deepcopy from pyomo.core.base.component import _ComponentBase, ComponentUID from pyomo.opt import TerminationCondition, SolverStatus from pyomo.common.deprecation import deprecation_warning +from six import iterkeys +import sys +from weakref import ref as weakref_ref +import logging +logger = logging.getLogger('pyomo.gdp') _acceptable_termination_conditions = set([ TerminationCondition.optimal, @@ -134,3 +140,157 @@ def is_child_of(parent, child, knownBlocks=None): node = node.parent_block() else: node = container + +def get_src_disjunction(xor_constraint): + """Return the Disjunction corresponding to xor_constraint + + Parameters + ---------- + xor_constraint: Constraint, which must be the logical constraint + (located on the transformation block) of some + Disjunction + """ + # NOTE: This is indeed a linear search through the Disjunctions on the + # model. I am leaving it this way on the assumption that asking XOR + # constraints for their Disjunction is not going to be a common + # question. 
If we ever need efficiency then we should store a reverse + # map from the XOR constraint to the Disjunction on the transformation + # block while we do the transformation. And then this method could query + # that map. + m = xor_constraint.model() + for disjunction in m.component_data_objects(Disjunction): + if disjunction._algebraic_constraint: + if disjunction._algebraic_constraint() is xor_constraint: + return disjunction + raise GDP_Error("It appears that '%s' is not an XOR or OR constraint " + "resulting from transforming a Disjunction." + % xor_constraint.name) + +def get_src_disjunct(transBlock): + """Return the Disjunct object whose transformed components are on + transBlock. + + Parameters + ---------- + transBlock: _BlockData which is in the relaxedDisjuncts IndexedBlock + on a transformation block. + """ + if not hasattr(transBlock, "_srcDisjunct") or \ + type(transBlock._srcDisjunct) is not weakref_ref: + raise GDP_Error("Block '%s' doesn't appear to be a transformation " + "block for a disjunct. No source disjunct found." + % transBlock.name) + return transBlock._srcDisjunct() + +def get_src_constraint(transformedConstraint): + """Return the original Constraint whose transformed counterpart is + transformedConstraint + + Parameters + ---------- + transformedConstraint: Constraint, which must be a component on one of + the BlockDatas in the relaxedDisjuncts Block of + a transformation block + """ + transBlock = transformedConstraint.parent_block() + # This should be our block, so if it's not, the user messed up and gave + # us the wrong thing. If they happen to also have a _constraintMap then + # the world is really against us. + if not hasattr(transBlock, "_constraintMap"): + raise GDP_Error("Constraint '%s' is not a transformed constraint" + % transformedConstraint.name) + # if something goes wrong here, it's a bug in the mappings. 
+ return transBlock._constraintMap['srcConstraints'][transformedConstraint] + +def _find_parent_disjunct(constraint): + # traverse up until we find the disjunct this constraint lives on + parent_disjunct = constraint.parent_block() + while not isinstance(parent_disjunct, _DisjunctData): + if parent_disjunct is None: + raise GDP_Error( + "Constraint '%s' is not on a disjunct and so was not " + "transformed" % constraint.name) + parent_disjunct = parent_disjunct.parent_block() + + return parent_disjunct + +def _get_constraint_transBlock(constraint): + parent_disjunct = _find_parent_disjunct(constraint) + # we know from _find_parent_disjunct that parent_disjunct is a Disjunct, + # so the below is OK + transBlock = parent_disjunct._transformation_block + if transBlock is None: + raise GDP_Error("Constraint '%s' is on a disjunct which has not been " + "transformed" % constraint.name) + # if it's not None, it's the weakref we wanted. + transBlock = transBlock() + + return transBlock + +def get_transformed_constraints(srcConstraint): + """Return the transformed version of srcConstraint + + Parameters + ---------- + srcConstraint: SimpleConstraint or _ConstraintData, which must be in + the subtree of a transformed Disjunct + """ + if srcConstraint.is_indexed(): + raise GDP_Error("Argument to get_transformed_constraint should be " + "a SimpleConstraint or _ConstraintData. (If you " + "want the container for all transformed constraints " + "from an IndexedDisjunction, this is the parent " + "component of a transformed constraint originating " + "from any of its _ComponentDatas.)") + transBlock = _get_constraint_transBlock(srcConstraint) + try: + return transBlock._constraintMap['transformedConstraints'][srcConstraint] + except: + logger.error("Constraint '%s' has not been transformed." 
+ % srcConstraint.name) + raise + +def _warn_for_active_disjunction(disjunction, disjunct, NAME_BUFFER): + # this should only have gotten called if the disjunction is active + assert disjunction.active + problemdisj = disjunction + if disjunction.is_indexed(): + for i in sorted(iterkeys(disjunction)): + if disjunction[i].active: + # a _DisjunctionData is active, we will yell about + # it specifically. + problemdisj = disjunction[i] + break + + parentblock = problemdisj.parent_block() + # the disjunction should only have been active if it wasn't transformed + assert problemdisj.algebraic_constraint is None + _probDisjName = problemdisj.getname( + fully_qualified=True, name_buffer=NAME_BUFFER) + _disjName = disjunct.getname(fully_qualified=True, name_buffer=NAME_BUFFER) + raise GDP_Error("Found untransformed disjunction '%s' in disjunct '%s'! " + "The disjunction must be transformed before the " + "disjunct. If you are using targets, put the " + "disjunction before the disjunct in the list." + % (_probDisjName, _disjName)) + +def _warn_for_active_disjunct(innerdisjunct, outerdisjunct, NAME_BUFFER): + assert innerdisjunct.active + problemdisj = innerdisjunct + if innerdisjunct.is_indexed(): + for i in sorted(iterkeys(innerdisjunct)): + if innerdisjunct[i].active: + # This shouldn't be true, we will complain about it. + problemdisj = innerdisjunct[i] + break + + raise GDP_Error("Found active disjunct '{0}' in disjunct '{1}'! Either {0} " + "is not in a disjunction or the disjunction it is in " + "has not been transformed. 
{0} needs to be deactivated " + "or its disjunction transformed before {1} can be " + "transformed.".format( + problemdisj.getname( + fully_qualified=True, name_buffer = NAME_BUFFER), + outerdisjunct.getname( + fully_qualified=True, + name_buffer=NAME_BUFFER))) diff --git a/pyomo/mpec/complementarity.py b/pyomo/mpec/complementarity.py index 9d9777ecde7..b07a4f255c7 100644 --- a/pyomo/mpec/complementarity.py +++ b/pyomo/mpec/complementarity.py @@ -21,6 +21,9 @@ from pyomo.core.base.numvalue import ZeroConstant, _sub from pyomo.core.base.misc import apply_indexed_rule, tabular_writer from pyomo.core.base.block import _BlockData +from pyomo.core.base.util import ( + disable_methods, Initializer, IndexedCallInitializer, CountedCallInitializer +) import logging logger = logging.getLogger('pyomo.core') @@ -132,84 +135,7 @@ def to_standard_form(self): self.v = Var(bounds=(0, None)) self.ve = Constraint(expr=self.v == _e1[2] - _e1[1]) - -@ModelComponentFactory.register("Complementarity conditions.") -class Complementarity(Block): - - Skip = (1000,) - - def __new__(cls, *args, **kwds): - if cls != Complementarity: - return super(Complementarity, cls).__new__(cls) - if args == (): - return SimpleComplementarity.__new__(SimpleComplementarity) - else: - return IndexedComplementarity.__new__(IndexedComplementarity) - - def __init__(self, *args, **kwargs): - self._expr = kwargs.pop('expr', None ) - # - kwargs.setdefault('ctype', Complementarity) - # - # The attribute _rule is initialized here. 
- # - Block.__init__(self, *args, **kwargs) - - def construct(self, data=None): - if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover - logger.debug("Constructing %s '%s', from data=%s", - self.__class__.__name__, self.name, str(data)) - if self._constructed: #pragma:nocover - return - timer = ConstructionTimer(self) - - # - _self_rule = self._rule - self._rule = None - super(Complementarity, self).construct() - self._rule = _self_rule - # - if _self_rule is None and self._expr is None: - # No construction rule or expression specified. - return - # - if not self.is_indexed(): - # - # Scalar component - # - if _self_rule is None: - self.add(None, self._expr) - else: - try: - tmp = _self_rule(self.parent_block()) - self.add(None, tmp) - except Exception: - err = sys.exc_info()[1] - logger.error( - "Rule failed when generating expression for " - "complementarity %s:\n%s: %s" - % ( self.name, type(err).__name__, err ) ) - raise - else: - if not self._expr is None: - raise IndexError( - "Cannot initialize multiple indices of a Complementarity " - "component with a single expression") - _self_parent = self._parent() - for idx in self._index: - try: - tmp = apply_indexed_rule( self, _self_rule, _self_parent, idx ) - self.add(idx, tmp) - except Exception: - err = sys.exc_info()[1] - logger.error( - "Rule failed when generating expression for " - "complementarity %s with index %s:\n%s: %s" - % ( self.name, idx, type(err).__name__, err ) ) - raise - timer.report() - - def add(self, index, cc): + def set_value(self, cc): """ Add a complementarity condition with a specified index. 
""" @@ -218,37 +144,98 @@ def add(self, index, cc): # The ComplementarityTuple has a fixed length, so we initialize # the _args component and return # - self[index]._args = ( as_numeric(cc.arg0), as_numeric(cc.arg1) ) - return self[index] + self._args = ( as_numeric(cc.arg0), as_numeric(cc.arg1) ) # - if cc.__class__ is tuple: + elif cc.__class__ is tuple: if cc is Complementarity.Skip: - return + del self.parent_component()[self.index()] elif len(cc) != 2: raise ValueError( "Invalid tuple for Complementarity %s (expected 2-tuple):" "\n\t%s" % (self.name, cc) ) + else: + self._args = tuple( as_numeric(x) for x in cc ) elif cc.__class__ is list: # - # Call add() recursively to apply the error same error + # Call set_value() recursively to apply the error same error # checks. # - return self.add(index, tuple(cc)) - elif cc is None: - raise ValueError(""" + return self.set_value(tuple(cc)) + else: + raise ValueError( + "Unexpected value for Complementarity %s:\n\t%s" + % (self.name, cc) ) + + +@ModelComponentFactory.register("Complementarity conditions.") +class Complementarity(Block): + + Skip = (1000,) + _ComponentDataClass = _ComplementarityData + + def __new__(cls, *args, **kwds): + if cls != Complementarity: + return super(Complementarity, cls).__new__(cls) + if args == (): + return super(Complementarity, cls).__new__(AbstractSimpleComplementarity) + else: + return super(Complementarity, cls).__new__(IndexedComplementarity) + + @staticmethod + def _complementarity_rule(b, *idx): + _rule = b.parent_component()._init_rule + if _rule is None: + return + cc = _rule(b.parent_block(), idx) + if cc is None: + raise ValueError(""" Invalid complementarity condition. The complementarity condition is None instead of a 2-tuple. Please modify your rule to return Complementarity.Skip instead of None. 
-Error thrown for Complementarity "%s" -""" % ( self.name, ) ) - else: +Error thrown for Complementarity "%s".""" % ( b.name, ) ) + b.set_value(cc) + + def __init__(self, *args, **kwargs): + kwargs.setdefault('ctype', Complementarity) + _init = tuple( _arg for _arg in ( + kwargs.pop('initialize', None), + kwargs.pop('rule', None), + kwargs.pop('expr', None) ) if _arg is not None ) + if len(_init) > 1: raise ValueError( - "Unexpected argument declaring Complementarity %s:\n\t%s" - % (self.name, cc) ) - # - self[index]._args = tuple( as_numeric(x) for x in cc ) - return self[index] + "Duplicate initialization: Complementarity() only accepts " + "one of 'initialize=', 'rule=', and 'expr='") + elif _init: + _init = _init[0] + else: + _init = None + + self._init_rule = Initializer( + _init, treat_sequences_as_mappings=False, allow_generators=True + ) + + if self._init_rule is not None: + kwargs['rule'] = Complementarity._complementarity_rule + Block.__init__(self, *args, **kwargs) + + # HACK to make the "counted call" syntax work. We wait until + # after the base class is set up so that is_indexed() is + # reliable. + if self._init_rule is not None \ + and self._init_rule.__class__ is IndexedCallInitializer: + self._init_rule = CountedCallInitializer(self, self._init_rule) + + + def add(self, index, cc): + """ + Add a complementarity condition with a specified index. 
+ """ + if cc is Complementarity.Skip: + return + _block = self[index] + _block.set_value(cc) + return _block def _pprint(self): """ @@ -298,10 +285,13 @@ def __init__(self, *args, **kwds): self._data[None] = self -class IndexedComplementarity(Complementarity): +@disable_methods({'add', 'set_value', 'to_standard_form'}) +class AbstractSimpleComplementarity(SimpleComplementarity): + pass - def _getitem_when_not_present(self, idx): - return self._data.setdefault(idx, _ComplementarityData(self)) + +class IndexedComplementarity(Complementarity): + pass @ModelComponentFactory.register("A list of complementarity conditions.") @@ -319,6 +309,10 @@ def __init__(self, **kwargs): args = (Set(),) self._nconditions = 0 Complementarity.__init__(self, *args, **kwargs) + # disable the implicit rule; construct will exhaust the + # user-provided rule, and then subsequent attempts to add a CC + # will bypass the rule + self._rule = None def add(self, expr): """ @@ -333,41 +327,21 @@ def construct(self, data=None): Construct the expression(s) for this complementarity condition. 
""" generate_debug_messages = __debug__ and logger.isEnabledFor(logging.DEBUG) - if generate_debug_messages: #pragma:nocover + if generate_debug_messages: logger.debug("Constructing complementarity list %s", self.name) - if self._constructed: #pragma:nocover + if self._constructed: return timer = ConstructionTimer(self) - _self_rule = self._rule self._constructed=True - if _self_rule is None: - return - # - _generator = None - _self_parent = self._parent() - if inspect.isgeneratorfunction(_self_rule): - _generator = _self_rule(_self_parent) - elif inspect.isgenerator(_self_rule): - _generator = _self_rule - if _generator is None: - while True: - val = self._nconditions + 1 - if generate_debug_messages: #pragma:nocover - logger.debug(" Constructing complementarity index "+str(val)) - expr = apply_indexed_rule( self, _self_rule, _self_parent, val ) - if expr is None: - raise ValueError( "Complementarity rule returned None " - "instead of ComplementarityList.End" ) - if (expr.__class__ is tuple and expr == ComplementarityList.End): - return - self.add(expr) - else: - for expr in _generator: - if expr is None: - raise ValueError( "Complementarity generator returned None " - "instead of ComplementarityList.End" ) - if (expr.__class__ is tuple and expr == ComplementarityList.End): - return - self.add(expr) + + if self._init_rule is not None: + _init = self._init_rule(self.parent_block(), ()) + for cc in iter(_init): + if cc is ComplementarityList.End: + break + if cc is Complementarity.Skip: + continue + self.add(cc) + timer.report() diff --git a/pyomo/mpec/tests/test_complementarity.py b/pyomo/mpec/tests/test_complementarity.py index 895a9e06fc3..d25b1cb04e5 100644 --- a/pyomo/mpec/tests/test_complementarity.py +++ b/pyomo/mpec/tests/test_complementarity.py @@ -208,11 +208,9 @@ def f(model, i): def test_cov6(self): # Testing construction with indexing and an expression M = self._setup() - try: + with self.assertRaisesRegex( + ValueError, "Invalid tuple for 
Complementarity"): M.cc = Complementarity([0,1], expr=()) - self.fail("Expected an IndexError") - except IndexError: - pass def test_cov7(self): # Testing error checking with return value @@ -313,7 +311,10 @@ def f(M): def test_list5(self): M = self._setup() - M.cc = ComplementarityList(rule=(complements(M.y + M.x3, M.x1 + 2*M.x2 == i) for i in range(3))) + M.cc = ComplementarityList( + rule=( complements(M.y + M.x3, M.x1 + 2*M.x2 == i) + for i in range(3) ) + ) self._test("list5", M) def test_list6(self): diff --git a/pyomo/neos/kestrel.py b/pyomo/neos/kestrel.py index aa5ebd38eaf..3db7f7640fb 100644 --- a/pyomo/neos/kestrel.py +++ b/pyomo/neos/kestrel.py @@ -26,6 +26,8 @@ import tempfile import logging +from six.moves.http_client import BadStatusLine + from pyomo.common.dependencies import attempt_import def _xmlrpclib_importer(): @@ -148,7 +150,7 @@ def setup_connection(self): try: result = self.neos.ping() logger.info("OK.") - except (socket.error, xmlrpclib.ProtocolError): + except (socket.error, xmlrpclib.ProtocolError, BadStatusLine): e = sys.exc_info()[1] self.neos = None logger.info("Fail.") @@ -162,8 +164,16 @@ def kill(self,jobnumber,password): logger.info(response) def solvers(self): - return self.neos.listSolversInCategory("kestrel") \ - if not self.neos is None else [] + if self.neos is None: + return [] + else: + attempt = 0 + while attempt < 3: + try: + return self.neos.listSolversInCategory("kestrel") + except socket.timeout: + attempt += 1 + return [] def retrieve(self,stub,jobNumber,password): # NEOS should return results as uu-encoded xmlrpclib.Binary data diff --git a/pyomo/network/decomposition.py b/pyomo/network/decomposition.py index acea4bac3ef..f8f21cb563c 100644 --- a/pyomo/network/decomposition.py +++ b/pyomo/network/decomposition.py @@ -31,7 +31,6 @@ logger = logging.getLogger('pyomo.network') - class SequentialDecomposition(FOQUSGraph): """ A sequential decomposition tool for Pyomo Network models @@ -470,7 +469,9 @@ def 
pass_values(self, arc, fixed_inputs): evars = [(evar, None)] for evar, idx in evars: fixed_inputs[dest_unit].add(evar) - evar.fix(value(mem[idx] if mem.is_indexed() else mem)) + val = value(mem[idx] if mem.is_indexed() else mem) + # val are numpy.float64; coerce val back to float + evar.fix(float(val)) for con in eblock.component_data_objects(Constraint, active=True): # we expect to find equality constraints with one linear variable @@ -501,7 +502,8 @@ def pass_values(self, arc, fixed_inputs): val = (value(con.lower) - repn.constant) / repn.linear_coefs[0] var = repn.linear_vars[0] fixed_inputs[dest_unit].add(var) - var.fix(val) + # val are numpy.float64; coerce val back to float + var.fix(float(val)) def pass_single_value(self, port, name, member, val, fixed): """ @@ -525,7 +527,8 @@ def pass_single_value(self, port, name, member, val, fixed): fval = (0 - repn.constant) / repn.linear_coefs[0] var = repn.linear_vars[0] fixed.add(var) - var.fix(fval) + # val are numpy.float64; coerce val back to float + var.fix(float(fval)) else: raise RuntimeError( "Member '%s' of port '%s' had more than " @@ -534,7 +537,8 @@ def pass_single_value(self, port, name, member, val, fixed): "to this port." 
% (name, port.name)) else: fixed.add(member) - member.fix(val) + # val are numpy.float64; coerce val back to float + member.fix(float(val)) def load_guesses(self, guesses, port, fixed): srcs = port.sources() @@ -577,7 +581,7 @@ def load_guesses(self, guesses, port, fixed): # silently ignore vars already fixed continue fixed.add(evar) - evar.fix(val) + evar.fix(float(val)) if not has_evars: # the only NumericValues in Pyomo that return True # for is_fixed are expressions and variables @@ -591,7 +595,7 @@ def load_guesses(self, guesses, port, fixed): port.name)) else: fixed.add(var) - var.fix(entry) + var.fix(float(entry)) def load_values(self, port, default, fixed, use_guesses): sources = port.sources() @@ -652,7 +656,7 @@ def check_value_fix(self, port, var, default, fixed, use_guesses, "guess, " if use_guesses else "")) fixed.add(var) - var.fix(val) + var.fix(float(val)) def combine_and_fix(self, port, name, obj, evars, fixed): """ diff --git a/pyomo/pysp/embeddedsp.py b/pyomo/pysp/embeddedsp.py index 814820bb255..85935626687 100644 --- a/pyomo/pysp/embeddedsp.py +++ b/pyomo/pysp/embeddedsp.py @@ -663,13 +663,17 @@ def __init__(self, reference_model): def _create_scenario_tree_model(self, size): assert size > 0 stm = CreateAbstractScenarioTreeModel() - stm.Stages.add('t1') - stm.Stages.add('t2') - stm.Nodes.add('root') + _stages = ["t1", "t2"] + _nodes = ["root"] + _scenarios = [] for i in xrange(1, size+1): - stm.Nodes.add('n'+str(i)) - stm.Scenarios.add('s'+str(i)) - stm = stm.create_instance() + _nodes.append('n'+str(i)) + _scenarios.append('s'+str(i)) + stm = stm.create_instance( + data={None: {"Stages": _stages, + "Nodes": _nodes, + "Scenarios": _scenarios}} + ) stm.NodeStage['root'] = 't1' stm.ConditionalProbability['root'] = 1.0 weight = 1.0/float(size) diff --git a/pyomo/pysp/phsolverserverutils.py b/pyomo/pysp/phsolverserverutils.py index fda1ba4083d..b92e98a2cec 100644 --- a/pyomo/pysp/phsolverserverutils.py +++ b/pyomo/pysp/phsolverserverutils.py @@ 
-2,8 +2,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -13,20 +13,21 @@ import time import itertools - -from pyutilib.enum import Enum +import enum from pyomo.core import * from six import iteritems, itervalues -InvocationType = Enum('SingleInvocation', - 'PerBundleInvocation', - 'PerBundleChainedInvocation', - 'PerScenarioInvocation', - 'PerScenarioChainedInvocation', - 'PerNodeInvocation', - 'PerNodeChainedInvocation') + +class InvocationType(str, enum.Enum): + SingleInvocation = 'SingleInvocation' + PerBundleInvocation = 'PerBundleInvocation' + PerBundleChainedInvocation = 'PerBundleChainedInvocation' + PerScenarioInvocation = 'PerScenarioInvocation' + PerScenarioChainedInvocation = 'PerScenarioChainedInvocation' + PerNodeInvocation = 'PerNodeInvocation' + PerNodeChainedInvocation = 'PerNodeChainedInvocation' class TransmitType(object): @@ -89,7 +90,7 @@ def collect_full_results(ph, var_config): print("Waiting for results extraction") num_results_so_far = 0 - + while (num_results_so_far < len(ph._scenario_tree.subproblems)): action_handle = ph._solver_manager.wait_any() @@ -146,7 +147,7 @@ def warmstart_scenario_instances(ph): action_handle_scenario_map = {} # maps action handles to scenario names ph._solver_manager.begin_bulk() - + if ph._scenario_tree.contains_bundles(): for bundle in ph._scenario_tree._scenario_bundles: @@ -174,7 +175,7 @@ def warmstart_scenario_instances(ph): 
scenario_action_handle_map[scenario.name] = new_action_handle action_handle_scenario_map[new_action_handle] = scenario.name - + ph._solver_manager.end_bulk() if ph._verbose: @@ -231,7 +232,7 @@ def transmit_weights(ph): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + if ph._scenario_tree.contains_bundles(): for bundle in ph._scenario_tree._scenario_bundles: @@ -266,7 +267,7 @@ def transmit_weights(ph): generateResponse=generate_responses, name=scenario.name, new_weights=scenario._w) ) - + ph._solver_manager.end_bulk() if generate_responses: @@ -294,7 +295,7 @@ def transmit_xbars(ph): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + if ph._scenario_tree.contains_bundles(): for bundle in ph._scenario_tree._scenario_bundles: @@ -333,7 +334,7 @@ def transmit_xbars(ph): generateResponse=generate_responses, name=scenario.name, new_xbars=xbars_to_transmit) ) - + ph._solver_manager.end_bulk() if generate_responses: @@ -382,14 +383,14 @@ def release_phsolverservers(ph): print("Revoking PHPyroWorker job assignments") ph._solver_manager.begin_bulk() - + for job, worker in iteritems(ph._phpyro_job_worker_map): ph._solver_manager.queue(action="release", queue_name=ph._phpyro_job_worker_map[job], name=worker, object_name=job, generateResponse=False) - + ph._solver_manager.end_bulk() ph._phpyro_worker_jobs_map = {} @@ -583,7 +584,7 @@ def activate_ph_objective_weight_terms(ph): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + for subproblem in ph._scenario_tree.subproblems: action_handles.append( ph._solver_manager.queue( action="activate_ph_objective_weight_terms", @@ -612,7 +613,7 @@ def deactivate_ph_objective_weight_terms(ph): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + for subproblem in ph._scenario_tree.subproblems: action_handles.append( ph._solver_manager.queue( action="deactivate_ph_objective_weight_terms", @@ -642,7 +643,7 @@ def 
activate_ph_objective_proximal_terms(ph): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + for subproblem in ph._scenario_tree.subproblems: action_handles.append( ph._solver_manager.queue( action="activate_ph_objective_proximal_terms", @@ -678,7 +679,7 @@ def deactivate_ph_objective_proximal_terms(ph): queue_name=ph._phpyro_job_worker_map[subproblem.name], generateResponse=generate_responses, name=subproblem.name) ) - + ph._solver_manager.end_bulk() if generate_responses: @@ -798,7 +799,7 @@ def transmit_external_function_invocation_to_worker( action_handle = ph._solver_manager.queue(action="invoke_external_function", queue_name=ph._phpyro_job_worker_map[worker_name], name=worker_name, - invocation_type=invocation_type.key, + invocation_type=invocation_type.value, generateResponse=generate_response, module_name=module_name, function_name=function_name, @@ -839,7 +840,7 @@ def transmit_external_function_invocation( action="invoke_external_function", queue_name=ph._phpyro_job_worker_map[bundle.name], name=bundle.name, - invocation_type=invocation_type.key, + invocation_type=invocation_type.value, generateResponse=generate_responses, module_name=module_name, function_name=function_name, @@ -855,7 +856,7 @@ def transmit_external_function_invocation( action="invoke_external_function", queue_name=ph._phpyro_job_worker_map[scenario.name], name=scenario.name, - invocation_type=invocation_type.key, + invocation_type=invocation_type.value, generateResponse=generate_responses, module_name=module_name, function_name=function_name, @@ -890,7 +891,7 @@ def define_import_suffix(ph, suffix_name): generate_responses = ph._handshake_with_phpyro ph._solver_manager.begin_bulk() - + for subproblem in ph._scenario_tree.subproblems: action_handles.append( ph._solver_manager.queue( action="define_import_suffix", diff --git a/pyomo/pysp/scenariotree/manager.py b/pyomo/pysp/scenariotree/manager.py index acae8fed6df..0b753dc89e1 100644 --- 
a/pyomo/pysp/scenariotree/manager.py +++ b/pyomo/pysp/scenariotree/manager.py @@ -24,7 +24,6 @@ namedtuple) import pyutilib.misc -import pyutilib.enum from pyutilib.pyro import (shutdown_pyro_components, using_pyro4) from pyomo.common.dependencies import dill, dill_available @@ -41,8 +40,7 @@ safe_register_common_option, _domain_must_be_str, _domain_tuple_of_str) -from pyomo.pysp.util.misc import (load_external_module, - _EnumValueWithData) +from pyomo.pysp.util.misc import load_external_module from pyomo.pysp.scenariotree.instance_factory import \ ScenarioTreeInstanceFactory from pyomo.pysp.scenariotree.action_manager_pyro \ @@ -60,52 +58,18 @@ logger = logging.getLogger('pyomo.pysp') -_invocation_type_enum_list = [] -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 0, 'Single')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 1, 'PerScenario')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 2, 'PerScenarioChained')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 3, 'PerBundle')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 4, 'PerBundleChained')) - -##### These values are DEPRECATED -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 5, 'SingleInvocation')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 6, 'PerScenarioInvocation')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 7, 'PerScenarioChainedInvocation')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 8, 'PerBundleInvocation')) -_invocation_type_enum_list.append( - pyutilib.enum.EnumValue('InvocationType', 9, 'PerBundleChainedInvocation')) -##### - -# These are enum values that carry data with them -_invocation_type_enum_list.append( - _EnumValueWithData(_domain_must_be_str, - 'InvocationType', 10, 'OnScenario')) 
-_invocation_type_enum_list.append( - _EnumValueWithData(_domain_tuple_of_str, - 'InvocationType', 11, 'OnScenarios')) -_invocation_type_enum_list.append( - _EnumValueWithData(_domain_must_be_str, - 'InvocationType', 12, 'OnBundle')) -_invocation_type_enum_list.append( - _EnumValueWithData(_domain_tuple_of_str, - 'InvocationType', 13, 'OnBundles')) -_invocation_type_enum_list.append( - _EnumValueWithData(_domain_tuple_of_str, - 'InvocationType', 14, 'OnScenariosChained')) -_invocation_type_enum_list.append( - _EnumValueWithData(_domain_tuple_of_str, - 'InvocationType', 15, 'OnBundlesChained')) - -class _InvocationTypeDocumentedEnum(pyutilib.enum.Enum): +class _InvocationTypeMeta(type): + def __contains__(cls, obj): + return isinstance(obj, cls._value) + def __iter__(cls): + return iter( + sorted((obj for obj in cls.__dict__.values() + if isinstance(obj, cls._value)), + key=lambda _: _.index) + ) + +@six.add_metaclass(_InvocationTypeMeta) +class InvocationType(object): """Controls execution of function invocations with a scenario tree manager. In all cases, the function must accept the process-local scenario @@ -220,8 +184,61 @@ class _InvocationTypeDocumentedEnum(pyutilib.enum.Enum): managed by the named scenario tree worker. 
""" - -InvocationType = _InvocationTypeDocumentedEnum(*_invocation_type_enum_list) + class _value(object): + def __init__(self, key, index): + self._key = key + self._index = index + @property + def key(self): + return self._key + @property + def index(self): + return self._index + def __hash__(self): + return hash((self.key, self.index)) + def __eq__(self, other): + return (self.__class__ is other.__class__) and \ + (self.key == other.key) and (self.index == other.index) + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): + return ("InvocationType.%s" % (self.key)) + class _value_with_data(_value): + def __init__(self, key, id_, domain): + super(self.__class__, self).__init__(key, id_) + self._domain = domain + self._data = None + @property + def data(self): + return self._data + def __call__(self, data): + if self.data is not None: + raise ValueError("Must create from InvocationType class") + obj = self.__class__(self.key, self.index, self._domain) + assert obj.data is None + obj._data = self._domain(data) + assert obj.data is obj._data + return obj + Single = _value("Single", 0) + PerScenario = _value("PerScenario", 1) + PerScenarioChained = _value("PerScenarioChained", 2) + PerBundle = _value("PerBundle", 3) + PerBundleChained = _value("PerBundleChained", 4) + ### deprecated + SingleInvocation = _value("SingleInvocation", 5) + PerScenarioInvocation = _value("PerScenarioInvocation", 6) + PerScenarioChainedInvocation = _value("PerScenarioChainedInvocation", 7) + PerBundleInvocation = _value("PerBundleInvocation", 8) + PerBundleChainedInvocation = _value("PerBundleChainedInvocation", 9) + ### + OnScenario = _value_with_data("OnScenario", 10 ,_domain_must_be_str) + OnScenarios = _value_with_data("OnScenarios", 11, _domain_tuple_of_str) + OnBundle = _value_with_data("OnBundle", 12, _domain_must_be_str) + OnBundles = _value_with_data("OnBundles", 13, _domain_tuple_of_str) + OnScenariosChained = _value_with_data("OnScenariosChained", 14, 
_domain_tuple_of_str) + OnBundlesChained = _value_with_data("OnBundlesChained", 15, _domain_tuple_of_str) + def __init__(self, *args, **kwds): + raise NotImplementedError _deprecated_invocation_types = \ {InvocationType.SingleInvocation: InvocationType.Single, @@ -1753,7 +1770,7 @@ def _invoke_function_by_worker(self, raise ValueError("Unexpected function invocation type '%s'. " "Expected one of %s" % (invocation_type, - [str(v) for v in InvocationType._values])) + [str(v) for v in InvocationType])) result = None if (invocation_type == InvocationType.Single): @@ -3592,7 +3609,7 @@ def _invoke_function_impl( raise ValueError("Unexpected function invocation type '%s'. " "Expected one of %s" % (invocation_type, - [str(v) for v in InvocationType._values])) + [str(v) for v in InvocationType])) if oneway_call: action_handle_data = None diff --git a/pyomo/pysp/scenariotree/manager_worker_pyro.py b/pyomo/pysp/scenariotree/manager_worker_pyro.py index af5e751316f..9481ea8509f 100644 --- a/pyomo/pysp/scenariotree/manager_worker_pyro.py +++ b/pyomo/pysp/scenariotree/manager_worker_pyro.py @@ -13,7 +13,6 @@ import time from pyomo.common.dependencies import dill, dill_available -from pyomo.pysp.util.misc import _EnumValueWithData from pyomo.pysp.util.configured_object import PySPConfiguredObject from pyomo.pysp.util.config import (PySPConfigBlock, safe_declare_common_option) @@ -270,16 +269,15 @@ def _invoke_function_impl(self, print("Received request to invoke anonymous " "function serialized using the dill module") - # pyutilib.Enum can not be serialized depending on the - # serializer type used by Pyro, so we just transmit it - # as a (key, data) tuple in that case + # InvocationType is transmitted as (key, data) to + # avoid issues with Pyro, so this function accepts a + # tuple and converts back to InvocationType if type(invocation_type) is tuple: _invocation_type_key, _invocation_type_data = invocation_type assert isinstance(_invocation_type_key, string_types) 
invocation_type = getattr(InvocationType, _invocation_type_key) if _invocation_type_data is not None: - assert isinstance(invocation_type, _EnumValueWithData) invocation_type = invocation_type(_invocation_type_data) # here we assume that if the module_name is None, diff --git a/pyomo/pysp/solvers/spsolvershellcommand.py b/pyomo/pysp/solvers/spsolvershellcommand.py index 42dafab5f8f..a231430b139 100644 --- a/pyomo/pysp/solvers/spsolvershellcommand.py +++ b/pyomo/pysp/solvers/spsolvershellcommand.py @@ -15,6 +15,7 @@ import pyutilib.misc +import pyomo.common from pyomo.pysp.solvers.spsolver import SPSolver logger = logging.getLogger('pyomo.pysp') diff --git a/pyomo/pysp/tests/scenariotreemanager/test_scenariotreemanagersolver.py b/pyomo/pysp/tests/scenariotreemanager/test_scenariotreemanagersolver.py index 0dc5e821454..527b527240d 100644 --- a/pyomo/pysp/tests/scenariotreemanager/test_scenariotreemanagersolver.py +++ b/pyomo/pysp/tests/scenariotreemanager/test_scenariotreemanagersolver.py @@ -25,8 +25,7 @@ from pyomo.pysp.util.config import PySPConfigBlock from pyomo.pysp.scenariotree.manager import \ (ScenarioTreeManagerClientSerial, - ScenarioTreeManagerClientPyro, - InvocationType) + ScenarioTreeManagerClientPyro) from pyomo.pysp.scenariotree.instance_factory import \ ScenarioTreeInstanceFactory from pyomo.pysp.scenariotree.manager_solver import \ diff --git a/pyomo/pysp/util/misc.py b/pyomo/pysp/util/misc.py index 9e6132c54b0..d19c6ee9c39 100644 --- a/pyomo/pysp/util/misc.py +++ b/pyomo/pysp/util/misc.py @@ -31,14 +31,13 @@ except ImportError: pstats_available=False -from pyutilib.enum import EnumValue from pyutilib.misc import PauseGC, import_file from pyutilib.services import TempfileManager import pyutilib.common from pyomo.opt.base import ConverterError from pyomo.common.dependencies import attempt_import from pyomo.common.plugin import (ExtensionPoint, - SingletonPlugin) + SingletonPlugin) from pyomo.pysp.util.config import PySPConfigBlock from 
pyomo.pysp.util.configured_object import PySPConfiguredObject @@ -515,31 +514,3 @@ def _get_test_dispatcher(ns_host=None, dispatcher_port = None dispatcher_process = None return dispatcher_process, dispatcher_port - -class _EnumValueWithData(EnumValue): - """A subclass of pyutilib.enum.EnumValue that carries additional data. - - The data carried by the _EnumValueWithData object does not affect - equality checks with other instances of the same enumerated value, - nor does it affect containment checks in the owning Enum - container. - - """ - def __init__(self, check_type, *args, **kwds): - super(_EnumValueWithData, self).__init__(*args, **kwds) - self._data = None - self._check_type = check_type - @property - def data(self): - return self._data - def __repr__(self): - return (super(_EnumValueWithData, self).__repr__() + \ - ": %s" % (self.data)) - def __call__(self, data): - self._check_type(data) - obj = self.__class__(self._check_type, - self.enumtype, - self.index, - self.key) - obj._data = data - return obj diff --git a/pyomo/repn/plugins/baron_writer.py b/pyomo/repn/plugins/baron_writer.py index e650fbabbae..d53f59a36a3 100644 --- a/pyomo/repn/plugins/baron_writer.py +++ b/pyomo/repn/plugins/baron_writer.py @@ -19,6 +19,7 @@ from six.moves import xrange from pyutilib.math import isclose +from pyomo.common.collections import OrderedSet from pyomo.opt import ProblemFormat from pyomo.opt.base import AbstractProblemWriter, WriterFactory from pyomo.core.expr.numvalue import ( @@ -142,8 +143,6 @@ def visiting_potential_leaf(self, node): if node.is_expression_type(): # we will descend into this, so type checking will happen later - if node.is_component_type(): - self.treechecker(node) return False, None if node.is_component_type(): @@ -204,7 +203,7 @@ def _write_equations_section(self, skip_trivial_constraints, sorter): - referenced_variable_ids = set() + referenced_variable_ids = OrderedSet() def _skip_trivial(constraint_data): if skip_trivial_constraints: @@ -415,7 
+414,7 @@ def mutable_param_gen(b): c_eqns, l_eqns): - variables = set() + variables = OrderedSet() #print(symbol_map.byObject.keys()) eqn_body = expression_to_string(constraint_data.body, variables, smap=symbol_map) #print(symbol_map.byObject.keys()) @@ -496,7 +495,7 @@ def mutable_param_gen(b): else: output_file.write("maximize ") - variables = set() + variables = OrderedSet() #print(symbol_map.byObject.keys()) obj_string = expression_to_string(objective_data.expr, variables, smap=symbol_map) #print(symbol_map.byObject.keys()) diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py index de61e12a0d6..091054fdaaf 100644 --- a/pyomo/repn/plugins/gams_writer.py +++ b/pyomo/repn/plugins/gams_writer.py @@ -180,11 +180,14 @@ def __init__(self, var_list, symbol_map): self.ints = [] self.positive = [] self.reals = [] + self.fixed = [] # categorize variables for var in var_list: v = symbol_map.getObject(var) - if v.is_binary(): + if v.is_fixed(): + self.fixed.append(var) + elif v.is_binary(): self.binary.append(var) elif v.is_integer(): if (v.has_lb() and (value(v.lb) >= 0)) and \ @@ -322,8 +325,16 @@ def __call__(self, | 2 : sort keys AND sort names (over declaration order) - put_results=None Filename for optionally writing solution values and - marginals to (put_results).dat, and solver statuses - to (put_results + 'stat').dat. + marginals. If put_results_format is 'gdx', then GAMS + will write solution values and marginals to + GAMS_MODEL_p.gdx and solver statuses to + {put_results}_s.gdx. If put_results_format is 'dat', + then solution values and marginals are written to + (put_results).dat, and solver statuses to (put_results + + 'stat').dat. + - put_results_format='gdx' + Format used for put_results, one of 'gdx', 'dat'. + """ # Make sure not to modify the user's dictionary, @@ -343,6 +354,12 @@ def __call__(self, # If None, will chose from lp, nlp, mip, and minlp. 
mtype = io_options.pop("mtype", None) + # Improved GAMS calling options + solprint = io_options.pop("solprint", "off") + limrow = io_options.pop("limrow", 0) + limcol = io_options.pop("limcol", 0) + solvelink = io_options.pop("solvelink", 5) + # Lines to add before solve statement. add_options = io_options.pop("add_options", None) @@ -368,6 +385,8 @@ def __call__(self, # Filename for optionally writing solution values and marginals # Set to True by GAMSSolver put_results = io_options.pop("put_results", None) + put_results_format = io_options.pop("put_results_format", 'gdx') + assert put_results_format in ('gdx','dat') if len(io_options): raise ValueError( @@ -461,8 +480,13 @@ def var_label(obj): warmstart=warmstart, solver=solver, mtype=mtype, + solprint=solprint, + limrow=limrow, + limcol=limcol, + solvelink=solvelink, add_options=add_options, - put_results=put_results + put_results=put_results, + put_results_format=put_results_format, ) finally: if isinstance(output_filename, string_types): @@ -483,8 +507,14 @@ def _write_model(self, warmstart, solver, mtype, + solprint, + limrow, + limcol, + solvelink, add_options, - put_results): + put_results, + put_results_format, + ): constraint_names = [] ConstraintIO = StringIO() linear = True @@ -578,6 +608,7 @@ def _write_model(self, categorized_vars = Categorizer(var_list, symbolMap) # Write the GAMS model + output_file.write("$offlisting\n") # $offdigit ignores extra precise digits instead of erroring output_file.write("$offdigit\n\n") output_file.write("EQUATIONS\n\t") @@ -593,9 +624,17 @@ def _write_model(self, output_file.write(";\n\nPOSITIVE VARIABLES\n\t") output_file.write("\n\t".join(categorized_vars.positive)) output_file.write(";\n\nVARIABLES\n\tGAMS_OBJECTIVE\n\t") - output_file.write("\n\t".join(categorized_vars.reals)) + output_file.write("\n\t".join( + categorized_vars.reals + categorized_vars.fixed + )) output_file.write(";\n\n") + for var in categorized_vars.fixed: + output_file.write("%s.fx = %s;\n" % ( 
+ var, ftoa(value(symbolMap.getObject(var))) + )) + output_file.write("\n") + for line in ConstraintIO.getvalue().splitlines(): if len(line) > 80000: line = split_long_line(line) @@ -679,6 +718,14 @@ def _write_model(self, % (solver, mtype)) output_file.write("option %s=%s;\n" % (mtype, solver)) + output_file.write("option solprint=%s;\n" % solprint) + output_file.write("option limrow=%d;\n" % limrow) + output_file.write("option limcol=%d;\n" % limcol) + output_file.write("option solvelink=%d;\n" % solvelink) + + if put_results is not None and put_results_format == 'gdx': + output_file.write("option savepoint=1;\n") + if add_options is not None: output_file.write("\n* START USER ADDITIONAL OPTIONS\n") for line in add_options: @@ -720,28 +767,33 @@ def _write_model(self, output_file.write("ETSOLVE = %s.etsolve\n\n" % model_name) if put_results is not None: - results = put_results + '.dat' - output_file.write("\nfile results /'%s'/;" % results) - output_file.write("\nresults.nd=15;") - output_file.write("\nresults.nw=21;") - output_file.write("\nput results;") - output_file.write("\nput 'SYMBOL : LEVEL : MARGINAL' /;") - for var in var_list: - output_file.write("\nput %s %s.l %s.m /;" % (var, var, var)) - for con in constraint_names: - output_file.write("\nput %s %s.l %s.m /;" % (con, con, con)) - output_file.write("\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l " - "GAMS_OBJECTIVE.m;\n") - - statresults = put_results + 'stat.dat' - output_file.write("\nfile statresults /'%s'/;" % statresults) - output_file.write("\nstatresults.nd=15;") - output_file.write("\nstatresults.nw=21;") - output_file.write("\nput statresults;") - output_file.write("\nput 'SYMBOL : VALUE' /;") - for stat in stat_vars: - output_file.write("\nput '%s' %s /;\n" % (stat, stat)) - + if put_results_format == 'gdx': + output_file.write("\nexecute_unload '%s_s.gdx'" % put_results) + for stat in stat_vars: + output_file.write(", %s" % stat) + output_file.write(";\n") + else: + results = put_results + '.dat' + 
output_file.write("\nfile results /'%s'/;" % results) + output_file.write("\nresults.nd=15;") + output_file.write("\nresults.nw=21;") + output_file.write("\nput results;") + output_file.write("\nput 'SYMBOL : LEVEL : MARGINAL' /;") + for var in var_list: + output_file.write("\nput %s %s.l %s.m /;" % (var, var, var)) + for con in constraint_names: + output_file.write("\nput %s %s.l %s.m /;" % (con, con, con)) + output_file.write("\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l " + "GAMS_OBJECTIVE.m;\n") + + statresults = put_results + 'stat.dat' + output_file.write("\nfile statresults /'%s'/;" % statresults) + output_file.write("\nstatresults.nd=15;") + output_file.write("\nstatresults.nw=21;") + output_file.write("\nput statresults;") + output_file.write("\nput 'SYMBOL : VALUE' /;") + for stat in stat_vars: + output_file.write("\nput '%s' %s /;\n" % (stat, stat)) valid_solvers = { 'ALPHAECP': {'MINLP','MIQCP'}, diff --git a/pyomo/repn/tests/gams/fixed_linear_expr.gams.baseline b/pyomo/repn/tests/gams/fixed_linear_expr.gams.baseline new file mode 100644 index 00000000000..b6ddb2d06b8 --- /dev/null +++ b/pyomo/repn/tests/gams/fixed_linear_expr.gams.baseline @@ -0,0 +1,52 @@ +$offlisting +$offdigit + +EQUATIONS + c1_lo + c2 + obj; + +POSITIVE VARIABLES + x; + +VARIABLES + GAMS_OBJECTIVE + y; + +y.fx = 0; + +c1_lo.. 0 =l= y + y ; +c2.. x + y =e= 1 ; +obj.. 
GAMS_OBJECTIVE =e= x ; + + +MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; +SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; + +Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; +MODELSTAT = GAMS_MODEL.modelstat; +SOLVESTAT = GAMS_MODEL.solvestat; + +Scalar OBJEST 'best objective', OBJVAL 'objective value'; +OBJEST = GAMS_MODEL.objest; +OBJVAL = GAMS_MODEL.objval; + +Scalar NUMVAR 'number of variables'; +NUMVAR = GAMS_MODEL.numvar + +Scalar NUMEQU 'number of equations'; +NUMEQU = GAMS_MODEL.numequ + +Scalar NUMDVAR 'number of discrete variables'; +NUMDVAR = GAMS_MODEL.numdvar + +Scalar NUMNZ 'number of nonzeros'; +NUMNZ = GAMS_MODEL.numnz + +Scalar ETSOLVE 'time to execute solve statement'; +ETSOLVE = GAMS_MODEL.etsolve + diff --git a/pyomo/repn/tests/gams/no_column_ordering_linear.gams.baseline b/pyomo/repn/tests/gams/no_column_ordering_linear.gams.baseline index 75e56dc9f3c..2f54353a004 100644 --- a/pyomo/repn/tests/gams/no_column_ordering_linear.gams.baseline +++ b/pyomo/repn/tests/gams/no_column_ordering_linear.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -15,6 +16,10 @@ obj.. GAMS_OBJECTIVE =e= a + b + c ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/no_column_ordering_quadratic.gams.baseline b/pyomo/repn/tests/gams/no_column_ordering_quadratic.gams.baseline index f5419e59642..0a91e1e8295 100644 --- a/pyomo/repn/tests/gams/no_column_ordering_quadratic.gams.baseline +++ b/pyomo/repn/tests/gams/no_column_ordering_quadratic.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -15,6 +16,10 @@ obj.. 
GAMS_OBJECTIVE =e= a + b + c + a*a + b*b + c*c + a*b + a*c + b*c ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/no_row_ordering.gams.baseline b/pyomo/repn/tests/gams/no_row_ordering.gams.baseline index 5fdf5b3398a..f739c26a0f5 100644 --- a/pyomo/repn/tests/gams/no_row_ordering.gams.baseline +++ b/pyomo/repn/tests/gams/no_row_ordering.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -23,6 +24,10 @@ obj.. GAMS_OBJECTIVE =e= a ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small1.pyomo.gms b/pyomo/repn/tests/gams/small1.pyomo.gms index 8795ec6225c..861d9cca798 100644 --- a/pyomo/repn/tests/gams/small1.pyomo.gms +++ b/pyomo/repn/tests/gams/small1.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -16,6 +17,10 @@ x1.l = 1; x2.l = 1; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small10.pyomo.gms b/pyomo/repn/tests/gams/small10.pyomo.gms index c4ed2a245b6..486c2c142cb 100644 --- a/pyomo/repn/tests/gams/small10.pyomo.gms +++ b/pyomo/repn/tests/gams/small10.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -39,6 +40,10 @@ c15.. 
GAMS_OBJECTIVE =e= x1 + 0*x1 + 0*x1 + x1*x1*0 + x1*x1*0 + 0*power(x1, 2) ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small11.pyomo.gms b/pyomo/repn/tests/gams/small11.pyomo.gms index c72c90629ee..dd0e89528d4 100644 --- a/pyomo/repn/tests/gams/small11.pyomo.gms +++ b/pyomo/repn/tests/gams/small11.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -25,6 +26,10 @@ c4.. GAMS_OBJECTIVE =e= x3 ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small12.pyomo.gms b/pyomo/repn/tests/gams/small12.pyomo.gms index 199c0e97fd3..ad771f58bb6 100644 --- a/pyomo/repn/tests/gams/small12.pyomo.gms +++ b/pyomo/repn/tests/gams/small12.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -72,6 +73,10 @@ x6.l = -2; x7.l = 2; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small13.pyomo.gms b/pyomo/repn/tests/gams/small13.pyomo.gms index 7c2b3df1132..f4800bf1005 100644 --- a/pyomo/repn/tests/gams/small13.pyomo.gms +++ b/pyomo/repn/tests/gams/small13.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -18,6 +19,10 @@ c4.. 
GAMS_OBJECTIVE =e= x1 ; x1.l = 0.5; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp maximizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small14a.pyomo.gms b/pyomo/repn/tests/gams/small14a.pyomo.gms index 6c88c52a869..f3a0179a86c 100644 --- a/pyomo/repn/tests/gams/small14a.pyomo.gms +++ b/pyomo/repn/tests/gams/small14a.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -46,6 +47,10 @@ x1.l = 1; x2.l = 0; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING dnlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small15.pyomo.gms b/pyomo/repn/tests/gams/small15.pyomo.gms index 8795ec6225c..861d9cca798 100644 --- a/pyomo/repn/tests/gams/small15.pyomo.gms +++ b/pyomo/repn/tests/gams/small15.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -16,6 +17,10 @@ x1.l = 1; x2.l = 1; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small2.pyomo.gms b/pyomo/repn/tests/gams/small2.pyomo.gms index 0e375b66795..acc55986a7d 100644 --- a/pyomo/repn/tests/gams/small2.pyomo.gms +++ b/pyomo/repn/tests/gams/small2.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -16,6 +17,10 @@ x1.l = 1; x2.l = 1; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small3.pyomo.gms b/pyomo/repn/tests/gams/small3.pyomo.gms index bf4351f8cbe..381c6124cf3 100644 --- 
a/pyomo/repn/tests/gams/small3.pyomo.gms +++ b/pyomo/repn/tests/gams/small3.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -16,6 +17,10 @@ x1.l = 1; x2.l = 1; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small4.pyomo.gms b/pyomo/repn/tests/gams/small4.pyomo.gms index 57cf245fe07..34e83203efb 100644 --- a/pyomo/repn/tests/gams/small4.pyomo.gms +++ b/pyomo/repn/tests/gams/small4.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -16,6 +17,10 @@ x1.l = 1; x2.l = 1; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small5.pyomo.gms b/pyomo/repn/tests/gams/small5.pyomo.gms index fd3058ac622..a9308a8dc99 100644 --- a/pyomo/repn/tests/gams/small5.pyomo.gms +++ b/pyomo/repn/tests/gams/small5.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -46,6 +47,10 @@ x3.up = 1; x3.l = 2; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small6.pyomo.gms b/pyomo/repn/tests/gams/small6.pyomo.gms index 14d669bb758..ae31fa35a76 100644 --- a/pyomo/repn/tests/gams/small6.pyomo.gms +++ b/pyomo/repn/tests/gams/small6.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -34,6 +35,10 @@ x3.up = 1; x3.l = 2; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git 
a/pyomo/repn/tests/gams/small7.pyomo.gms b/pyomo/repn/tests/gams/small7.pyomo.gms index 25fda1e4174..cdc19f43297 100644 --- a/pyomo/repn/tests/gams/small7.pyomo.gms +++ b/pyomo/repn/tests/gams/small7.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -70,6 +71,10 @@ x3.up = 1; x3.l = 2; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small8.pyomo.gms b/pyomo/repn/tests/gams/small8.pyomo.gms index 5411bdbf4e2..c62492c33fc 100644 --- a/pyomo/repn/tests/gams/small8.pyomo.gms +++ b/pyomo/repn/tests/gams/small8.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -22,6 +23,10 @@ c4.. GAMS_OBJECTIVE =e= x3 + x2*x2 + x1 ; x3.lo = 7; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/small9.pyomo.gms b/pyomo/repn/tests/gams/small9.pyomo.gms index 11a03f5039d..1cbcafe40e8 100644 --- a/pyomo/repn/tests/gams/small9.pyomo.gms +++ b/pyomo/repn/tests/gams/small9.pyomo.gms @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -22,6 +23,10 @@ c6.. 
GAMS_OBJECTIVE =e= x1 ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING nlp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/test_gams.py b/pyomo/repn/tests/gams/test_gams.py index d57d4f61a2b..aa4c2789a8f 100644 --- a/pyomo/repn/tests/gams/test_gams.py +++ b/pyomo/repn/tests/gams/test_gams.py @@ -19,7 +19,9 @@ from pyomo.core.base import NumericLabeler, SymbolMap from pyomo.environ import (Block, ConcreteModel, Connector, Constraint, Objective, TransformationFactory, Var, exp, log, - ceil, floor, asin, acos, atan, asinh, acosh, atanh) + ceil, floor, asin, acos, atan, asinh, acosh, atanh, + Binary, quicksum) +from pyomo.gdp import Disjunction from pyomo.repn.plugins.gams_writer import (StorageTreeChecker, expression_to_string, split_long_line) @@ -117,6 +119,72 @@ def test_var_on_deactivated_block(self): model.obj = Objective(expr=model.x) self._check_baseline(model) + def test_fixed_linear_expr(self): + # Note that this checks both that a fixed variable is fixed, and + # that the resulting model type is correctly classified (in this + # case, fixing a binary makes this an LP) + m = ConcreteModel() + m.y = Var(within=Binary) + m.y.fix(0) + m.x = Var(bounds=(0,None)) + m.c1 = Constraint(expr=quicksum([m.y, m.y], linear=True) >= 0) + m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) + m.obj = Objective(expr=m.x) + self._check_baseline(m) + + def test_nested_GDP_with_deactivate(self): + m = ConcreteModel() + m.x = Var(bounds=(0, 1)) + + @m.Disjunct([0, 1]) + def disj(disj, _): + @disj.Disjunct(['A', 'B']) + def nested(n_disj, _): + pass # Blank nested disjunct + + return disj + + m.choice = Disjunction(expr=[m.disj[0], m.disj[1]]) + + m.c = Constraint(expr=m.x ** 2 + m.disj[1].nested['A'].indicator_var >= 1) + + m.disj[0].indicator_var.fix(1) + m.disj[1].deactivate() + 
m.disj[0].nested['A'].indicator_var.fix(1) + m.disj[0].nested['B'].deactivate() + m.disj[1].nested['A'].indicator_var.set_value(1) + m.disj[1].nested['B'].deactivate() + m.o = Objective(expr=m.x) + TransformationFactory('gdp.fix_disjuncts').apply_to(m) + + os = StringIO() + m.write(os, format='gams', io_options=dict(solver='dicopt')) + self.assertIn("USING minlp", os.getvalue()) + + def test_quicksum(self): + m = ConcreteModel() + m.y = Var(domain=Binary) + m.c = Constraint(expr=quicksum([m.y, m.y], linear=True) == 1) + m.y.fix(1) + lbl = NumericLabeler('x') + smap = SymbolMap(lbl) + tc = StorageTreeChecker(m) + self.assertEqual(("x1 + x1", False), expression_to_string(m.c.body, tc, smap=smap)) + m.x = Var() + m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) + self.assertEqual(("x2 + x1", False), expression_to_string(m.c2.body, tc, smap=smap)) + + def test_quicksum_integer_var_fixed(self): + m = ConcreteModel() + m.x = Var() + m.y = Var(domain=Binary) + m.c = Constraint(expr=quicksum([m.y, m.y], linear=True) == 1) + m.o = Objective(expr=m.x ** 2) + m.y.fix(1) + os = StringIO() + m.write(os, format='gams') + self.assertIn("USING nlp", os.getvalue()) + def test_expr_xfrm(self): from pyomo.repn.plugins.gams_writer import ( expression_to_string, StorageTreeChecker) diff --git a/pyomo/repn/tests/gams/var_on_deactivated_block.gams.baseline b/pyomo/repn/tests/gams/var_on_deactivated_block.gams.baseline index bd361b20a3a..1e6aff70be1 100644 --- a/pyomo/repn/tests/gams/var_on_deactivated_block.gams.baseline +++ b/pyomo/repn/tests/gams/var_on_deactivated_block.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -14,6 +15,10 @@ obj.. 
GAMS_OBJECTIVE =e= x ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/var_on_nonblock.gams.baseline b/pyomo/repn/tests/gams/var_on_nonblock.gams.baseline index 9c7c08d1abe..41012808de2 100644 --- a/pyomo/repn/tests/gams/var_on_nonblock.gams.baseline +++ b/pyomo/repn/tests/gams/var_on_nonblock.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -14,6 +15,10 @@ obj.. GAMS_OBJECTIVE =e= x ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/repn/tests/gams/var_on_other_model.gams.baseline b/pyomo/repn/tests/gams/var_on_other_model.gams.baseline index 18eec5ddc27..43ec772606a 100644 --- a/pyomo/repn/tests/gams/var_on_other_model.gams.baseline +++ b/pyomo/repn/tests/gams/var_on_other_model.gams.baseline @@ -1,3 +1,4 @@ +$offlisting $offdigit EQUATIONS @@ -14,6 +15,10 @@ obj.. 
GAMS_OBJECTIVE =e= x ; MODEL GAMS_MODEL /all/ ; +option solprint=off; +option limrow=0; +option limcol=0; +option solvelink=5; SOLVE GAMS_MODEL USING lp minimizing GAMS_OBJECTIVE; Scalars MODELSTAT 'model status', SOLVESTAT 'solve status'; diff --git a/pyomo/scripting/driver_help.py b/pyomo/scripting/driver_help.py index f18f3f87f1c..2d81e47fae7 100644 --- a/pyomo/scripting/driver_help.py +++ b/pyomo/scripting/driver_help.py @@ -16,6 +16,7 @@ import textwrap import logging import argparse +import socket import pyutilib.subprocess from pyutilib.misc import Options @@ -258,7 +259,19 @@ def help_transformations(): print("---------------------------") for xform in sorted(TransformationFactory): print(" "+xform) - print(wrapper.fill(TransformationFactory.doc(xform))) + _doc = TransformationFactory.doc(xform) or "" + # Ideally, the Factory would ensure that the doc string + # indicated deprecation, but as @deprecated() is Pyomo + # functionality and the Factory comes directly from PyUtilib, + # PyUtilib probably shouldn't contain Pyomo-specific processing. + # The next best thing is to ensure that the deprecation status + # is indicated here. 
+ _init_doc = TransformationFactory.get_class(xform).__init__.__doc__ \ + or "" + if _init_doc.startswith('DEPRECATION') and 'DEPRECAT' not in _doc: + _doc = ' '.join(('[DEPRECATED]', _doc)) + if _doc: + print(wrapper.fill(_doc)) def help_solvers(): import pyomo.environ @@ -288,31 +301,53 @@ def help_solvers(): print("") solver_list = list(pyomo.opt.SolverFactory) solver_list = sorted( filter(lambda x: '_' != x[0], solver_list) ) - n = max(map(len, solver_list)) - wrapper = textwrap.TextWrapper(subsequent_indent=' '*(n+9)) + _data = [] try: # Disable warnings logging.disable(logging.WARNING) for s in solver_list: # Create a solver, and see if it is available with pyomo.opt.SolverFactory(s) as opt: - if s == 'py' or (hasattr(opt, "_metasolver") and opt._metasolver): + ver = '' + if opt.available(False): + avail = '-' + if not hasattr(opt, 'license_is_valid'): + avail = '+' + elif opt.license_is_valid(): + avail = '+' + try: + ver = opt.version() + if ver: + while len(ver) > 2 and ver[-1] == 0: + ver = ver[:-1] + ver = '.'.join(str(v) for v in ver) + else: + ver = '' + except (AttributeError, NameError): + pass + elif s == 'py' or (hasattr(opt, "_metasolver") and opt._metasolver): # py is a metasolver, but since we don't specify a subsolver # for this test, opt is actually an UnknownSolver, so we # can't try to get the _metasolver attribute from it. 
# Also, default to False if the attribute isn't implemented - msg = ' %-'+str(n)+'s + %s' - elif opt.available(False): - msg = ' %-'+str(n)+'s * %s' + avail = '*' else: - msg = ' %-'+str(n)+'s %s' - print(wrapper.fill(msg % (s, pyomo.opt.SolverFactory.doc(s)))) + avail = '' + _data.append((avail, s, ver, pyomo.opt.SolverFactory.doc(s))) finally: # Reset logging level logging.disable(logging.NOTSET) + nameFieldLen = max(len(line[1]) for line in _data) + verFieldLen = max(len(line[2]) for line in _data) + fmt = ' %%1s%%-%ds %%-%ds %%s' % (nameFieldLen, verFieldLen) + wrapper = textwrap.TextWrapper( + subsequent_indent=' '*(nameFieldLen + verFieldLen + 6)) + for _line in _data: + print(wrapper.fill(fmt % _line)) + print("") wrapper = textwrap.TextWrapper(subsequent_indent='') - print(wrapper.fill("An asterisk indicates solvers that are currently available to be run from Pyomo with the serial solver manager. A plus indicates meta-solvers, that are always available.")) + print(wrapper.fill("""The leading symbol (one of *, -, +) indicates the current solver availability. A plus (+) indicates the solver is currently available to be run from Pyomo with the serial solver manager, and (if applicable) has a valid license. A minus (-) indicates the solver executables are available but do not report having a valid license. The solver may still be usable in an unlicensed or "demo" mode for limited problem sizes. An asterisk (*) indicates meta-solvers or generic interfaces, which are always available.""")) print('') print(wrapper.fill('Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed. 
For example, the following indicates that the ipopt solver will be used:')) print('') @@ -326,6 +361,7 @@ def help_solvers(): print('') try: logging.disable(logging.WARNING) + socket.setdefaulttimeout(10) import pyomo.neos.kestrel kestrel = pyomo.neos.kestrel.kestrelAMPL() #print "HERE", solver_list @@ -353,6 +389,7 @@ def help_solvers(): pass finally: logging.disable(logging.NOTSET) + socket.setdefaulttimeout(None) def print_components(data): """ diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py index 86f8db53860..d941b89a3fb 100644 --- a/pyomo/scripting/pyomo_main.py +++ b/pyomo/scripting/pyomo_main.py @@ -94,3 +94,6 @@ def main_console_script(): return ans.errorcode except AttributeError: return ans + +if __name__ == '__main__': + sys.exit(main_console_script()) diff --git a/pyomo/scripting/tests/test_cmds.py b/pyomo/scripting/tests/test_cmds.py index 23a3935205e..a444b95e722 100644 --- a/pyomo/scripting/tests/test_cmds.py +++ b/pyomo/scripting/tests/test_cmds.py @@ -13,7 +13,7 @@ from pyutilib.misc.redirect_io import capture_output from pyomo.environ import SolverFactory -from pyomo.scripting.driver_help import help_solvers +from pyomo.scripting.driver_help import help_solvers, help_transformations class Test(unittest.TestCase): @@ -26,15 +26,42 @@ def test_help_solvers(self): self.assertTrue(re.search('Serial Solver', OUT)) # Test known solvers and metasolver flags # ASL is a metasolver - self.assertTrue(re.search('asl +\+', OUT)) + self.assertTrue(re.search('\n \*asl ', OUT)) # PS is bundles with Pyomo so should always be available - self.assertTrue(re.search('ps +\*', OUT)) - for solver in ('ipopt','baron','cbc','glpk'): + self.assertTrue(re.search('\n \+ps ', OUT)) + for solver in ('ipopt','cbc','glpk'): s = SolverFactory(solver) if s.available(): - self.assertTrue(re.search("%s +\* [a-zA-Z]" % solver, OUT)) + self.assertTrue( + re.search("\n \+%s " % solver, OUT), + "' +%s' not found in help --solvers" % solver) else: - 
self.assertTrue(re.search("%s +[a-zA-Z]" % solver, OUT)) + self.assertTrue( + re.search("\n %s " % solver, OUT), + "' %s' not found in help --solvers" % solver) + for solver in ('baron',): + s = SolverFactory(solver) + if s.license_is_valid(): + self.assertTrue( + re.search("\n \+%s " % solver, OUT), + "' +%s' not found in help --solvers" % solver) + elif s.available(): + self.assertTrue( + re.search("\n \-%s " % solver, OUT), + "' +%s' not found in help --solvers" % solver) + else: + self.assertTrue( + re.search("\n %s " % solver, OUT), + "' %s' not found in help --solvers" % solver) + + def test_help_transformations(self): + with capture_output() as OUT: + help_transformations() + OUT = OUT.getvalue() + self.assertTrue(re.search('Pyomo Model Transformations', OUT)) + self.assertTrue(re.search('core.relax_integer_vars', OUT)) + # test a transformation that we know is deprecated + self.assertTrue(re.search('duality.linear_dual\s+\[DEPRECATED\]', OUT)) if __name__ == "__main__": diff --git a/pyomo/solvers/plugins/solvers/BARON.py b/pyomo/solvers/plugins/solvers/BARON.py index 8eeb61115f2..22e3298c226 100644 --- a/pyomo/solvers/plugins/solvers/BARON.py +++ b/pyomo/solvers/plugins/solvers/BARON.py @@ -238,11 +238,16 @@ def _convert_problem(self, for key in self.options: lower_key = key.lower() if lower_key == 'resname': - logger.warning('The ResName option is set to %s' - % self._soln_file) + logger.warning( + 'Ignoring user-specified option "%s=%s". This ' + 'option is set to %s, and can be overridden using ' + 'the "solnfile" argument to the solve() method.' + % (key, self.options[key], self._soln_file)) elif lower_key == 'timname': - logger.warning('The TimName option is set to %s' - % self._tim_file) + logger.warning( + 'Ignoring user-specified option "%s=%s". This ' + 'option is set to %s.' 
+ % (key, self.options[key], self._tim_file)) else: solver_options[key] = self.options[key] diff --git a/pyomo/solvers/plugins/solvers/GAMS.py b/pyomo/solvers/plugins/solvers/GAMS.py index dfc77523210..31d9a447b1a 100644 --- a/pyomo/solvers/plugins/solvers/GAMS.py +++ b/pyomo/solvers/plugins/solvers/GAMS.py @@ -31,6 +31,8 @@ from pyomo.opt.results import (SolverResults, SolverStatus, Solution, SolutionStatus, TerminationCondition, ProblemSense) +from pyomo.common.dependencies import attempt_import +gdxcc, gdxcc_available = attempt_import('gdxcc', defer_check=True) logger = logging.getLogger('pyomo.solvers') @@ -331,7 +333,7 @@ def solve(self, *args, **kwds): extract_rc = ('rc' in model_suffixes) results = SolverResults() - results.problem.name = t1.name + results.problem.name = os.path.join(ws.working_directory, t1.name + '.gms') results.problem.lower_bound = t1.out_db["OBJEST"].find_record().value results.problem.upper_bound = t1.out_db["OBJEST"].find_record().value results.problem.number_of_variables = \ @@ -576,14 +578,14 @@ def available(self, exception_flag=True): """True if the solver is available.""" exe = pyomo.common.Executable("gams") if exception_flag is False: - return exe.available() + if not exe.available(): + return False else: - if exe.available(): - return True - else: + if not exe.available(): raise NameError( "No 'gams' command found on system PATH - GAMS shell " "solver functionality is not available.") + return True def _default_executable(self): executable = pyomo.common.Executable("gams") @@ -606,12 +608,22 @@ def _get_version(self): return _extract_version('') else: # specify logging to stdout for windows compatibility - # technically this command makes gams complain because we're not - # providing a filename, but it will include the version name anyway - cmd = [solver_exec, "", "lo=3"] + cmd = [solver_exec, "audit", "lo=3"] _, txt = pyutilib.subprocess.run(cmd, tee=False) return _extract_version(txt) + @staticmethod + def 
_parse_special_values(value): + if value == 1.0e300 or value == 2.0e300: + return float('nan') + if value == 3.0e300: + return float('inf') + if value == 4.0e300: + return -float('inf') + if value == 5.0e300: + return sys.float_info.epsilon + return value + def solve(self, *args, **kwds): """ Solve a model via the GAMS executable. @@ -696,8 +708,19 @@ def solve(self, *args, **kwds): put_results = "results" io_options["put_results"] = put_results - results_filename = os.path.join(tmpdir, put_results + ".dat") - statresults_filename = os.path.join(tmpdir, put_results + "stat.dat") + io_options.setdefault("put_results_format", + 'gdx' if gdxcc_available else 'dat') + + if io_options['put_results_format'] == 'gdx': + results_filename = os.path.join( + tmpdir, "GAMS_MODEL_p.gdx") + statresults_filename = os.path.join( + tmpdir, "%s_s.gdx" % (put_results,)) + else: + results_filename = os.path.join( + tmpdir, "%s.dat" % (put_results,)) + statresults_filename = os.path.join( + tmpdir, "%sstat.dat" % (put_results,)) if isinstance(model, IBlock): # Kernel blocks have slightly different write method @@ -761,10 +784,12 @@ def solve(self, *args, **kwds): raise RuntimeError("GAMS encountered an error during solve. 
" "Check listing file for details.") - with open(results_filename, 'r') as results_file: - results_text = results_file.read() - with open(statresults_filename, 'r') as statresults_file: - statresults_text = statresults_file.read() + if io_options['put_results_format'] == 'gdx': + model_soln, stat_vars = self._parse_gdx_results( + results_filename, statresults_filename) + else: + model_soln, stat_vars = self._parse_dat_results( + results_filename, statresults_filename) finally: if not keepfiles: if newdir: @@ -798,16 +823,6 @@ def solve(self, *args, **kwds): extract_dual = ('dual' in model_suffixes) extract_rc = ('rc' in model_suffixes) - stat_vars = dict() - # Skip first line of explanatory text - for line in statresults_text.splitlines()[1:]: - items = line.split() - try: - stat_vars[items[0]] = float(items[1]) - except ValueError: - # GAMS printed NA, just make it nan - stat_vars[items[0]] = float('nan') - results = SolverResults() results.problem.name = output_filename results.problem.lower_bound = stat_vars["OBJEST"] @@ -930,12 +945,6 @@ def solve(self, *args, **kwds): soln.gap = abs(results.problem.upper_bound \ - results.problem.lower_bound) - model_soln = dict() - # Skip first line of explanatory text - for line in results_text.splitlines()[1:]: - items = line.split() - model_soln[items[0]] = (items[1], items[2]) - has_rc_info = True for sym, ref in iteritems(symbolMap.bySymbol): obj = ref() @@ -950,7 +959,11 @@ def solve(self, *args, **kwds): soln.objective[sym] = {'Value': objctvval} if obj.parent_component().ctype is not Var: continue - rec = model_soln[sym] + try: + rec = model_soln[sym] + except KeyError: + # no solution returned + rec = (float('nan'), float('nan')) # obj.value = float(rec[0]) soln.variable[sym] = {"Value": float(rec[0])} if extract_rc and has_rc_info: @@ -969,7 +982,11 @@ def solve(self, *args, **kwds): continue sym = symbolMap.getSymbol(c) if c.equality: - rec = model_soln[sym] + try: + rec = model_soln[sym] + except KeyError: + # no 
solution returned + rec = (float('nan'), float('nan')) try: # model.dual[c] = float(rec[1]) soln.constraint[sym] = {'dual': float(rec[1])} @@ -983,14 +1000,22 @@ def solve(self, *args, **kwds): # Negate marginal for _lo equations marg = 0 if c.lower is not None: - rec_lo = model_soln[sym + '_lo'] + try: + rec_lo = model_soln[sym + '_lo'] + except KeyError: + # no solution returned + rec_lo = (float('nan'), float('nan')) try: marg -= float(rec_lo[1]) except ValueError: # Solver didn't provide marginals marg = float('nan') if c.upper is not None: - rec_hi = model_soln[sym + '_hi'] + try: + rec_hi = model_soln[sym + '_hi'] + except KeyError: + # no solution returned + rec_hi = (float('nan'), float('nan')) try: marg += float(rec_hi[1]) except ValueError: @@ -1049,6 +1074,106 @@ def solve(self, *args, **kwds): return results + def _parse_gdx_results(self, results_filename, statresults_filename): + model_soln = dict() + stat_vars = dict.fromkeys(['MODELSTAT', 'SOLVESTAT', 'OBJEST', + 'OBJVAL', 'NUMVAR', 'NUMEQU', 'NUMDVAR', + 'NUMNZ', 'ETSOLVE']) + + pgdx = gdxcc.new_gdxHandle_tp() + ret = gdxcc.gdxCreateD(pgdx, os.path.dirname(self.executable()), 128) + if not ret[0]: + raise RuntimeError("GAMS GDX failure (gdxCreate): %s." % ret[1]) + + if os.path.exists(statresults_filename): + ret = gdxcc.gdxOpenRead(pgdx, statresults_filename) + if not ret[0]: + raise RuntimeError("GAMS GDX failure (gdxOpenRead): %d." 
% ret[1]) + + i = 0 + while True: + i += 1 + ret = gdxcc.gdxDataReadRawStart(pgdx, i) + if not ret[0]: + break + + ret = gdxcc.gdxSymbolInfo(pgdx, i) + if not ret[0]: + break + if len(ret) < 2: + raise RuntimeError("GAMS GDX failure (gdxSymbolInfo).") + stat = ret[1] + if not stat in stat_vars: + continue + + ret = gdxcc.gdxDataReadRaw(pgdx) + if not ret[0] or len(ret[2]) == 0: + raise RuntimeError("GAMS GDX failure (gdxDataReadRaw).") + + if stat in ('OBJEST', 'OBJVAL', 'ETSOLVE'): + stat_vars[stat] = self._parse_special_values(ret[2][0]) + else: + stat_vars[stat] = int(ret[2][0]) + + gdxcc.gdxDataReadDone(pgdx) + gdxcc.gdxClose(pgdx) + + if os.path.exists(results_filename): + ret = gdxcc.gdxOpenRead(pgdx, results_filename) + if not ret[0]: + raise RuntimeError("GAMS GDX failure (gdxOpenRead): %d." % ret[1]) + + i = 0 + while True: + i += 1 + ret = gdxcc.gdxDataReadRawStart(pgdx, i) + if not ret[0]: + break + + ret = gdxcc.gdxDataReadRaw(pgdx) + if not ret[0] or len(ret[2]) < 2: + raise RuntimeError("GAMS GDX failure (gdxDataReadRaw).") + level = self._parse_special_values(ret[2][0]) + dual = self._parse_special_values(ret[2][1]) + + ret = gdxcc.gdxSymbolInfo(pgdx, i) + if not ret[0]: + break + if len(ret) < 2: + raise RuntimeError("GAMS GDX failure (gdxSymbolInfo).") + model_soln[ret[1]] = (level, dual) + + gdxcc.gdxDataReadDone(pgdx) + gdxcc.gdxClose(pgdx) + + gdxcc.gdxFree(pgdx) + return model_soln, stat_vars + + def _parse_dat_results(self, results_filename, statresults_filename): + with open(statresults_filename, 'r') as statresults_file: + statresults_text = statresults_file.read() + + stat_vars = dict() + # Skip first line of explanatory text + for line in statresults_text.splitlines()[1:]: + items = line.split() + try: + stat_vars[items[0]] = float(items[1]) + except ValueError: + # GAMS printed NA, just make it nan + stat_vars[items[0]] = float('nan') + + with open(results_filename, 'r') as results_file: + results_text = results_file.read() + + model_soln 
= dict() + # Skip first line of explanatory text + for line in results_text.splitlines()[1:]: + items = line.split() + model_soln[items[0]] = (items[1], items[2]) + + return model_soln, stat_vars + class OutputStream: """Output stream object for simultaneously writing to multiple streams. diff --git a/pyomo/solvers/plugins/solvers/cplex_direct.py b/pyomo/solvers/plugins/solvers/cplex_direct.py index 990f8f3590c..1d21c98b224 100644 --- a/pyomo/solvers/plugins/solvers/cplex_direct.py +++ b/pyomo/solvers/plugins/solvers/cplex_direct.py @@ -37,13 +37,22 @@ class DegreeError(ValueError): class _CplexExpr(object): - def __init__(self): - self.variables = [] - self.coefficients = [] - self.offset = 0 - self.q_variables1 = [] - self.q_variables2 = [] - self.q_coefficients = [] + def __init__( + self, + variables, + coefficients, + offset=None, + q_variables1=None, + q_variables2=None, + q_coefficients=None, + ): + self.variables = variables + self.coefficients = coefficients + self.offset = offset or 0.0 + self.q_variables1 = q_variables1 or [] + self.q_variables2 = q_variables2 or [] + self.q_coefficients = [float(coef) for coef in q_coefficients or []] + def _is_numeric(x): try: @@ -53,6 +62,52 @@ def _is_numeric(x): return True +class _VariableData(object): + def __init__(self, solver_model): + self._solver_model = solver_model + self.lb = [] + self.ub = [] + self.types = [] + self.names = [] + + def add(self, lb, ub, type_, name): + self.lb.append(lb) + self.ub.append(ub) + self.types.append(type_) + self.names.append(name) + + def store_in_cplex(self): + self._solver_model.variables.add( + lb=self.lb, ub=self.ub, types=self.types, names=self.names + ) + + +class _LinearConstraintData(object): + def __init__(self, solver_model): + self._solver_model = solver_model + self.lin_expr = [] + self.senses = [] + self.rhs = [] + self.range_values = [] + self.names = [] + + def add(self, cplex_expr, sense, rhs, range_values, name): + self.lin_expr.append([cplex_expr.variables, 
cplex_expr.coefficients]) + self.senses.append(sense) + self.rhs.append(rhs) + self.range_values.append(range_values) + self.names.append(name) + + def store_in_cplex(self): + self._solver_model.linear_constraints.add( + lin_expr=self.lin_expr, + senses=self.senses, + rhs=self.rhs, + range_values=self.range_values, + names=self.names, + ) + + @SolverFactory.register('cplex_direct', doc='Direct python interface to CPLEX') class CPLEXDirect(DirectSolver): @@ -193,29 +248,36 @@ def _process_stream(arg): return Bunch(rc=None, log=None) def _get_expr_from_pyomo_repn(self, repn, max_degree=2): - referenced_vars = ComponentSet() - degree = repn.polynomial_degree() - if (degree is None) or (degree > max_degree): - raise DegreeError('CPLEXDirect does not support expressions of degree {0}.'.format(degree)) + if degree is None or degree > max_degree: + raise DegreeError( + "CPLEXDirect does not support expressions of degree {0}.".format(degree) + ) - new_expr = _CplexExpr() - if len(repn.linear_vars) > 0: - referenced_vars.update(repn.linear_vars) - new_expr.variables.extend(self._pyomo_var_to_ndx_map[i] for i in repn.linear_vars) - new_expr.coefficients.extend(repn.linear_coefs) + referenced_vars = ComponentSet(repn.linear_vars) + q_coefficients = [] + q_variables1 = [] + q_variables2 = [] for i, v in enumerate(repn.quadratic_vars): x, y = v - new_expr.q_coefficients.append(repn.quadratic_coefs[i]) - new_expr.q_variables1.append(self._pyomo_var_to_ndx_map[x]) - new_expr.q_variables2.append(self._pyomo_var_to_ndx_map[y]) + q_coefficients.append(repn.quadratic_coefs[i]) + q_variables1.append(self._pyomo_var_to_ndx_map[x]) + q_variables2.append(self._pyomo_var_to_ndx_map[y]) referenced_vars.add(x) referenced_vars.add(y) - new_expr.offset = repn.constant - - return new_expr, referenced_vars + return ( + _CplexExpr( + variables=[self._pyomo_var_to_ndx_map[var] for var in repn.linear_vars], + coefficients=repn.linear_coefs, + offset=repn.constant, + q_variables1=q_variables1, + 
q_variables2=q_variables2, + q_coefficients=q_coefficients, + ), + referenced_vars, + ) def _get_expr_from_pyomo_expr(self, expr, max_degree=2): if max_degree == 2: @@ -232,7 +294,7 @@ def _get_expr_from_pyomo_expr(self, expr, max_degree=2): return cplex_expr, referenced_vars - def _add_var(self, var): + def _add_var(self, var, var_data=None): varname = self._symbol_map.getSymbol(var, self._labeler) vtype = self._cplex_vtype_from_var(var) if var.has_lb(): @@ -244,7 +306,16 @@ def _add_var(self, var): else: ub = self._cplex.infinity - self._solver_model.variables.add(lb=[lb], ub=[ub], types=[vtype], names=[varname]) + if var.is_fixed(): + lb = value(var) + ub = value(var) + + cplex_var_data = ( + _VariableData(self._solver_model) if var_data is None else var_data + ) + cplex_var_data.add(lb=lb, ub=ub, type_=vtype, name=varname) + if var_data is None: + cplex_var_data.store_in_cplex() self._pyomo_var_to_solver_var_map[var] = varname self._solver_var_to_pyomo_var_map[varname] = var @@ -252,10 +323,6 @@ def _add_var(self, var): self._ndx_count += 1 self._referenced_variables[var] = 0 - if var.is_fixed(): - self._solver_model.variables.set_lower_bounds(varname, var.value) - self._solver_model.variables.set_upper_bounds(varname, var.value) - def _set_instance(self, model, kwds={}): self._pyomo_var_to_ndx_map = ComponentMap() self._ndx_count = 0 @@ -287,7 +354,51 @@ def _set_instance(self, model, kwds={}): "by overwriting its bounds in the CPLEX instance." 
% (var.name, self._pyomo_model.name,)) - def _add_constraint(self, con): + def _add_block(self, block): + var_data = _VariableData(self._solver_model) + for var in block.component_data_objects( + ctype=pyomo.core.base.var.Var, descend_into=True, active=True, sort=True + ): + self._add_var(var, var_data) + var_data.store_in_cplex() + + lin_con_data = _LinearConstraintData(self._solver_model) + for sub_block in block.block_data_objects(descend_into=True, active=True): + for con in sub_block.component_data_objects( + ctype=pyomo.core.base.constraint.Constraint, + descend_into=False, + active=True, + sort=True, + ): + if not con.has_lb() and not con.has_ub(): + assert not con.equality + continue # non-binding, so skip + + self._add_constraint(con, lin_con_data) + + for con in sub_block.component_data_objects( + ctype=pyomo.core.base.sos.SOSConstraint, + descend_into=False, + active=True, + sort=True, + ): + self._add_sos_constraint(con) + + obj_counter = 0 + for obj in sub_block.component_data_objects( + ctype=pyomo.core.base.objective.Objective, + descend_into=False, + active=True, + ): + obj_counter += 1 + if obj_counter > 1: + raise ValueError( + "Solver interface does not support multiple objectives." 
+ ) + self._set_objective(obj) + lin_con_data.store_in_cplex() + + def _add_constraint(self, con, lin_con_data=None): if not con.active: return None @@ -298,55 +409,57 @@ def _add_constraint(self, con): if con._linear_canonical_form: cplex_expr, referenced_vars = self._get_expr_from_pyomo_repn( - con.canonical_form(), - self._max_constraint_degree) + con.canonical_form(), self._max_constraint_degree + ) else: cplex_expr, referenced_vars = self._get_expr_from_pyomo_expr( - con.body, - self._max_constraint_degree) - - if con.has_lb(): - if not is_fixed(con.lower): - raise ValueError("Lower bound of constraint {0} " - "is not constant.".format(con)) - if con.has_ub(): - if not is_fixed(con.upper): - raise ValueError("Upper bound of constraint {0} " - "is not constant.".format(con)) + con.body, self._max_constraint_degree + ) + + if con.has_lb() and not is_fixed(con.lower): + raise ValueError( + "Lower bound of constraint {0} is not constant.".format(con) + ) + if con.has_ub() and not is_fixed(con.upper): + raise ValueError( + "Upper bound of constraint {0} is not constant.".format(con) + ) + + range_ = 0.0 if con.equality: - my_sense = 'E' - my_rhs = [value(con.lower) - cplex_expr.offset] - my_range = [] + sense = "E" + rhs = value(con.lower) - cplex_expr.offset elif con.has_lb() and con.has_ub(): - my_sense = 'R' + sense = "R" lb = value(con.lower) ub = value(con.upper) - my_rhs = [ub - cplex_expr.offset] - my_range = [lb - ub] + rhs = ub - cplex_expr.offset + range_ = lb - ub self._range_constraints.add(con) elif con.has_lb(): - my_sense = 'G' - my_rhs = [value(con.lower) - cplex_expr.offset] - my_range = [] + sense = "G" + rhs = value(con.lower) - cplex_expr.offset elif con.has_ub(): - my_sense = 'L' - my_rhs = [value(con.upper) - cplex_expr.offset] - my_range = [] + sense = "L" + rhs = value(con.upper) - cplex_expr.offset else: - raise ValueError("Constraint does not have a lower " - "or an upper bound: {0} \n".format(con)) + raise ValueError( + "Constraint does 
not have a lower " + "or an upper bound: {0} \n".format(con) + ) if len(cplex_expr.q_coefficients) == 0: - self._solver_model.linear_constraints.add( - lin_expr=[[cplex_expr.variables, - cplex_expr.coefficients]], - senses=my_sense, - rhs=my_rhs, - range_values=my_range, - names=[conname]) + cplex_lin_con_data = ( + _LinearConstraintData(self._solver_model) + if lin_con_data is None + else lin_con_data + ) + cplex_lin_con_data.add(cplex_expr, sense, rhs, range_, conname) + if lin_con_data is None: + cplex_lin_con_data.store_in_cplex() else: - if my_sense == 'R': + if sense == 'R': raise ValueError("The CPLEXDirect interface does not " "support quadratic range constraints: " "{0}".format(con)) @@ -356,8 +469,8 @@ def _add_constraint(self, con): quad_expr=[cplex_expr.q_variables1, cplex_expr.q_variables2, cplex_expr.q_coefficients], - sense=my_sense, - rhs=my_rhs[0], + sense=sense, + rhs=rhs, name=conname) for var in referenced_vars: @@ -425,9 +538,6 @@ def _set_objective(self, obj): self._vars_referenced_by_obj = ComponentSet() self._objective = None - self._solver_model.objective.set_linear([(i, 0.0) for i in range(len(self._pyomo_var_to_solver_var_map.values()))]) - self._solver_model.objective.set_quadratic([[[0], [0]] for i in self._pyomo_var_to_solver_var_map.keys()]) - if obj.active is False: raise ValueError('Cannot add inactive objective to solver.') @@ -448,12 +558,33 @@ def _set_objective(self, obj): self._solver_model.objective.set_sense(sense) if hasattr(self._solver_model.objective, 'set_offset'): self._solver_model.objective.set_offset(cplex_expr.offset) - if len(cplex_expr.coefficients) != 0: - self._solver_model.objective.set_linear(list(zip(cplex_expr.variables, cplex_expr.coefficients))) - if len(cplex_expr.q_coefficients) != 0: - self._solver_model.objective.set_quadratic_coefficients(list(zip(cplex_expr.q_variables1, - cplex_expr.q_variables2, - cplex_expr.q_coefficients))) + + linear_objective_already_exists = 
any(self._solver_model.objective.get_linear()) + quadratic_objective_already_exists = self._solver_model.objective.get_num_quadratic_nonzeros() + + contains_linear_terms = any(cplex_expr.coefficients) + contains_quadratic_terms = any(cplex_expr.q_coefficients) + num_cols = len(self._pyomo_var_to_solver_var_map) + + if linear_objective_already_exists or contains_linear_terms: + self._solver_model.objective.set_linear([(i, 0.0) for i in range(num_cols)]) + + if contains_linear_terms: + self._solver_model.objective.set_linear(list(zip(cplex_expr.variables, cplex_expr.coefficients))) + + if quadratic_objective_already_exists or contains_quadratic_terms: + self._solver_model.objective.set_quadratic([0.0] * num_cols) + + if contains_quadratic_terms: + self._solver_model.objective.set_quadratic_coefficients( + list( + zip( + cplex_expr.q_variables1, + cplex_expr.q_variables2, + cplex_expr.q_coefficients + ) + ) + ) self._objective = obj self._vars_referenced_by_obj = referenced_vars @@ -587,13 +718,13 @@ def _postsolve(self): soln_constraints = soln.constraint var_names = self._solver_model.variables.get_names() - var_names = list(set(var_names).intersection(set(self._pyomo_var_to_solver_var_map.values()))) - var_vals = self._solver_model.solution.get_values(var_names) - for i, name in enumerate(var_names): + assert set(var_names) == set(self._pyomo_var_to_solver_var_map.values()) + var_vals = self._solver_model.solution.get_values() + for name, val in zip(var_names, var_vals): pyomo_var = self._solver_var_to_pyomo_var_map[name] if self._referenced_variables[pyomo_var] > 0: pyomo_var.stale = False - soln_variables[name] = {"Value":var_vals[i]} + soln_variables[name] = {"Value": val} if extract_reduced_costs: reduced_costs = self._solver_model.solution.get_reduced_costs(var_names) @@ -681,18 +812,18 @@ def _warm_start(self): self._solver_model.MIP_starts.effort_level.auto) def _load_vars(self, vars_to_load=None): - var_map = self._pyomo_var_to_solver_var_map - ref_vars = 
self._referenced_variables + var_map = self._pyomo_var_to_ndx_map if vars_to_load is None: + vals = self._solver_model.solution.get_values() vars_to_load = var_map.keys() + else: + cplex_vars_to_load = [var_map[pyomo_var] for pyomo_var in vars_to_load] + vals = self._solver_model.solution.get_values(cplex_vars_to_load) - cplex_vars_to_load = [var_map[pyomo_var] for pyomo_var in vars_to_load] - vals = self._solver_model.solution.get_values(cplex_vars_to_load) - - for i, pyomo_var in enumerate(vars_to_load): - if ref_vars[pyomo_var] > 0: + for pyomo_var, val in zip(vars_to_load, vals): + if self._referenced_variables[pyomo_var] > 0: pyomo_var.stale = False - pyomo_var.value = vals[i] + pyomo_var.value = val def _load_rc(self, vars_to_load=None): if not hasattr(self._pyomo_model, 'rc'): diff --git a/pyomo/solvers/tests/checks/test_BARON.py b/pyomo/solvers/tests/checks/test_BARON.py index 302394970b6..e7b5e9f3cb5 100644 --- a/pyomo/solvers/tests/checks/test_BARON.py +++ b/pyomo/solvers/tests/checks/test_BARON.py @@ -1,7 +1,23 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + """Tests the BARON interface.""" + +from six import StringIO + import pyutilib.th as unittest -from pyomo.environ import (ConcreteModel, Constraint, Objective, Var, log10, - minimize) + +from pyomo.common.log import LoggingIntercept +from pyomo.environ import ( + ConcreteModel, Constraint, Objective, Var, log10, minimize, +) from pyomo.opt import SolverFactory, TerminationCondition # check if BARON is available @@ -57,6 +73,23 @@ def test_pow(self): self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal) + def test_BARON_option_warnings(self): + os = StringIO() + with LoggingIntercept(os, 'pyomo.solvers'): + m = ConcreteModel() + m.x = Var() + m.obj = Objective(expr=m.x**2) + + with SolverFactory("baron") as opt: + results = opt.solve(m, options={'ResName': 'results.lst', + 'TimName': 'results.tim'}) + + self.assertEqual(results.solver.termination_condition, + TerminationCondition.optimal) + self.assertIn('Ignoring user-specified option "ResName=results.lst"', + os.getvalue()) + self.assertIn('Ignoring user-specified option "TimName=results.tim"', + os.getvalue()) if __name__ == '__main__': unittest.main() diff --git a/pyomo/solvers/tests/checks/test_CPLEXDirect.py b/pyomo/solvers/tests/checks/test_CPLEXDirect.py index d49472806d0..f1954885483 100644 --- a/pyomo/solvers/tests/checks/test_CPLEXDirect.py +++ b/pyomo/solvers/tests/checks/test_CPLEXDirect.py @@ -8,10 +8,15 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ +import sys + import pyutilib.th as unittest -from pyomo.opt import * + from pyomo.environ import * -import sys +from pyomo.opt import * +from pyomo.solvers.plugins.solvers.cplex_direct import (_CplexExpr, + _LinearConstraintData, + _VariableData) try: import cplex @@ -143,5 +148,444 @@ def test_optimal_mip(self): self.assertEqual(results.solution.status, SolutionStatus.optimal) + +@unittest.skipIf(not unittest.mock_available, "'mock' is not available") +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestIsFixedCallCount(unittest.TestCase): + """ Tests for PR#1402 (669e7b2b) """ + + def setup(self, skip_trivial_constraints): + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.c1 = Constraint(expr=m.x + m.y == 1) + m.c2 = Constraint(expr=m.x <= 1) + self.assertFalse(m.c2.has_lb()) + self.assertTrue(m.c2.has_ub()) + self._model = m + + self._opt = SolverFactory("cplex_persistent") + self._opt.set_instance( + self._model, skip_trivial_constraints=skip_trivial_constraints + ) + + def test_skip_trivial_and_call_count_for_fixed_con_is_one(self): + self.setup(skip_trivial_constraints=True) + self._model.x.fix(1) + self.assertTrue(self._opt._skip_trivial_constraints) + self.assertTrue(self._model.c2.body.is_fixed()) + + with unittest.mock.patch( + "pyomo.solvers.plugins.solvers.cplex_direct.is_fixed", wraps=is_fixed + ) as mock_is_fixed: + self.assertEqual(mock_is_fixed.call_count, 0) + self._opt.add_constraint(self._model.c2) + self.assertEqual(mock_is_fixed.call_count, 1) + + def test_skip_trivial_and_call_count_for_unfixed_con_is_two(self): + self.setup(skip_trivial_constraints=True) + self.assertTrue(self._opt._skip_trivial_constraints) + self.assertFalse(self._model.c2.body.is_fixed()) + + with unittest.mock.patch( + "pyomo.solvers.plugins.solvers.cplex_direct.is_fixed", wraps=is_fixed + ) as mock_is_fixed: + 
self.assertEqual(mock_is_fixed.call_count, 0) + self._opt.add_constraint(self._model.c2) + self.assertEqual(mock_is_fixed.call_count, 2) + + def test_skip_trivial_and_call_count_for_unfixed_equality_con_is_three(self): + self.setup(skip_trivial_constraints=True) + self._model.c2 = Constraint(expr=self._model.x == 1) + self.assertTrue(self._opt._skip_trivial_constraints) + self.assertFalse(self._model.c2.body.is_fixed()) + + with unittest.mock.patch( + "pyomo.solvers.plugins.solvers.cplex_direct.is_fixed", wraps=is_fixed + ) as mock_is_fixed: + self.assertEqual(mock_is_fixed.call_count, 0) + self._opt.add_constraint(self._model.c2) + self.assertEqual(mock_is_fixed.call_count, 3) + + def test_dont_skip_trivial_and_call_count_for_fixed_con_is_one(self): + self.setup(skip_trivial_constraints=False) + self._model.x.fix(1) + self.assertFalse(self._opt._skip_trivial_constraints) + self.assertTrue(self._model.c2.body.is_fixed()) + + with unittest.mock.patch( + "pyomo.solvers.plugins.solvers.cplex_direct.is_fixed", wraps=is_fixed + ) as mock_is_fixed: + self.assertEqual(mock_is_fixed.call_count, 0) + self._opt.add_constraint(self._model.c2) + self.assertEqual(mock_is_fixed.call_count, 1) + + def test_dont_skip_trivial_and_call_count_for_unfixed_con_is_one(self): + self.setup(skip_trivial_constraints=False) + self.assertFalse(self._opt._skip_trivial_constraints) + self.assertFalse(self._model.c2.body.is_fixed()) + + with unittest.mock.patch( + "pyomo.solvers.plugins.solvers.cplex_direct.is_fixed", wraps=is_fixed + ) as mock_is_fixed: + self.assertEqual(mock_is_fixed.call_count, 0) + self._opt.add_constraint(self._model.c2) + self.assertEqual(mock_is_fixed.call_count, 1) + + +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestDataContainers(unittest.TestCase): + def test_variable_data(self): + solver_model = cplex.Cplex() + var_data = _VariableData(solver_model) + var_data.add(lb=0, ub=1, 
type_=solver_model.variables.type.binary, name="var1") + var_data.add( + lb=0, ub=10, type_=solver_model.variables.type.integer, name="var2" + ) + var_data.add( + lb=-cplex.infinity, + ub=cplex.infinity, + type_=solver_model.variables.type.continuous, + name="var3", + ) + self.assertEqual(solver_model.variables.get_num(), 0) + var_data.store_in_cplex() + self.assertEqual(solver_model.variables.get_num(), 3) + + def test_constraint_data(self): + solver_model = cplex.Cplex() + + solver_model.variables.add( + lb=[-cplex.infinity, -cplex.infinity, -cplex.infinity], + ub=[cplex.infinity, cplex.infinity, cplex.infinity], + types=[ + solver_model.variables.type.continuous, + solver_model.variables.type.continuous, + solver_model.variables.type.continuous, + ], + names=["var1", "var2", "var3"], + ) + con_data = _LinearConstraintData(solver_model) + con_data.add( + cplex_expr=_CplexExpr(variables=[0, 1], coefficients=[10, 100]), + sense="L", + rhs=0, + range_values=0, + name="c1", + ) + con_data.add( + cplex_expr=_CplexExpr(variables=[0], coefficients=[-30]), + sense="G", + rhs=1, + range_values=0, + name="c2", + ) + con_data.add( + cplex_expr=_CplexExpr(variables=[1], coefficients=[80]), + sense="E", + rhs=2, + range_values=0, + name="c3", + ) + con_data.add( + cplex_expr=_CplexExpr(variables=[2], coefficients=[50]), + sense="R", + rhs=3, + range_values=10, + name="c4", + ) + + self.assertEqual(solver_model.linear_constraints.get_num(), 0) + con_data.store_in_cplex() + self.assertEqual(solver_model.linear_constraints.get_num(), 4) + + +@unittest.skipIf(not unittest.mock_available, "'mock' is not available") +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestAddVar(unittest.TestCase): + def test_add_single_variable(self): + """ Test that the variable is added correctly to `solver_model`. 
""" + model = ConcreteModel() + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.variables.get_num(), 0) + self.assertEqual(opt._solver_model.variables.get_num_binary(), 0) + + model.X = Var(within=Binary) + + var_interface = opt._solver_model.variables + with unittest.mock.patch.object( + var_interface, "add", wraps=var_interface.add + ) as wrapped_add_call, unittest.mock.patch.object( + var_interface, "set_lower_bounds", wraps=var_interface.set_lower_bounds + ) as wrapped_lb_call, unittest.mock.patch.object( + var_interface, "set_upper_bounds", wraps=var_interface.set_upper_bounds + ) as wrapped_ub_call: + opt._add_var(model.X) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ({"lb": [0], "names": ["x1"], "types": ["B"], "ub": [1]},), + ) + + self.assertFalse(wrapped_lb_call.called) + self.assertFalse(wrapped_ub_call.called) + + self.assertEqual(opt._solver_model.variables.get_num(), 1) + self.assertEqual(opt._solver_model.variables.get_num_binary(), 1) + + def test_add_block_containing_single_variable(self): + """ Test that the variable is added correctly to `solver_model`. 
""" + model = ConcreteModel() + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.variables.get_num(), 0) + self.assertEqual(opt._solver_model.variables.get_num_binary(), 0) + + model.X = Var(within=Binary) + + with unittest.mock.patch.object( + opt._solver_model.variables, "add", wraps=opt._solver_model.variables.add + ) as wrapped_add_call: + opt._add_block(model) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ({"lb": [0], "names": ["x1"], "types": ["B"], "ub": [1]},), + ) + + self.assertEqual(opt._solver_model.variables.get_num(), 1) + self.assertEqual(opt._solver_model.variables.get_num_binary(), 1) + + def test_add_block_containing_multiple_variables(self): + """ Test that: + - The variable is added correctly to `solver_model` + - The CPLEX `variables` interface is called only once + - Fixed variable bounds are set correctly + """ + model = ConcreteModel() + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.variables.get_num(), 0) + + model.X1 = Var(within=Binary) + model.X2 = Var(within=NonNegativeReals) + model.X3 = Var(within=NonNegativeIntegers) + + model.X3.fix(5) + + with unittest.mock.patch.object( + opt._solver_model.variables, "add", wraps=opt._solver_model.variables.add + ) as wrapped_add_call: + opt._add_block(model) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ( + { + "lb": [0, 0, 5], + "names": ["x1", "x2", "x3"], + "types": ["B", "C", "I"], + "ub": [1, cplex.infinity, 5], + }, + ), + ) + + self.assertEqual(opt._solver_model.variables.get_num(), 3) + + +@unittest.skipIf(not unittest.mock_available, "'mock' is not available") +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestAddCon(unittest.TestCase): + def test_add_single_constraint(self): + model = 
ConcreteModel() + model.X = Var(within=Binary) + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 0) + + model.C = Constraint(expr=model.X == 1) + + con_interface = opt._solver_model.linear_constraints + with unittest.mock.patch.object( + con_interface, "add", wraps=con_interface.add + ) as wrapped_add_call: + opt._add_constraint(model.C) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ( + { + "lin_expr": [[[0], (1,)]], + "names": ["x2"], + "range_values": [0.0], + "rhs": [1.0], + "senses": ["E"], + }, + ), + ) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 1) + + def test_add_block_containing_single_constraint(self): + model = ConcreteModel() + model.X = Var(within=Binary) + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 0) + + model.B = Block() + model.B.C = Constraint(expr=model.X == 1) + + con_interface = opt._solver_model.linear_constraints + with unittest.mock.patch.object( + con_interface, "add", wraps=con_interface.add + ) as wrapped_add_call: + opt._add_block(model.B) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ( + { + "lin_expr": [[[0], (1,)]], + "names": ["x2"], + "range_values": [0.0], + "rhs": [1.0], + "senses": ["E"], + }, + ), + ) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 1) + + def test_add_block_containing_multiple_constraints(self): + model = ConcreteModel() + model.X = Var(within=Binary) + + opt = SolverFactory("cplex", solver_io="python") + opt._set_instance(model) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 0) + + model.B = Block() + model.B.C1 = Constraint(expr=model.X == 1) + model.B.C2 = Constraint(expr=model.X <= 1) + model.B.C3 = Constraint(expr=model.X >= 
1) + + con_interface = opt._solver_model.linear_constraints + with unittest.mock.patch.object( + con_interface, "add", wraps=con_interface.add + ) as wrapped_add_call: + opt._add_block(model.B) + + self.assertEqual(wrapped_add_call.call_count, 1) + self.assertEqual( + wrapped_add_call.call_args, + ( + { + "lin_expr": [[[0], (1,)], [[0], (1,)], [[0], (1,)]], + "names": ["x2", "x3", "x4"], + "range_values": [0.0, 0.0, 0.0], + "rhs": [1.0, 1.0, 1.0], + "senses": ["E", "L", "G"], + }, + ), + ) + + self.assertEqual(opt._solver_model.linear_constraints.get_num(), 3) + + +@unittest.skipIf(not unittest.mock_available, "'mock' is not available") +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestLoadVars(unittest.TestCase): + def setUp(self): + opt = SolverFactory("cplex", solver_io="python") + model = ConcreteModel() + model.X = Var(within=NonNegativeReals, initialize=0) + model.Y = Var(within=NonNegativeReals, initialize=0) + + model.C1 = Constraint(expr=2 * model.X + model.Y >= 8) + model.C2 = Constraint(expr=model.X + 3 * model.Y >= 6) + + model.O = Objective(expr=model.X + model.Y) + + opt.solve(model, load_solutions=False, save_results=False) + + self._model = model + self._opt = opt + + def test_all_vars_are_loaded(self): + self.assertTrue(self._model.X.stale) + self.assertTrue(self._model.Y.stale) + self.assertEqual(value(self._model.X), 0) + self.assertEqual(value(self._model.Y), 0) + + with unittest.mock.patch.object( + self._opt._solver_model.solution, + "get_values", + wraps=self._opt._solver_model.solution.get_values, + ) as wrapped_values_call: + self._opt.load_vars() + + self.assertEqual(wrapped_values_call.call_count, 1) + self.assertEqual(wrapped_values_call.call_args, tuple()) + + self.assertFalse(self._model.X.stale) + self.assertFalse(self._model.Y.stale) + self.assertAlmostEqual(value(self._model.X), 3.6) + self.assertAlmostEqual(value(self._model.Y), 0.8) + + def 
test_only_specified_vars_are_loaded(self): + self.assertTrue(self._model.X.stale) + self.assertTrue(self._model.Y.stale) + self.assertEqual(value(self._model.X), 0) + self.assertEqual(value(self._model.Y), 0) + + with unittest.mock.patch.object( + self._opt._solver_model.solution, + "get_values", + wraps=self._opt._solver_model.solution.get_values, + ) as wrapped_values_call: + self._opt.load_vars([self._model.X]) + + self.assertEqual(wrapped_values_call.call_count, 1) + self.assertEqual(wrapped_values_call.call_args, (([0],), {})) + + self.assertFalse(self._model.X.stale) + self.assertTrue(self._model.Y.stale) + self.assertAlmostEqual(value(self._model.X), 3.6) + self.assertEqual(value(self._model.Y), 0) + + with unittest.mock.patch.object( + self._opt._solver_model.solution, + "get_values", + wraps=self._opt._solver_model.solution.get_values, + ) as wrapped_values_call: + self._opt.load_vars([self._model.Y]) + + self.assertEqual(wrapped_values_call.call_count, 1) + self.assertEqual(wrapped_values_call.call_args, (([1],), {})) + + self.assertFalse(self._model.X.stale) + self.assertFalse(self._model.Y.stale) + self.assertAlmostEqual(value(self._model.X), 3.6) + self.assertAlmostEqual(value(self._model.Y), 0.8) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/solvers/tests/checks/test_CPLEXPersistent.py b/pyomo/solvers/tests/checks/test_CPLEXPersistent.py new file mode 100644 index 00000000000..51ff1ab7e43 --- /dev/null +++ b/pyomo/solvers/tests/checks/test_CPLEXPersistent.py @@ -0,0 +1,35 @@ +import pyutilib.th as unittest + +from pyomo.environ import * +from pyomo.opt import * + +try: + import cplex + + cplexpy_available = True +except ImportError: + cplexpy_available = False + + +@unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") +class TestQuadraticObjective(unittest.TestCase): + def test_quadratic_objective_is_set(self): + model = ConcreteModel() + model.X = Var(bounds=(-2, 2)) + model.Y = Var(bounds=(-2, 
2)) + model.O = Objective(expr=model.X ** 2 + model.Y ** 2) + model.C1 = Constraint(expr=model.Y >= 2 * model.X - 1) + model.C2 = Constraint(expr=model.Y >= -model.X + 2) + opt = SolverFactory("cplex_persistent") + opt.set_instance(model) + opt.solve() + + self.assertAlmostEqual(model.X.value, 1, places=3) + self.assertAlmostEqual(model.Y.value, 1, places=3) + + del model.O + model.O = Objective(expr=model.X ** 2) + opt.set_objective(model.O) + opt.solve() + self.assertAlmostEqual(model.X.value, 0, places=3) + self.assertAlmostEqual(model.Y.value, 2, places=3) diff --git a/pyomo/solvers/tests/checks/test_GAMS.py b/pyomo/solvers/tests/checks/test_GAMS.py index 6cb612edd22..f7fe0655e62 100644 --- a/pyomo/solvers/tests/checks/test_GAMS.py +++ b/pyomo/solvers/tests/checks/test_GAMS.py @@ -10,7 +10,9 @@ from pyomo.environ import * -from pyomo.solvers.plugins.solvers.GAMS import GAMSShell, GAMSDirect +from pyomo.solvers.plugins.solvers.GAMS import ( + GAMSShell, GAMSDirect, gdxcc_available +) import pyutilib.th as unittest from pyutilib.misc import capture_output import os, shutil @@ -101,9 +103,9 @@ def test_file_removal_gms(self): self.assertFalse(os.path.exists(os.path.join(tmpdir, 'output.lst'))) self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'results.dat'))) + 'GAMS_MODEL_p.gdx'))) self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'resultsstat.dat'))) + 'GAMS_MODEL_s.gdx'))) os.rmdir(tmpdir) @@ -156,10 +158,16 @@ def test_keepfiles_gms(self): 'model.gms'))) self.assertTrue(os.path.exists(os.path.join(tmpdir, 'output.lst'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - 'results.dat'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - 'resultsstat.dat'))) + if gdxcc_available: + self.assertTrue(os.path.exists(os.path.join( + tmpdir, 'GAMS_MODEL_p.gdx'))) + self.assertTrue(os.path.exists(os.path.join( + tmpdir, 'results_s.gdx'))) + else: + self.assertTrue(os.path.exists(os.path.join( + tmpdir, 'results.dat'))) + 
self.assertTrue(os.path.exists(os.path.join( + tmpdir, 'resultsstat.dat'))) shutil.rmtree(tmpdir) diff --git a/pyomo/solvers/tests/checks/test_no_solution_behavior.py b/pyomo/solvers/tests/checks/test_no_solution_behavior.py index 43a6d8eb256..9a84696a06f 100644 --- a/pyomo/solvers/tests/checks/test_no_solution_behavior.py +++ b/pyomo/solvers/tests/checks/test_no_solution_behavior.py @@ -78,9 +78,9 @@ def failed_solve_test(self): # Skip this test if the status is 'skip' if test_case.status == 'skip': - def skipping_this(self): + def skipping_test(self): return self.skipTest(test_case.msg) - return skipping_this + return skipping_test if is_expected_failure: @unittest.expectedFailure @@ -108,6 +108,7 @@ def failing_failed_solve_test(self): cls = new.classobj(name, (unittest.TestCase,), {}) else: cls = types.new_class(name, (unittest.TestCase,)) + cls.__module__ = __name__ cls = unittest.category(*case.level)(cls) driver[model] = cls globals()[name] = cls @@ -126,7 +127,10 @@ def failing_failed_solve_test(self): test_name = "test_"+solver+"_"+io test_method = create_test_method(model, solver, io, value) if test_method is not None: + test_method = unittest.category('smoke','nightly',solver)( + test_method) setattr(cls, test_name, test_method) + test_method = None # Reset the cls variable, since it contains a unittest.TestCase subclass. # This prevents this class from being processed twice! 
diff --git a/pyomo/solvers/tests/checks/test_pickle.py b/pyomo/solvers/tests/checks/test_pickle.py index cb7345fdc11..c9ec50867bb 100644 --- a/pyomo/solvers/tests/checks/test_pickle.py +++ b/pyomo/solvers/tests/checks/test_pickle.py @@ -104,9 +104,19 @@ def pickle_test(self): # Skip this test if the status is 'skip' if test_case.status == 'skip': - def skipping_this(self): + def skipping_test(self): return self.skipTest(test_case.msg) - return skipping_this + return skipping_test + + # If this solver is in demo mode + size = getattr(test_case.model, 'size', (None, None, None)) + for prb, sol in zip(size, test_case.demo_limits): + if prb is None or sol is None: + continue + if prb > sol: + def skipping_test(self): + self.skipTest("Problem is too large for unlicensed %s solver" % solver) + return skipping_test if is_expected_failure: @unittest.expectedFailure @@ -133,6 +143,7 @@ def failing_pickle_test(self): cls = new.classobj(name, (unittest.TestCase,), {}) else: cls = types.new_class(name, (unittest.TestCase,)) + cls.__module__ = __name__ cls = unittest.category(*case.level)(cls) driver[model] = cls globals()[name] = cls @@ -146,12 +157,17 @@ def failing_pickle_test(self): test_name = "test_"+solver+"_"+io +"_symbolic_labels" test_method = create_test_method(model, solver, io, value, True) if test_method is not None: + test_method = unittest.category('smoke','nightly',solver)(test_method) setattr(cls, test_name, test_method) + test_method = None + # Non-symbolic labels test_name = "test_"+solver+"_"+io +"_nonsymbolic_labels" test_method = create_test_method(model, solver, io, value, False) if test_method is not None: + test_method = unittest.category('smoke','nightly',solver)(test_method) setattr(cls, test_name, test_method) + test_method = None # Reset the cls variable, since it contains a unittest.TestCase subclass. # This prevents this class from being processed twice! 
diff --git a/pyomo/solvers/tests/checks/test_writers.py b/pyomo/solvers/tests/checks/test_writers.py index 5ebc1c13642..f5dc27c6ea3 100644 --- a/pyomo/solvers/tests/checks/test_writers.py +++ b/pyomo/solvers/tests/checks/test_writers.py @@ -84,7 +84,8 @@ def writer_test(self): else: model_class.model.solutions.load_from(results, default_variable_value=opt.default_variable_value()) model_class.save_current_solution(save_filename, suffixes=model_class.test_suffixes) - rc = model_class.validate_current_solution(suffixes=model_class.test_suffixes) + rc = model_class.validate_current_solution(suffixes=model_class.test_suffixes, + exclude_suffixes=test_case.exclude_suffixes) if is_expected_failure: if rc[0]: @@ -119,9 +120,19 @@ def writer_test(self): # Skip this test if the status is 'skip' if test_case.status == 'skip': - def skipping_this(self): - return self.skipTest(test_case.msg) - return skipping_this + def skipping_test(self): + self.skipTest(test_case.msg) + return skipping_test + + # If this solver is in demo mode + size = getattr(test_case.model, 'size', (None, None, None)) + for prb, sol in zip(size, test_case.demo_limits): + if prb is None or sol is None: + continue + if prb > sol: + def skipping_test(self): + self.skipTest("Problem is too large for unlicensed %s solver" % solver) + return skipping_test if is_expected_failure: @unittest.expectedFailure @@ -149,6 +160,7 @@ def failing_writer_test(self): cls = new.classobj(name, (unittest.TestCase,), {}) else: cls = types.new_class(name, (unittest.TestCase,)) + cls.__module__ = __name__ cls = unittest.category(*case.level)(cls) driver[model] = cls globals()[name] = cls @@ -164,13 +176,17 @@ def failing_writer_test(self): test_name = "test_"+solver+"_"+io +"_symbolic_labels" test_method = create_test_method(model, solver, io, value, True) if test_method is not None: + test_method = unittest.category('smoke','nightly',solver)(test_method) setattr(cls, test_name, test_method) + test_method = None # Non-symbolic 
labels test_name = "test_"+solver+"_"+io +"_nonsymbolic_labels" test_method = create_test_method(model, solver, io, value, False) if test_method is not None: + test_method = unittest.category('smoke','nightly',solver)(test_method) setattr(cls, test_name, test_method) + test_method = None # Reset the cls variable, since it contains a unittest.TestCase subclass. # This prevents this class from being processed twice! diff --git a/pyomo/solvers/tests/models/LP_compiled.py b/pyomo/solvers/tests/models/LP_compiled.py index 1ee7ab63516..88d63666c62 100644 --- a/pyomo/solvers/tests/models/LP_compiled.py +++ b/pyomo/solvers/tests/models/LP_compiled.py @@ -38,6 +38,7 @@ class LP_compiled(_BaseTestModel): description = "LP_compiled" capabilities = set(['linear']) test_pickling = False + size = (13, 22, None) def __init__(self): _BaseTestModel.__init__(self) diff --git a/pyomo/solvers/tests/models/LP_duals_maximize.py b/pyomo/solvers/tests/models/LP_duals_maximize.py index 95267088a34..496cc9815c8 100644 --- a/pyomo/solvers/tests/models/LP_duals_maximize.py +++ b/pyomo/solvers/tests/models/LP_duals_maximize.py @@ -23,6 +23,7 @@ class LP_duals_maximize(_BaseTestModel): description = "LP_duals_maximize" level = ('nightly', 'expensive') capabilities = set(['linear']) + size = (13, 22, None) def __init__(self): _BaseTestModel.__init__(self) diff --git a/pyomo/solvers/tests/models/LP_duals_minimize.py b/pyomo/solvers/tests/models/LP_duals_minimize.py index 719ab654e9d..9a6a8d22d09 100644 --- a/pyomo/solvers/tests/models/LP_duals_minimize.py +++ b/pyomo/solvers/tests/models/LP_duals_minimize.py @@ -23,6 +23,7 @@ class LP_duals_minimize(_BaseTestModel): description = "LP_duals_minimize" level = ('nightly', 'expensive') capabilities = set(['linear']) + size = (12, 12, None) def __init__(self): _BaseTestModel.__init__(self) diff --git a/pyomo/solvers/tests/models/base.py b/pyomo/solvers/tests/models/base.py index 5e644c76058..b7e25ff0692 100644 --- a/pyomo/solvers/tests/models/base.py 
+++ b/pyomo/solvers/tests/models/base.py @@ -191,6 +191,7 @@ def validate_current_solution(self, **kwds): model = self.model suffixes = dict((suffix, getattr(model,suffix)) for suffix in kwds.pop('suffixes',[])) + exclude = kwds.pop('exclude_suffixes',set()) for suf in suffixes.values(): if isinstance(self.model, IBlock): assert isinstance(suf,pmo.suffix) @@ -226,8 +227,12 @@ def validate_current_solution(self, **kwds): solution[var.name]['stale'], var.stale)) for suffix_name, suffix in suffixes.items(): + _ex = exclude.get(suffix_name, None) if suffix_name in solution[var.name]: if suffix.get(var) is None: + if _ex is not None and ( + not _ex[1] or var.name in _ex[1] ): + continue if not(solution[var.name][suffix_name] in \ solution["suffix defaults"][suffix_name]): return (False, @@ -236,6 +241,12 @@ def validate_current_solution(self, **kwds): suffix, solution[var.name][suffix_name], "none defined")) + elif _ex is not None and _ex[0] and ( + not _ex[1] or var.name in _ex[1] ): + return ( + False, + "Expected solution to be missing suffix %s" + % suffix_name) elif not abs(solution[var.name][suffix_name] - \ suffix.get(var)) < self.diff_tol: return (False, @@ -257,8 +268,12 @@ def validate_current_solution(self, **kwds): con_value_sol, con_value)) for suffix_name, suffix in suffixes.items(): + _ex = exclude.get(suffix_name, None) if suffix_name in solution[con.name]: if suffix.get(con) is None: + if _ex is not None and ( + not _ex[1] or con.name in _ex[1] ): + continue if not (solution[con.name][suffix_name] in \ solution["suffix defaults"][suffix_name]): return (False, @@ -267,6 +282,12 @@ def validate_current_solution(self, **kwds): suffix, solution[con.name][suffix_name], "none defined")) + elif _ex is not None and _ex[0] and ( + not _ex[1] or con.name in _ex[1] ): + return ( + False, + "Expected solution to be missing suffix %s" + % suffix_name) elif not abs(solution[con.name][suffix_name] - \ suffix.get(con)) < self.diff_tol: return (False, @@ -288,8 +309,12 
@@ def validate_current_solution(self, **kwds): obj_value_sol, obj_value)) for suffix_name, suffix in suffixes.items(): + _ex = exclude.get(suffix_name, None) if suffix_name in solution[obj.name]: if suffix.get(obj) is None: + if _ex is not None and ( + not _ex[1] or obj.name in _ex[1] ): + continue if not(solution[obj.name][suffix_name] in \ solution["suffix defaults"][suffix_name]): return (False, @@ -298,6 +323,12 @@ def validate_current_solution(self, **kwds): suffix, solution[obj.name][suffix_name], "none defined")) + elif _ex is not None and _ex[0] and ( + not _ex[1] or obj.name in _ex[1] ): + return ( + False, + "Expected solution to be missing suffix %s" + % suffix_name) elif not abs(solution[obj.name][suffix_name] - \ suffix.get(obj)) < self.diff_tol: return (False, @@ -313,9 +344,13 @@ def validate_current_solution(self, **kwds): first=False continue for suffix_name, suffix in suffixes.items(): + _ex = exclude.get(suffix_name, None) if (solution[block.name] is not None) and \ (suffix_name in solution[block.name]): if suffix.get(block) is None: + if _ex is not None and ( + not _ex[1] or block.name in _ex[1] ): + continue if not(solution[block.name][suffix_name] in \ solution["suffix defaults"][suffix_name]): return (False, @@ -324,6 +359,12 @@ def validate_current_solution(self, **kwds): suffix, solution[block.name][suffix_name], "none defined")) + elif _ex is not None and _ex[0] and ( + not _ex[1] or block.name in _ex[1] ): + return ( + False, + "Expected solution to be missing suffix %s" + % suffix_name) elif not abs(solution[block.name][suffix_name] - \ suffix.get(block)) < self.diff_tol: return (False, diff --git a/pyomo/solvers/tests/solvers.py b/pyomo/solvers/tests/solvers.py index 9a9c6a04463..0f8abae6137 100644 --- a/pyomo/solvers/tests/solvers.py +++ b/pyomo/solvers/tests/solvers.py @@ -37,6 +37,14 @@ def initialize(**kwds): obj = Options(**kwds) # + # Set the limits for the solver's "demo" (unlicensed) mode: + # ( nVars, nCons, nNonZeros ) + 
obj.demo_limits = (None, None, None) + if (obj.name == "baron") and \ + (not BARONSHELL.license_is_valid()): + obj.demo_limits = (10, 10, 50) + # + # # Set obj.available # opt = None @@ -49,9 +57,6 @@ def initialize(**kwds): elif (obj.name == "gurobi") and \ (not GUROBISHELL.license_is_valid()): obj.available = False - elif (obj.name == "baron") and \ - (not BARONSHELL.license_is_valid()): - obj.available = False elif (obj.name == "mosek") and \ (not MosekDirect.license_is_valid()): obj.available = False diff --git a/pyomo/solvers/tests/testcases.py b/pyomo/solvers/tests/testcases.py index f5eec554984..21c38bb2194 100644 --- a/pyomo/solvers/tests/testcases.py +++ b/pyomo/solvers/tests/testcases.py @@ -24,11 +24,24 @@ _trunk_version = (float('inf'), float('inf'), float('inf'), float('inf')) # These are usually due to a bug in the latest version of the -# thirdparty solver Tests will be expected to fail. If they do not, +# thirdparty solver. Tests will be expected to fail. If they do not, # that means the solver has been fixed and that particular case should # no longer exist in the list of expected failures ExpectedFailures = {} +# These are usually due to a bug in the latest version of the thirdparty +# solver. The solver is expected to run successfully, but will not +# return suffix information. If they return suffix information, that +# means the solver has been fixed and that particular case should no +# longer exist in the list of expected failures. This dict has (solver, +# io, test) tuples as keys and values that are either a dict mapping +# suffix to "(bool(enforce), set(object_names))" or a list of suffix +# names (in which case enforcing is set to True and the set is empty, +# indicating ALL objects). If enforcing is True the test will fail if +# the missing suffix was found. Set enforcing to false for tests where +# the solver is inconsistent in returning duals. 
+MissingSuffixFailures = {} + # # MOSEK # @@ -48,24 +61,29 @@ # CPLEX # -ExpectedFailures['cplex', 'lp', 'QCP_simple'] = \ - (lambda v: v <= _trunk_version, +MissingSuffixFailures['cplex', 'lp', 'QCP_simple'] = ( + lambda v: v <= _trunk_version, + {'dual': (True, {'qc0','qc1'})}, "Cplex does not report duals of quadratic constraints.") -ExpectedFailures['cplex', 'mps', 'QCP_simple'] =\ - (lambda v: v <= _trunk_version, +MissingSuffixFailures['cplex', 'mps', 'QCP_simple'] = ( + lambda v: v <= _trunk_version, + {'dual': (True, {'qc0','qc1'})}, "Cplex does not report duals of quadratic constraints.") -ExpectedFailures['cplex', 'python', 'QCP_simple'] =\ - (lambda v: v <= _trunk_version, +MissingSuffixFailures['cplex', 'python', 'QCP_simple'] = ( + lambda v: v <= _trunk_version, + {'dual': (True, {'qc0','qc1'})}, "Cplex does not report duals of quadratic constraints.") -ExpectedFailures['cplex_persistent', 'python', 'QCP_simple'] =\ - (lambda v: v <= _trunk_version, +MissingSuffixFailures['cplex_persistent', 'python', 'QCP_simple'] = ( + lambda v: v <= _trunk_version, + {'dual': (True, {'qc0','qc1'})}, "Cplex does not report duals of quadratic constraints.") -ExpectedFailures['cplex', 'nl', 'QCP_simple'] = \ - (lambda v: v <= (12,5,9,9), +MissingSuffixFailures['cplex', 'nl', 'QCP_simple'] = ( + lambda v: v <= (12,5,9,9), + {'dual': (True, {'qc0','qc1'})}, "Cplex does not report duals of quadratic constraints.") # @@ -252,25 +270,78 @@ # BARON # -ExpectedFailures['baron', 'bar', 'LP_piecewise'] = \ - (lambda v: v <= (15,0,0,0), +# Known to fail through 18.11.15, but was resolved by 19.12.7 +ExpectedFailures['baron', 'bar', 'MILP_unbounded'] = ( + lambda v: v <= (18,11,15), + ['dual'], + "Baron fails to report a MILP model as unbounded") + +# Known to work through 18.11.15, and fail in 19.12.7 +MissingSuffixFailures['baron', 'bar', 'LP_piecewise'] = ( + lambda v: v <= (15,0,0,0) or v > (18,11,15), + ['dual'], + "Baron will not return dual solution when a solution is " + 
"found during preprocessing.") + +# Marking this test suffixes as fragile: Baron 20.4.14 will +# intermittently return suffixes. +MissingSuffixFailures['baron', 'bar', 'QP_simple'] = ( + lambda v: v <= (15,2,0,0) or v > (18,11,15), + {'dual': (False, {}), 'rc': (False, {})}, + "Baron will intermittently return dual solution when " + "a solution is found during preprocessing.") + +# Known to fail through 17.4.1, but was resolved by 18.5.9 +MissingSuffixFailures['baron', 'bar', 'QCP_simple'] = ( + lambda v: v <= (17,4,1) or v > (18,11,15), + ['dual','rc'], "Baron will not return dual solution when a solution is " "found during preprocessing.") -ExpectedFailures['baron', 'bar', 'QP_simple'] = \ - (lambda v: v <= (15,2,0,0), +# Known to work through 18.11.15, and fail in 19.12.7 +MissingSuffixFailures['baron', 'bar', 'LP_block'] = ( + lambda v: v > (18,11,15), + ['dual'], "Baron will not return dual solution when a solution is " "found during preprocessing.") -# Known to fail through 17.4.1, but was resolved by 18.5.9 -ExpectedFailures['baron', 'bar', 'QCP_simple'] = \ - (lambda v: v <= (17,4,1), +# Known to work through 18.11.15, and fail in 19.12.7 +MissingSuffixFailures['baron', 'bar', 'LP_inactive_index'] = ( + lambda v: v > (18,11,15), + ['dual'], + "Baron will not return dual solution when a solution is " + "found during preprocessing.") + +# Known to work through 18.11.15, and fail in 19.12.7 +MissingSuffixFailures['baron', 'bar', 'LP_simple'] = ( + lambda v: v > (18,11,15), + ['dual'], + "Baron will not return dual solution when a solution is " + "found during preprocessing.") + +# Known to work through 18.11.15, and fail in 19.12.7 +MissingSuffixFailures['baron', 'bar', 'LP_trivial_constraints'] = ( + lambda v: v > (18,11,15), + ['dual'], + "Baron will not return dual solution when a solution is " + "found during preprocessing.") + +# Known to work through 19.12.7, and fail in 20.4.14 +MissingSuffixFailures['baron', 'bar', 'LP_duals_minimize'] = ( + lambda v: 
v > (19,12,7), + ['dual','rc'], "Baron will not return dual solution when a solution is " "found during preprocessing.") -ExpectedFailures['baron', 'bar', 'MILP_unbounded'] = \ - (lambda v: v < _trunk_version, - "Baron fails to report a MILP model as unbounded") +# Known to work through 19.12.7, and fail in 20.4.14 +MissingSuffixFailures['baron', 'bar', 'LP_duals_maximize'] = ( + lambda v: v > (19,12,7), + ['dual','rc'], + "Baron will not return dual solution when a solution is " + "found during preprocessing.") + + + # # KNITROAMPL @@ -297,6 +368,7 @@ def test_scenarios(arg=None): continue # Set status values for expected failures + exclude_suffixes = {} status='ok' msg="" if not _solver_case.available: @@ -308,9 +380,22 @@ def test_scenarios(arg=None): case[0](_solver_case.version): status='expected failure' msg=case[1] + if (solver,io,_model.description) in MissingSuffixFailures: + case = MissingSuffixFailures[solver,io,_model.description] + if _solver_case.version is not None and\ + case[0](_solver_case.version): + if type(case[1]) is dict: + exclude_suffixes.update(case[1]) + else: + for x in case[1]: + exclude_suffixes[x] = (True, {}) + msg=case[2] # Return scenario dimensions and scenario information - yield (model, solver, io), Options(status=status, msg=msg, model=_model, solver=None, testcase=_solver_case) + yield (model, solver, io), Options( + status=status, msg=msg, model=_model, solver=None, + testcase=_solver_case, demo_limits=_solver_case.demo_limits, + exclude_suffixes=exclude_suffixes) @unittest.nottest diff --git a/pyomo/util/check_units.py b/pyomo/util/check_units.py new file mode 100644 index 00000000000..96f206873f7 --- /dev/null +++ b/pyomo/util/check_units.py @@ -0,0 +1,171 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National 
Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# __________________________________________________________________________ +# +# +""" Pyomo Units Checking Module +This module has some helpful methods to support checking units on Pyomo +module objects. +""" +from pyomo.core.base.units_container import units, UnitsError, UnitExtractionVisitor +from pyomo.core.base import (Objective, Constraint, Var, Param, + Suffix, Set, RangeSet, Block, + ExternalFunction, Expression) +from pyomo.gdp import Disjunct, Disjunction + +from pyomo.gdp import Disjunct +from pyomo.gdp import Disjunction +from pyomo.core.expr.template_expr import IndexTemplate +from pyomo.core.expr.numvalue import native_types + +def check_units_equivalent(*args): + """ + Returns True if the units associated with each of the + expressions passed as arguments are all equivalent (and False + otherwise). + + Note that this method will raise an exception if the units are + inconsistent within an expression (since the units for that + expression are not valid). + + Parameters + ---------- + args : an argument list of Pyomo expressions + + Returns + ------- + bool : True if all the expressions passed as argments have the same units + """ + try: + assert_units_equivalent(*args) + return True + except UnitsError: + return False + +def assert_units_equivalent(*args): + """ + Raise an exception if the units are inconsistent within an + expression, or not equivalent across all the passed + expressions. 
+ + Parameters + ---------- + args : an argument list of Pyomo expressions + The Pyomo expressions to test + + Raises + ------ + :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` + """ + # this call will raise an exception if an inconsistency is found + pyomo_unit_compare, pint_unit_compare = units._get_units_tuple(args[0]) + for expr in args[1:]: + # this call will raise an exception if an inconsistency is found + pyomo_unit, pint_unit = units._get_units_tuple(expr) + if not UnitExtractionVisitor(units)._pint_units_equivalent(pint_unit_compare, pint_unit): + raise UnitsError \ + ("Units between {} and {} are not consistent.".format(str(pyomo_unit_compare), str(pyomo_unit))) + +def _assert_units_consistent_constraint_data(condata): + """ + Raise an exception if the any units in lower, body, upper on a + ConstraintData object are not consistent or are not equivalent + with each other. + """ + if condata.equality: + if condata.lower == 0.0: + # Pyomo can rearrange expressions, resulting in a value + # of 0 for the RHS that does not have units associated + # Therefore, if the RHS is 0, we allow it to be unitless + # and check the consistency of the body only + assert condata.upper == 0.0 + _assert_units_consistent_expression(condata.body) + else: + assert_units_equivalent(condata.lower, condata.body) + else: + assert_units_equivalent(condata.lower, condata.body, condata.upper) + +def _assert_units_consistent_property_expr(obj): + """ + Check the .expr property of the object and raise + an exception if the units are not consistent + """ + _assert_units_consistent_expression(obj.expr) + +def _assert_units_consistent_expression(expr): + """ + Raise an exception if any units in expr are inconsistent. 
+ # this call will raise an error if an inconsistency is found + pyomo_unit, pint_unit = units._get_units_tuple(expr=expr) + """ + pyomo_unit, pint_unit = units._get_units_tuple(expr) + +def _assert_units_consistent_block(obj): + """ + This method gets all the components from the block + and checks if the units are consistent on each of them + """ + # check all the component objects + for component in obj.component_objects(descend_into=True): + assert_units_consistent(component) + +_component_data_handlers = { + Objective: _assert_units_consistent_property_expr, + Constraint: _assert_units_consistent_constraint_data, + Var: _assert_units_consistent_expression, + Expression: _assert_units_consistent_property_expr, + Suffix: None, + Param: _assert_units_consistent_expression, + Set: None, + RangeSet: None, + Disjunct:_assert_units_consistent_block, + Disjunction: None, + Block: _assert_units_consistent_block, + ExternalFunction: None + } + +def assert_units_consistent(obj): + """ + This method raises an exception if the units are not + consistent on the passed in object. 
Argument obj can be one + of the following components: Pyomo Block (or Model), + Constraint, Objective, Expression, or it can be a Pyomo + expression object + + Parameters + ---------- + obj : Pyomo component (e.g., Block, Model, Constraint, Objective, or Expression) or Pyomo expression + The object or expression to test + + Raises + ------ + :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` + """ + objtype = type(obj) + if objtype in native_types: + return + elif obj.is_expression_type() or objtype is IndexTemplate: + _assert_units_consistent_expression(obj) + return + + # if object is not in our component handler, raise an exception + if obj.ctype not in _component_data_handlers: + raise TypeError("Units checking not supported for object of type {}.".format(obj.ctype)) + + # get the function form the list of handlers + handler = _component_data_handlers[obj.ctype] + if handler is None: + return + + if obj.is_indexed(): + # check all the component data objects + for cdata in obj.values(): + handler(cdata) + else: + handler(obj) diff --git a/pyomo/util/tests/test_check_units.py b/pyomo/util/tests/test_check_units.py new file mode 100644 index 00000000000..2e465d692c5 --- /dev/null +++ b/pyomo/util/tests/test_check_units.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ +# +# + +import pyutilib.th as unittest +from pyomo.environ import * +from pyomo.core.base.units_container import ( + pint_available, UnitsError, +) +from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent, check_units_equivalent + +@unittest.skipIf(not pint_available, 'Testing units requires pint') +class TestUnitsChecking(unittest.TestCase): + def test_assert_units_consistent_equivalent(self): + u = units + m = ConcreteModel() + m.dx = Var(units=u.m, initialize=0.10188943773836046) + m.dy = Var(units=u.m, initialize=0.0) + m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851) + m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851) + m.t = Var(units=u.min, bounds=(1e-5,10.0), initialize=0.0024015570927624456) + m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians) + m.a = Param(initialize=-32.2, units=u.ft/u.s**2) + m.x_unitless = Var() + + m.obj = Objective(expr = m.dx, sense=maximize) + m.vx_con = Constraint(expr = m.vx == 1.0*u.m/u.s*cos(m.theta)) + m.vy_con = Constraint(expr = m.vy == 1.0*u.m/u.s*sin(m.theta)) + m.dx_con = Constraint(expr = m.dx == m.vx*u.convert(m.t, to_units=u.s)) + m.dy_con = Constraint(expr = m.dy == m.vy*u.convert(m.t, to_units=u.s) + + 0.5*(u.convert(m.a, to_units=u.m/u.s**2))*(u.convert(m.t, to_units=u.s))**2) + m.ground = Constraint(expr = m.dy == 0) + m.unitless_con = Constraint(expr = m.x_unitless == 5.0) + + assert_units_consistent(m) # check model + assert_units_consistent(m.dx) # check var - this should never fail + assert_units_consistent(m.x_unitless) # check unitless var - this should never fail + assert_units_consistent(m.vx_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint + + assert_units_equivalent(m.dx, m.dy) # check var + assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var + assert_units_equivalent(m.x_unitless, 
None) # check unitless var + assert_units_equivalent(m.vx_con.body, u.m/u.s) # check constraint + assert_units_equivalent(m.unitless_con.body, u.dimensionless) # check unitless constraint + assert_units_equivalent(m.dx, m.dy) # check var + assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var + assert_units_equivalent(m.x_unitless, None) # check unitless var + assert_units_equivalent(m.vx_con.body, u.m/u.s) # check constraint + + m.broken = Constraint(expr = m.dy == 42.0*u.kg) + with self.assertRaises(UnitsError): + assert_units_consistent(m) + assert_units_consistent(m.dx) + assert_units_consistent(m.vx_con) + with self.assertRaises(UnitsError): + assert_units_consistent(m.broken) + + self.assertTrue(check_units_equivalent(m.dx, m.dy)) + self.assertFalse(check_units_equivalent(m.dx, m.vx)) + + def test_assert_units_consistent_on_datas(self): + u = units + m = ConcreteModel() + m.S = Set(initialize=[1,2,3]) + m.x = Var(m.S, units=u.m) + m.t = Var(m.S, units=u.s) + m.v = Var(m.S, units=u.m/u.s) + m.unitless = Var(m.S) + + @m.Constraint(m.S) + def vel_con(m,i): + return m.v[i] == m.x[i]/m.t[i] + @m.Constraint(m.S) + def unitless_con(m,i): + return m.unitless[i] == 42.0 + + m.obj = Objective(expr=m.v, sense=maximize) + + assert_units_consistent(m) # check model + assert_units_consistent(m.x) # check var + assert_units_consistent(m.t) # check var + assert_units_consistent(m.v) # check var + assert_units_consistent(m.unitless) # check var + assert_units_consistent(m.vel_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint + + assert_units_consistent(m.x[2]) # check var data + assert_units_consistent(m.t[2]) # check var data + assert_units_consistent(m.v[2]) # check var data + assert_units_consistent(m.unitless[2]) # check var + assert_units_consistent(m.vel_con[2]) # check constraint data + assert_units_consistent(m.unitless_con[2]) # check unitless constraint data + + assert_units_equivalent(m.x[2], m.x[1]) 
# check var data + assert_units_equivalent(m.t[2], u.s) # check var data + assert_units_equivalent(m.v[2], u.m/u.s) # check var data + assert_units_equivalent(m.unitless[2], u.dimensionless) # check var data unitless + assert_units_equivalent(m.unitless[2], None) # check var + assert_units_equivalent(m.vel_con[2]) # check constraint data + assert_units_equivalent(m.unitless_con[2], u.dimensionless) # check unitless constraint data + + @m.Constraint(m.S) + def broken(m,i): + return m.x[i] == 42.0*m.v[i] + with self.assertRaises(UnitsError): + assert_units_consistent(m) + with self.assertRaises(UnitsError): + assert_units_consistent(m.broken) + with self.assertRaises(UnitsError): + assert_units_consistent(m.broken[1]) + + # all of these should still work + assert_units_consistent(m.x) # check var + assert_units_consistent(m.t) # check var + assert_units_consistent(m.v) # check var + assert_units_consistent(m.unitless) # check var + assert_units_consistent(m.vel_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint + + assert_units_consistent(m.x[2]) # check var data + assert_units_consistent(m.t[2]) # check var data + assert_units_consistent(m.v[2]) # check var data + assert_units_consistent(m.unitless[2]) # check var + assert_units_consistent(m.vel_con[2]) # check constraint data + assert_units_consistent(m.unitless_con[2]) # check unitless constraint data + +if __name__ == "__main__": + unittest.main() diff --git a/setup.py b/setup.py index f90ecb255d5..f062add29f5 100644 --- a/setup.py +++ b/setup.py @@ -36,13 +36,6 @@ def get_version(): exec(_FILE.read(), _verInfo) return _verInfo['__version__'] -requires = [ - 'PyUtilib>=5.8.1.dev0', - 'appdirs', - 'ply', - 'six>=1.4', - ] - from setuptools import setup, find_packages CYTHON_REQUIRED = "required" @@ -109,6 +102,7 @@ def run_setup(): description='Pyomo: Python Optimization Modeling Objects', long_description=read('README.md'), long_description_content_type='text/markdown', 
+ keywords=['optimization'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', @@ -132,12 +126,17 @@ def run_setup(): 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Software Development :: Libraries :: Python Modules' ], + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', + install_requires=[ + 'PyUtilib>=5.8.1.dev0', + 'appdirs', + 'enum34;python_version<"3.4"', + 'ply', + 'six>=1.4', + ], packages=find_packages(exclude=("scripts",)), package_data={"pyomo.contrib.viewer":["*.ui"]}, - keywords=['optimization'], - install_requires=requires, ext_modules = ext_modules, - python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', entry_points=""" [console_scripts] runbenders=pyomo.pysp.benders:Benders_main