From d0b9ad4c146161b6e230cbd73c2f2695668ac28c Mon Sep 17 00:00:00 2001 From: Liu Wei Date: Mon, 9 Oct 2023 17:41:08 +0800 Subject: [PATCH] feat: sync from upstream release 1.6.x (#40) --- .../build_and_test_with_resty_events.yml | 141 ++++ .../build_and_test_with_worker_events.yml | 69 ++ .github/workflows/latest_os.yml | 189 ----- .github/workflows/lint.yml | 42 -- .github/workflows/old_os.yml | 189 ----- .github/workflows/sast.yml | 31 - .gitignore | 1 - Makefile | 4 +- config.ld | 2 +- docs/index.html | 257 ++++--- docs/topics/readme.md.html | 274 +++++--- lib/resty/healthcheck.lua | 650 +++++++++++------- lua-resty-healthcheck-2.0.0-1.src.rock | Bin 94201 -> 0 bytes lua-resty-healthcheck-scm-1.rockspec | 6 +- README.md => readme.md | 78 +-- .../lua-resty-healthcheck-0.1.0-1.rockspec | 26 - .../lua-resty-healthcheck-0.2.0-1.rockspec | 26 - .../lua-resty-healthcheck-0.3.0-1.rockspec | 26 - .../lua-resty-healthcheck-0.4.0-1.rockspec | 27 - .../lua-resty-healthcheck-0.4.1-1.rockspec | 27 - .../lua-resty-healthcheck-0.4.1-2.rockspec | 27 - .../lua-resty-healthcheck-0.4.2-1.rockspec | 27 - .../lua-resty-healthcheck-0.4.2-2.rockspec | 27 - .../lua-resty-healthcheck-0.5.0-1.rockspec | 27 - .../lua-resty-healthcheck-0.5.0-2.rockspec | 27 - .../lua-resty-healthcheck-0.6.0-1.rockspec | 27 - .../lua-resty-healthcheck-0.6.0-2.rockspec | 27 - .../lua-resty-healthcheck-0.6.1-1.rockspec | 27 - .../lua-resty-healthcheck-0.6.1-2.rockspec | 27 - .../lua-resty-healthcheck-1.0.0-1.rockspec | 27 - .../lua-resty-healthcheck-1.0.0-2.rockspec | 27 - .../lua-resty-healthcheck-1.1.0-1.rockspec | 27 - .../lua-resty-healthcheck-1.1.0-2.rockspec | 27 - .../lua-resty-healthcheck-1.1.1-1.rockspec | 27 - .../lua-resty-healthcheck-1.1.1-2.rockspec | 27 - .../lua-resty-healthcheck-1.1.2-1.rockspec | 27 - .../lua-resty-healthcheck-1.1.2-2.rockspec | 27 - .../lua-resty-healthcheck-1.2.0-1.rockspec | 26 - .../lua-resty-healthcheck-1.2.0-2.rockspec | 26 - .../lua-resty-healthcheck-1.3.0-1.rockspec | 27 - .../lua-resty-healthcheck-1.3.0-2.rockspec | 27 - .../lua-resty-healthcheck-2.0.0-1.rockspec | 27 - ...resty-healthcheck-api7-master-0-0.rockspec | 29 - ...-resty-healthcheck-iresty-1.0.1-2.rockspec | 26 - t/18-event_handler.t | 111 --- t/21-run_locked.t | 347 ++++++++++ t/lock-failed.t | 79 --- t/req-headers.t | 116 ---- t/with_resty-events/00-new.t | 229 ++++++ t/with_resty-events/01-start-stop.t | 182 +++++ t/with_resty-events/02-add_target.t | 183 +++++ t/with_resty-events/03-get_target_status.t | 106 +++ t/with_resty-events/04-report_success.t | 316 +++++++++ t/with_resty-events/05-report_failure.t | 261 +++++++ t/with_resty-events/06-report_http_status.t | 499 ++++++++++++++ t/with_resty-events/07-report_tcp_failure.t | 242 +++++++ t/with_resty-events/08-report_timeout.t | 244 +++++++ t/with_resty-events/09-active_probes.t | 536 +++++++++++++++ .../10-garbagecollect.t_disabled | 105 +++ t/with_resty-events/11-clear.t | 298 ++++++++ t/with_resty-events/12-set_target_status.t | 207 ++++++ t/with_resty-events/13-integration.t_disabled | 207 ++++++ t/with_resty-events/14-tls_active_probes.t | 155 +++++ .../15-get_virtualhost_target_status.t | 322 +++++++++ .../16-set_all_target_statuses_for_hostname.t | 233 +++++++ t/with_resty-events/17-mtls.t | 145 ++++ t/with_resty-events/18-req-headers.t | 285 ++++++++ t/with_resty-events/98-get_target_list.t | 164 +++++ .../99-status_ver.t} | 30 +- t/{ => with_resty-events}/util/cert.pem | 0 t/{ => with_resty-events}/util/key.pem | 0 t/{ => with_resty-events}/util/reindex | 0 t/{ => 
with_worker-events}/00-new.t | 31 +- t/{ => with_worker-events}/01-start-stop.t | 60 +- t/{ => with_worker-events}/02-add_target.t | 99 +-- .../03-get_target_status.t | 11 +- .../04-report_success.t | 14 +- .../05-report_failure.t | 24 +- .../06-report_http_status.t | 31 +- .../07-report_tcp_failure.t | 20 +- .../08-report_timeout.t | 16 +- t/{ => with_worker-events}/09-active_probes.t | 139 ++-- .../10-garbagecollect.t | 0 t/{ => with_worker-events}/11-clear.t | 122 +++- .../12-set_target_status.t | 25 - t/{ => with_worker-events}/13-integration.t | 7 - .../14-tls_active_probes.t | 0 .../15-get_virtualhost_target_status.t | 14 +- .../16-set_all_target_statuses_for_hostname.t | 13 - t/{ => with_worker-events}/17-mtls.t | 8 +- .../18-req-headers.t} | 15 +- .../98-get_target_list.t} | 65 ++ .../99-status_ver.t} | 0 t/with_worker-events/util/cert.pem | 19 + t/with_worker-events/util/key.pem | 28 + t/with_worker-events/util/reindex | 27 + 96 files changed, 6619 insertions(+), 2483 deletions(-) create mode 100644 .github/workflows/build_and_test_with_resty_events.yml create mode 100644 .github/workflows/build_and_test_with_worker_events.yml delete mode 100644 .github/workflows/latest_os.yml delete mode 100644 .github/workflows/lint.yml delete mode 100644 .github/workflows/old_os.yml delete mode 100644 .github/workflows/sast.yml delete mode 100644 lua-resty-healthcheck-2.0.0-1.src.rock rename README.md => readme.md (70%) delete mode 100644 rockspecs/lua-resty-healthcheck-0.1.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.2.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.3.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.4.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.4.1-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.4.1-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.4.2-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.4.2-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.5.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.5.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.6.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.6.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.6.1-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-0.6.1-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.0.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.0.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.1-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.1-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.2-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.1.2-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.2.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.2.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.3.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-1.3.0-2.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-2.0.0-1.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-api7-master-0-0.rockspec delete mode 100644 rockspecs/lua-resty-healthcheck-iresty-1.0.1-2.rockspec delete mode 100644 t/18-event_handler.t create mode 100644 t/21-run_locked.t delete mode 100644 t/lock-failed.t delete mode 
100644 t/req-headers.t create mode 100644 t/with_resty-events/00-new.t create mode 100644 t/with_resty-events/01-start-stop.t create mode 100644 t/with_resty-events/02-add_target.t create mode 100644 t/with_resty-events/03-get_target_status.t create mode 100644 t/with_resty-events/04-report_success.t create mode 100644 t/with_resty-events/05-report_failure.t create mode 100644 t/with_resty-events/06-report_http_status.t create mode 100644 t/with_resty-events/07-report_tcp_failure.t create mode 100644 t/with_resty-events/08-report_timeout.t create mode 100644 t/with_resty-events/09-active_probes.t create mode 100644 t/with_resty-events/10-garbagecollect.t_disabled create mode 100644 t/with_resty-events/11-clear.t create mode 100644 t/with_resty-events/12-set_target_status.t create mode 100644 t/with_resty-events/13-integration.t_disabled create mode 100644 t/with_resty-events/14-tls_active_probes.t create mode 100644 t/with_resty-events/15-get_virtualhost_target_status.t create mode 100644 t/with_resty-events/16-set_all_target_statuses_for_hostname.t create mode 100644 t/with_resty-events/17-mtls.t create mode 100644 t/with_resty-events/18-req-headers.t create mode 100644 t/with_resty-events/98-get_target_list.t rename t/{19-status-ver.t => with_resty-events/99-status_ver.t} (67%) rename t/{ => with_resty-events}/util/cert.pem (100%) rename t/{ => with_resty-events}/util/key.pem (100%) rename t/{ => with_resty-events}/util/reindex (100%) rename t/{ => with_worker-events}/00-new.t (88%) rename t/{ => with_worker-events}/01-start-stop.t (69%) rename t/{ => with_worker-events}/02-add_target.t (60%) rename t/{ => with_worker-events}/03-get_target_status.t (91%) rename t/{ => with_worker-events}/04-report_success.t (96%) rename t/{ => with_worker-events}/05-report_failure.t (92%) rename t/{ => with_worker-events}/06-report_http_status.t (95%) rename t/{ => with_worker-events}/07-report_tcp_failure.t (93%) rename t/{ => with_worker-events}/08-report_timeout.t (94%) rename t/{ => with_worker-events}/09-active_probes.t (73%) rename t/{ => with_worker-events}/10-garbagecollect.t (100%) rename t/{ => with_worker-events}/11-clear.t (53%) rename t/{ => with_worker-events}/12-set_target_status.t (95%) rename t/{ => with_worker-events}/13-integration.t (98%) rename t/{ => with_worker-events}/14-tls_active_probes.t (100%) rename t/{ => with_worker-events}/15-get_virtualhost_target_status.t (97%) rename t/{ => with_worker-events}/16-set_all_target_statuses_for_hostname.t (96%) rename t/{ => with_worker-events}/17-mtls.t (90%) rename t/{20-req-headers.t => with_worker-events/18-req-headers.t} (93%) rename t/{get_target_list.t => with_worker-events/98-get_target_list.t} (50%) rename t/{status-ver.t => with_worker-events/99-status_ver.t} (100%) create mode 100644 t/with_worker-events/util/cert.pem create mode 100644 t/with_worker-events/util/key.pem create mode 100755 t/with_worker-events/util/reindex diff --git a/.github/workflows/build_and_test_with_resty_events.yml b/.github/workflows/build_and_test_with_resty_events.yml new file mode 100644 index 00000000..eae40633 --- /dev/null +++ b/.github/workflows/build_and_test_with_resty_events.yml @@ -0,0 +1,141 @@ +name: Build and test + +on: + push: + branches: [master] + pull_request: + branches: [master] + +jobs: + build: + name: CI using lua-resty-events + runs-on: ubuntu-20.04 + strategy: + matrix: + openresty-version: [1.21.4.1] + + steps: + - name: Update and install OS dependencies + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev 
ssl-cert + sudo systemctl disable nginx + sudo systemctl stop nginx + + - name: Set environment variables + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + RESTY_EVENTS_VER: 0.1.2 + LUAROCKS_VER: 3.9.0 + OPENSSL_VER: 1.1.1q + PCRE_VER: 8.45 + run: | + echo "INSTALL_ROOT=/home/runner/work/cache/install-root" >> $GITHUB_ENV + echo "DOWNLOAD_ROOT=/home/runner/work/cache/download-root" >> $GITHUB_ENV + echo "OPENRESTY=$OPENRESTY_VER" >> $GITHUB_ENV + echo "LUAROCKS=$LUAROCKS_VER" >> $GITHUB_ENV + echo "OPENSSL=$OPENSSL_VER" >> $GITHUB_ENV + echo "PCRE=$PCRE_VER" >> $GITHUB_ENV + echo "RESTY_EVENTS=$RESTY_EVENTS_VER" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$HOME/install-root/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV + + - name: Checkout lua-resty-healthcheck + uses: actions/checkout@v3 + + # - name: Lookup build cache + # uses: actions/cache@v3 + # id: cache-deps + # with: + # path: | + # /home/runner/work/cache/install-root + # /home/runner/work/cache/download-root + # key: ${{ runner.os }}-${{ hashFiles('**/.github/workflows/build_and_test_with_resty_events.yml') }}-${{ matrix.openresty-version }} + + - name: Add to Path + if: steps.cache-deps.outputs.cache-hit != 'true' + run: echo "$INSTALL_ROOT/bin:$INSTALL_ROOT/nginx/sbin:$INSTALL_ROOT/luajit/bin:/usr/bin" >> $GITHUB_PATH + + - name: Build and install OpenSSL + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + curl -sSLO https://www.openssl.org/source/openssl-$OPENSSL.tar.gz + tar -xzf openssl-$OPENSSL.tar.gz + cd openssl-$OPENSSL + ./config -g shared -DPURIFY no-threads --prefix=$INSTALL_ROOT --openssldir=$INSTALL_ROOT no-unit-test + make + make install_sw + + - name: Checkout lua-resty-events + uses: actions/checkout@v3 + if: steps.cache-deps.outputs.cache-hit != 'true' + with: + repository: Kong/lua-resty-events + ref: refs/tags/0.1.0 + path: lua-resty-events + + - name: Build and install OpenResty + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + curl -sSLO https://openresty.org/download/openresty-$OPENRESTY.tar.gz + tar -xzf openresty-$OPENRESTY.tar.gz + cd openresty-$OPENRESTY + ./configure \ + --prefix=$INSTALL_ROOT \ + --with-cc-opt='-I$INSTALL_ROOT/include' \ + --with-ld-opt='-L$INSTALL_ROOT/lib -Wl,-rpath,$INSTALL_ROOT/lib' \ + --with-pcre-jit \ + --with-http_ssl_module \ + --with-http_realip_module \ + --with-http_stub_status_module \ + --with-http_v2_module \ + --without-http_encrypted_session_module \ + --with-stream_realip_module \ + --with-stream_ssl_preread_module \ + --add-module=../lua-resty-events \ + --with-pcre + make + make install + make install LUA_LIBDIR=$INSTALL_ROOT/lualib + + - name: Install LuaRocks + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + curl -sSLO https://luarocks.org/releases/luarocks-$LUAROCKS.tar.gz + tar -xzf luarocks-$LUAROCKS.tar.gz + cd luarocks-$LUAROCKS + ./configure \ + --prefix=$INSTALL_ROOT \ + --lua-suffix=jit \ + --with-lua=$INSTALL_ROOT/luajit \ + --with-lua-include=$INSTALL_ROOT/luajit/include/luajit-2.1 + make build + make install + + - name: Install manual dependencies + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + luarocks install luacheck + + - name: Install Test::NGINX + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + sudo apt-get install cpanminus + cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + cpanm --notest Test::Nginx + + - name: Install lua-resty-events + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + cd lua-resty-events + 
OPENRESTY_PREFIX=$INSTALL_ROOT PREFIX=$INSTALL_ROOT LUA_LIB_DIR=$INSTALL_ROOT/lualib make install + + - name: Install lua-resty-healthcheck + run: luarocks make + + - name: Run tests + env: + PATH: ${{ env.INSTALL_ROOT }}/bin:${{ env.INSTALL_ROOT }}/nginx/sbin:${{ env.INSTALL_ROOT }}/luajit/bin:/usr/bin + TEST_NGINX_BINARY: ${{ env.INSTALL_ROOT }}/nginx/sbin/nginx + run: | + eval `luarocks path` + eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + TEST_NGINX_RANDOMIZE=1 prove -I. -r t/with_resty-events diff --git a/.github/workflows/build_and_test_with_worker_events.yml b/.github/workflows/build_and_test_with_worker_events.yml new file mode 100644 index 00000000..2954c703 --- /dev/null +++ b/.github/workflows/build_and_test_with_worker_events.yml @@ -0,0 +1,69 @@ +name: Build and test + +on: + push: + branches: [master] + pull_request: + branches: [master] + +jobs: + build: + name: CI using lua-resty-worker-events + runs-on: ubuntu-20.04 + strategy: + matrix: + openresty-version: [1.21.4.1] + + steps: + - name: Update and install OS dependencies + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert + sudo systemctl disable nginx + sudo systemctl stop nginx + + - name: Set environment variables + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + run: | + echo "/usr/local/openresty/nginx/sbin" >> $GITHUB_PATH + + - name: Checkout lua-resty-healthcheck + uses: actions/checkout@v3 + + - name: Install OpenResty ${{ matrix.openresty-version }} + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + run: | + sudo apt-get -y install --no-install-recommends wget gnupg ca-certificates + wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - + echo "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/openresty.list + sudo apt-get update + sudo apt-get -y install openresty=$OPENRESTY_VER-1~focal1 + + - name: Install LuaRocks + run: sudo apt-get install -y luarocks + + - name: Install manual dependencies + run: | + sudo luarocks install luacheck + sudo luarocks install lua-resty-worker-events 1.0.0 + + - name: Install Test::NGINX + run: | + sudo apt-get install cpanminus + cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + cpanm --notest Test::Nginx + + - name: Install lua-resty-healthcheck + run: sudo luarocks make lua-resty-healthcheck-scm-1.rockspec + + - name: Run tests + run: | + eval `luarocks path` + eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + TEST_NGINX_RANDOMIZE=1 prove -I. 
-r t/with_worker-events + + - name: Run debugger + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 + timeout-minutes: 15 diff --git a/.github/workflows/latest_os.yml b/.github/workflows/latest_os.yml deleted file mode 100644 index a63dd30c..00000000 --- a/.github/workflows/latest_os.yml +++ /dev/null @@ -1,189 +0,0 @@ -name: Build and test for Ubuntu latest - -on: [push, pull_request] - -jobs: - build: - name: Build and install dependencies - runs-on: ubuntu-latest - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - - steps: - - name: Update and install OS dependencies - run: sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Create needed paths - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - mkdir -p $DOWNLOAD_PATH - mkdir -p $INSTALL_PATH - - - name: Build and install OpenResty ${{ matrix.openresty-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -d $INSTALL_PATH/openresty-$OPENRESTY_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz" - wget -O $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz - echo "tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz" - tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz - echo "result: $?" - pushd openresty-$OPENRESTY_VER - ./configure --prefix=$OPENRESTY_PREFIX - make - make install - popd - popd - fi - - - name: Build and install LuaRocks ${{ matrix.luarocks-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -d $INSTALL_PATH/luarocks-$LUAROCKS_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz" - wget -O $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz - tar -zxf $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz - pushd luarocks-$LUAROCKS_VER - ./configure --prefix=$LUAROCKS_PREFIX --with-lua=$OPENRESTY_PREFIX/luajit --with-lua-include=$OPENRESTY_PREFIX/luajit/include/luajit-2.1 --lua-suffix=jit - make build - make install - popd - luarocks install luacheck - popd - fi - - - name: Install Test::NGINX - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! 
-f $DOWNLOAD_PATH/cpanm ]; - then - wget -O $DOWNLOAD_PATH/cpanm https://cpanmin.us/ - chmod +x $DOWNLOAD_PATH/cpanm - cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest Test::Nginx - fi - - lint: - name: Static code analysis - runs-on: ubuntu-latest - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Lint code - run: | - eval `luarocks path` - luacheck lib - - install-and-test: - name: Test lua-resty-healthcheck - runs-on: ubuntu-latest - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Install lua-resty-healthcheck - run: luarocks make - - - name: Run tests - run: | - eval `luarocks path` - eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - TEST_NGINX_RANDOMIZE=1 prove -I. 
-r t diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index 10af9aa8..00000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Lint - -on: - pull_request: {} - workflow_dispatch: {} - push: - branches: - - main - - master - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -jobs: - lua-check: - name: Lua Check - runs-on: ubuntu-latest - permissions: - contents: read - issues: read - checks: write - pull-requests: write - if: (github.actor != 'dependabot[bot]') - - steps: - - name: Checkout source code - uses: actions/checkout@v3 - - # Optional step to run on only changed files - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@54849deb963ca9f24185fb5de2965e002d066e6b # v37 - with: - files: | - **.lua - - name: Lua Check - if: steps.changed-files.outputs.any_changed == 'true' - uses: Kong/public-shared-actions/code-check-actions/lua-lint@33449c46c6766a3d3c8f167cc383381225862b36 - with: - additional_args: '--no-default-config --config .luacheckrc' - files: ${{ steps.changed-files.outputs.all_changed_files }} \ No newline at end of file diff --git a/.github/workflows/old_os.yml b/.github/workflows/old_os.yml deleted file mode 100644 index 4d6894ed..00000000 --- a/.github/workflows/old_os.yml +++ /dev/null @@ -1,189 +0,0 @@ -name: Build and test for Ubuntu 20.04 - -on: [push, pull_request] - -jobs: - build: - name: Build and install dependencies - runs-on: ubuntu-20.04 - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - - steps: - - name: Update and install OS dependencies - run: sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Create needed paths - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - mkdir -p $DOWNLOAD_PATH - mkdir -p $INSTALL_PATH - - - name: Build and install OpenResty ${{ matrix.openresty-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! 
-d $INSTALL_PATH/openresty-$OPENRESTY_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz" - wget -O $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz - echo "tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz" - tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz - echo "result: $?" - pushd openresty-$OPENRESTY_VER - ./configure --prefix=$OPENRESTY_PREFIX - make - make install - popd - popd - fi - - - name: Build and install LuaRocks ${{ matrix.luarocks-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -d $INSTALL_PATH/luarocks-$LUAROCKS_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz" - wget -O $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz - tar -zxf $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz - pushd luarocks-$LUAROCKS_VER - ./configure --prefix=$LUAROCKS_PREFIX --with-lua=$OPENRESTY_PREFIX/luajit --with-lua-include=$OPENRESTY_PREFIX/luajit/include/luajit-2.1 --lua-suffix=jit - make build - make install - popd - luarocks install luacheck - popd - fi - - - name: Install Test::NGINX - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -f $DOWNLOAD_PATH/cpanm ]; - then - wget -O $DOWNLOAD_PATH/cpanm https://cpanmin.us/ - chmod +x $DOWNLOAD_PATH/cpanm - cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest Test::Nginx - fi - - lint: - name: Static code analysis - runs-on: ubuntu-20.04 - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Lint code - run: | - eval `luarocks path` - luacheck lib - - install-and-test: - name: Test lua-resty-healthcheck - runs-on: ubuntu-20.04 - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo 
"INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Install lua-resty-healthcheck - run: luarocks make - - - name: Run tests - run: | - eval `luarocks path` - eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - TEST_NGINX_RANDOMIZE=1 prove -I. -r t diff --git a/.github/workflows/sast.yml b/.github/workflows/sast.yml deleted file mode 100644 index ecc76413..00000000 --- a/.github/workflows/sast.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: SAST - -on: - pull_request: - paths: - - lib/**.lua - push: - branches: - - master - - main - paths: - - lib/**.lua - workflow_dispatch: {} - - -jobs: - semgrep: - name: Semgrep SAST - runs-on: ubuntu-latest - permissions: - # required for all workflows - security-events: write - # only required for workflows in private repositories - actions: read - contents: read - - if: (github.actor != 'dependabot[bot]') - - steps: - - uses: actions/checkout@v3 - - uses: Kong/public-shared-actions/security-actions/semgrep@33449c46c6766a3d3c8f167cc383381225862b36 \ No newline at end of file diff --git a/.gitignore b/.gitignore index b9321004..852c1b9a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,3 @@ ctags tags a.lua .DS_Store -.idea/ diff --git a/Makefile b/Makefile index eaf80bdc..1b7e6b59 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ LUA_INCLUDE_DIR ?= $(PREFIX)/include LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) INSTALL ?= install -.PHONY: all test install deps +.PHONY: all test install all: ; @@ -16,5 +16,3 @@ install: all test: all PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t -deps: - luarocks install rockspecs/lua-resty-healthcheck-api7-master-0-0.rockspec --only-deps \ No newline at end of file diff --git a/config.ld b/config.ld index 192b8cc8..70432802 100644 --- a/config.ld +++ b/config.ld @@ -4,7 +4,7 @@ description='Provides active and passive healthchecks (http and tcp) for OpenRes format='discount' file='./lib/' dir='docs' -readme='README.md' +readme='readme.md' sort=true sort_modules=true all=false diff --git a/docs/index.html b/docs/index.html index 6492afd6..52451055 100644 --- a/docs/index.html +++ b/docs/index.html @@ -27,8 +27,10 @@

lua-resty-healthcheck

+

Contents

Topics

@@ -51,34 +53,42 @@

Topics

Module resty.healthcheck

Healthcheck library for OpenResty.


Some notes on the usage of this library:


+

Info:

+

Functions

+ + + + + +
run_locked (self, key, fn, ...)Acquire a lock and run a function

+ +

The function call itself is wrapped with pcall to protect against exceptions.

Tables

@@ -97,6 +107,10 @@

Node management

+ + + + @@ -156,6 +170,61 @@

Initializing


+

Functions

+ +
+
+ + run_locked (self, key, fn, ...) +
+
+

Acquire a lock and run a function

+ +

The function call itself is wrapped with pcall to protect against exceptions.

+ +

This function exhibits some special behavior when called during a non-yieldable phase such as init_worker or log:

+ +
    +
  1. The lock timeout is set to 0 to ensure that resty.lock does not attempt to sleep/yield.
  2. If acquiring the lock fails due to a timeout, run_locked (this function) is re-scheduled to run in a timer. In this case, the function returns "scheduled".
+ + + + +

Parameters:

+
    +
  • self + The checker object +
  • +
  • key + the key/identifier to acquire a lock for +
  • +
  • fn + The function to execute +
  • +
  • ... + arguments that will be passed to fn +
  • +
+ +

Returns:

+
    + + The results of the function; or nil and an error message + in case it fails locking. +
+ + + + +
+
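As an illustration of the calling convention described above -- a sketch only, which assumes run_locked is reachable on a checker instance created with new(), and in which the key "my-key" and the callback are made-up placeholders:

    -- illustrative sketch; assumes `checker` was created via healthcheck.new()
    local ok, err = checker:run_locked("my-key", function(value)
        -- critical section; runs while the lock for "my-key" is held
        return value
    end, 42)

    if ok == "scheduled" then
        ngx.log(ngx.NOTICE, "lock busy in a non-yieldable phase; work re-scheduled in a timer")
    elseif not ok then
        ngx.log(ngx.ERR, "run_locked failed: ", err)
    end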

Tables

@@ -186,12 +255,12 @@

Fields:

  • mostly_healthy This event is raised when the target status is - still healthy but it started to receive "unhealthy" updates via active or + still healthy but it started to receive “unhealthy” updates via active or passive checks.
  • mostly_unhealthy This event is raised when the target status is - still unhealthy but it started to receive "healthy" updates via active or + still unhealthy but it started to receive “healthy” updates via active or passive checks.
  • @@ -215,7 +284,7 @@

    Usage:

    end
end

worker_events.register(event_callback, my_checker.EVENT_SOURCE)

    Node management

    Add a target to the healthchecker. When the ip + port + hostname combination already exists, it will simply - return success (without updating is_healthy status).

    - -

    NOTE: in non-yieldable contexts, this will be executed async. + return success (without updating is_healthy status).

    Parameters:

    @@ -272,12 +339,36 @@

    Returns:

    checker:clear ()
    - Clear all healthcheck data.

    + Clear all healthcheck data. + + + +

    Returns:

    +
      + + true on success, or nil + error on failure. +
    -

    NOTE: in non-yieldable contexts, this will be executed async. +

    +
    + + checker:delayed_clear (delay) +
    +
    + Clear all healthcheck data after a period of time. + Useful for keeping target status between configuration reloads. + + +

    Parameters:

    +
      +
    • delay + delay in seconds before purging target state. +
    • +
    +

    Returns:

      @@ -325,9 +416,7 @@
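A combined sketch of the two clearing calls above (the checker instance and the 10-second delay are arbitrary examples):

    -- wipe all healthcheck data immediately ...
    local ok, err = checker:clear()

    -- ... or keep target state around during a reconfiguration and purge it
    -- only if the targets have not been re-added within 10 seconds
    ok, err = checker:delayed_clear(10)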

      Returns:

      Remove a target from the healthchecker. - The target not existing is not considered an error.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + The target not existing is not considered an error.

      Parameters:

      @@ -367,13 +456,11 @@

      Healt
      Report a health failure. Reports a health failure which will count against the number of occurrences - required to make a target "fall". The type of healthchecker, - "tcp" or "http" (see new) determines against which counter the occurence goes. + required to make a target “fall”. The type of healthchecker, + “tcp” or “http” (see new) determines against which counter the occurence goes. If unhealthy.tcp_failures (for TCP failures) or unhealthy.http_failures is set to zero in the configuration, this function is a no-op - and returns true.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + and returns true.

      Parameters:

      @@ -388,7 +475,7 @@

      Parameters:

      (optional) hostname of the target being checked.
    1. check - (optional) the type of check, either "passive" or "active", default "passive". + (optional) the type of check, either “passive” or “active”, default “passive”.
    2. @@ -413,9 +500,7 @@

      Returns:

      If healthy.successes (for healthy HTTP status codes) or unhealthy.http_failures (fur unhealthy HTTP status codes) is set to zero in the configuration, this function is a no-op - and returns true.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + and returns true.

      Parameters:

      @@ -433,7 +518,7 @@

      Parameters:

      the http statuscode, or nil to report an invalid http response.
    3. check - (optional) the type of check, either "passive" or "active", default "passive". + (optional) the type of check, either “passive” or “active”, default “passive”.
    4. @@ -455,11 +540,9 @@

      Returns:

      Report a health success. Reports a health success which will count against the number of occurrences - required to make a target "rise". + required to make a target “rise”. If healthy.successes is set to zero in the configuration, - this function is a no-op and returns true.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + this function is a no-op and returns true.

      Parameters:

      @@ -474,7 +557,7 @@

      Parameters:

      (optional) hostname of the target being checked.
    5. check - (optional) the type of check, either "passive" or "active", default "passive". + (optional) the type of check, either “passive” or “active”, default “passive”.
    6. @@ -495,9 +578,7 @@

      Returns:

      Report a failure on TCP level. If unhealthy.tcp_failures is set to zero in the configuration, - this function is a no-op and returns true.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + this function is a no-op and returns true.

      Parameters:

      @@ -513,13 +594,13 @@

      Parameters:

    7. operation The socket operation that failed: - "connect", "send" or "receive". + “connect”, “send” or “receive”. TODO check what kind of information we get from the OpenResty layer in order to tell these error conditions apart - https://github.com/openresty/lua-resty-core/blob/master/lib/ngx/balancer.md#getlastfailure + https://github.com/openresty/lua-resty-core/blob/master/lib/ngx/balancer.md#get_last_failure
    8. check - (optional) the type of check, either "passive" or "active", default "passive". + (optional) the type of check, either “passive” or “active”, default “passive”.
    9. @@ -540,9 +621,7 @@

      Returns:

      Report a timeout failure. If unhealthy.timeouts is set to zero in the configuration, - this function is a no-op and returns true.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + this function is a no-op and returns true.

      Parameters:

      @@ -557,7 +636,7 @@

      Parameters:

      (optional) hostname of the target being checked.
    10. check - (optional) the type of check, either "passive" or "active", default "passive". + (optional) the type of check, either “passive” or “active”, default “passive”.
    11. @@ -576,9 +655,7 @@

      Returns:

      checker:set_all_target_statuses_for_hostname (hostname, port, is_healthy)
      - Sets the current status of all targets with the given hostname and port.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + Sets the current status of all targets with the given hostname and port.

      Parameters:

      @@ -610,9 +687,7 @@

      Returns:

      Sets the current status of the target. - This will set the status and clear its counters.

      - -

      NOTE: in non-yieldable contexts, this will be executed async. + This will immediately set the status and clear its counters.

      Parameters:

      @@ -694,10 +769,7 @@
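A combined sketch of the reporting and status-override functions in this section (a hypothetical checker instance; addresses, ports and hostnames are placeholders):

    -- passive reporting from a proxying code path
    checker:report_http_status("127.0.0.1", 8080, "example.com", 500, "passive")
    checker:report_tcp_failure("127.0.0.1", 8080, "example.com", "connect", "passive")
    checker:report_timeout("127.0.0.1", 8080, "example.com", "passive")

    -- manual overrides: flip a single target, or every target registered
    -- under a hostname/port combination
    checker:set_target_status("127.0.0.1", 8080, "example.com", true)
    checker:set_all_target_statuses_for_hostname("example.com", 8080, true)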

      Returns:

      It will be started upon creation.

      NOTE: the returned checker object must be anchored, if not it will be - removed by Lua's garbage collector and the healthchecks will cease to run.

      - -

      NOTE: in non-yieldable contexts, the initial loading of the target - statusses will be executed async. + removed by Lua’s garbage collector and the healthchecks will cease to run.

      Parameters:

      @@ -706,34 +778,35 @@

      Parameters:

      table with checker options. Options are:

        -
      • name: name of the health checker
      • -
      • shm_name: the name of the lua_shared_dict specified in the Nginx configuration to use
      • -
      • ssl_cert: certificate for mTLS connections (string or parsed object)
      • -
      • ssl_key: key for mTLS connections (string or parsed object)
      • -
      • checks.active.type: "http", "https" or "tcp" (default is "http")
      • -
      • checks.active.timeout: socket timeout for active checks (in seconds)
      • -
      • checks.active.concurrency: number of targets to check concurrently
      • -
      • checks.active.http_path: path to use in GET HTTP request to run on active checks
      • -
      • checks.active.https_sni: SNI server name incase of HTTPS
      • -
      • checks.active.https_verify_certificate: boolean indicating whether to verify the HTTPS certificate
      • -
      • checks.active.hheaders: an array of headers (no hash-table! must be pre-formatted)
      • -
      • checks.active.healthy.interval: interval between checks for healthy targets (in seconds)
      • -
      • checks.active.healthy.http_statuses: which HTTP statuses to consider a success
      • -
      • checks.active.healthy.successes: number of successes to consider a target healthy
      • -
      • checks.active.unhealthy.interval: interval between checks for unhealthy targets (in seconds)
      • -
      • checks.active.unhealthy.http_statuses: which HTTP statuses to consider a failure
      • -
      • checks.active.unhealthy.tcp_failures: number of TCP failures to consider a target unhealthy
      • -
      • checks.active.unhealthy.timeouts: number of timeouts to consider a target unhealthy
      • -
      • checks.active.unhealthy.http_failures: number of HTTP failures to consider a target unhealthy
      • -
      • checks.passive.type: "http", "https" or "tcp" (default is "http"; for passive checks, "http" and "https" are equivalent)
      • -
      • checks.passive.healthy.http_statuses: which HTTP statuses to consider a failure
      • -
      • checks.passive.healthy.successes: number of successes to consider a target healthy
      • -
      • checks.passive.unhealthy.http_statuses: which HTTP statuses to consider a success
      • -
      • checks.passive.unhealthy.tcp_failures: number of TCP failures to consider a target unhealthy
      • -
      • checks.passive.unhealthy.timeouts: number of timeouts to consider a target unhealthy
      • -
      • checks.passive.unhealthy.http_failures: number of HTTP failures to consider a target unhealthy
      • +
      • name: name of the health checker
      • +
      • shm_name: the name of the lua_shared_dict specified in the Nginx configuration to use
      • +
      • ssl_cert: certificate for mTLS connections (string or parsed object)
      • +
      • ssl_key: key for mTLS connections (string or parsed object)
      • +
      • checks.active.type: “http”, “https” or “tcp” (default is “http”)
      • +
      • checks.active.timeout: socket timeout for active checks (in seconds)
      • +
      • checks.active.concurrency: number of targets to check concurrently
      • +
      • checks.active.http_path: path to use in GET HTTP request to run on active checks
      • +
• checks.active.https_sni: SNI server name in case of HTTPS
      • +
      • checks.active.https_verify_certificate: boolean indicating whether to verify the HTTPS certificate
      • +
      • checks.active.headers: one or more lists of values indexed by header name
      • +
      • checks.active.healthy.interval: interval between checks for healthy targets (in seconds)
      • +
      • checks.active.healthy.http_statuses: which HTTP statuses to consider a success
      • +
      • checks.active.healthy.successes: number of successes to consider a target healthy
      • +
      • checks.active.unhealthy.interval: interval between checks for unhealthy targets (in seconds)
      • +
      • checks.active.unhealthy.http_statuses: which HTTP statuses to consider a failure
      • +
      • checks.active.unhealthy.tcp_failures: number of TCP failures to consider a target unhealthy
      • +
      • checks.active.unhealthy.timeouts: number of timeouts to consider a target unhealthy
      • +
      • checks.active.unhealthy.http_failures: number of HTTP failures to consider a target unhealthy
      • +
      • checks.passive.type: “http”, “https” or “tcp” (default is “http”; for passive checks, “http” and “https” are equivalent)
      • +
• checks.passive.healthy.http_statuses: which HTTP statuses to consider a success
      • +
      • checks.passive.healthy.successes: number of successes to consider a target healthy
      • +
• checks.passive.unhealthy.http_statuses: which HTTP statuses to consider a failure
      • +
      • checks.passive.unhealthy.tcp_failures: number of TCP failures to consider a target unhealthy
      • +
      • checks.passive.unhealthy.timeouts: number of timeouts to consider a target unhealthy
      • +
      • checks.passive.unhealthy.http_failures: number of HTTP failures to consider a target unhealthy
      +

If any of the health counters above (e.g. checks.passive.unhealthy.timeouts) is set to zero, the corresponding category of checks is not taken into account. This way active or passive health checks can be disabled selectively.
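For illustration, a checker using a subset of the options above might be created as follows (a sketch only; the name, shared dict, path, header and numeric thresholds are placeholder values):

    local healthcheck = require("resty.healthcheck")

    local checker, err = healthcheck.new({
        name = "testing",            -- placeholder checker name
        shm_name = "test_shm",       -- a lua_shared_dict declared in nginx.conf
        checks = {
            active = {
                type = "http",
                http_path = "/status",
                headers = { ["X-Health-Check"] = { "probe" } },  -- lists of values indexed by header name
                healthy = { interval = 2, successes = 2 },
                unhealthy = { interval = 1, http_failures = 2, tcp_failures = 2, timeouts = 3 },
            },
            passive = {
                healthy = { successes = 5 },
                unhealthy = { http_failures = 5, tcp_failures = 2, timeouts = 7 },
            },
        },
    })
    if not checker then
        ngx.log(ngx.ERR, "failed to create healthchecker: ", err)
    end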

      Returns:

      -generated by LDoc 1.4.6 -Last updated 2020-09-22 15:00:30 +generated by LDoc 1.5.0 +Last updated 2023-09-06 09:49:32
      diff --git a/docs/topics/readme.md.html b/docs/topics/readme.md.html index 84eafdbe..5863ee85 100644 --- a/docs/topics/readme.md.html +++ b/docs/topics/readme.md.html @@ -27,11 +27,11 @@

      lua-resty-healthcheck

      +

      Contents

      @@ -39,7 +39,7 @@

      Contents

      Topics

        -
      • README
      • +
      • readme

      Modules

        @@ -50,35 +50,37 @@

        Modules

        +

        lua-resty-healthcheck

        -

        lua-resty-healthcheck

        - -

        Build Status

        +

        legacy version +Release 1.6.x +License +Twitter Follow

        A health check library for OpenResty.

        -

        Synopsis

        +

        Synopsis

        -http {
        +http {
             lua_shared_dict test_shm 8m;
             lua_shared_dict my_worker_events 8m;
        -    init_worker_by_lua_block {
        +    init_worker_by_lua_block {
         
                 local we = require "resty.worker.events"
        -        local ok, err = we.configure({
        +        local ok, err = we.configure({
                     shm = "my_worker_events",
                     interval = 0.1
                 })
                 if not ok then
        -            ngx.log(ngx.ERR, "failed to configure worker events: ", err)
        +            ngx.log(ngx.ERR, "failed to configure worker events: ", err)
                     return
                 end
         
                 local healthcheck = require("resty.healthcheck")
        -        local checker = healthcheck.new({
        +        local checker = healthcheck.new({
                     name = "testing",
                     shm_name = "test_shm",
                     checks = {
        @@ -97,21 +99,21 @@ 

        Synopsis

        } }) - local ok, err = checker:add_target("127.0.0.1", 8080, "example.com", false) + local ok, err = checker:add_target("127.0.0.1", 8080, "example.com", false) local handler = function(target, eventname, sourcename, pid) - ngx.log(ngx.DEBUG,"Event from: ", sourcename) + ngx.log(ngx.DEBUG,"Event from: ", sourcename) if eventname == checker.events.remove -- a target was removed - ngx.log(ngx.DEBUG,"Target removed: ", + ngx.log(ngx.DEBUG,"Target removed: ", target.ip, ":", target.port, " ", target.hostname) elseif eventname == checker.events.healthy -- target changed state, or was added - ngx.log(ngx.DEBUG,"Target switched to healthy: ", + ngx.log(ngx.DEBUG,"Target switched to healthy: ", target.ip, ":", target.port, " ", target.hostname) elseif eventname == checker.events.unhealthy -- target changed state, or was added - ngx.log(ngx.DEBUG,"Target switched to unhealthy: ", + ngx.log(ngx.DEBUG,"Target switched to unhealthy: ", target.ip, ":", target.port, " ", target.hostname) else -- unknown event @@ -123,6 +125,7 @@

        Synopsis

        +

        Description

        This library supports performing active and passive health checks on arbitrary hosts.

        @@ -131,7 +134,7 @@

        Description

        happens via the lua-resty-worker-events library.

        Targets are added using checker:add_target(host, port). -Changes in status ("healthy" or "unhealthy") are broadcasted via worker-events.
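For example (a sketch re-using the checker from the synopsis; address, port and hostname are placeholders):

    local ok, err = checker:add_target("127.0.0.1", 8080, "example.com", false)
    if not ok then
        ngx.log(ngx.ERR, "failed to add target: ", err)
    end

    -- query the current status of that target later on
    local healthy = checker:get_target_status("127.0.0.1", 8080, "example.com")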

        +Changes in status (“healthy” or “unhealthy”) are broadcasted via worker-events.

        Active checks are executed in the background based on the specified timer intervals.

        @@ -141,187 +144,242 @@

        Description

        See the online LDoc documentation for the complete API.

        -

        -

        Async behaviour

        +

        + +

        History

        -

        Since this library heavily uses the SHM to share data between workers, it must -use locks. The locks themselves need access to ngx.sleep which is not available -in all contexts. Most notably not during startup; init and init_worker.

        +

        Versioning is strictly based on Semantic Versioning

        -

        The library will try and acquire the lock and update, but if it fails it will -schedule an async update (timer with delay 0).

        +

        1.6.2 (17-Nov-2022)

        -

        One workaround for this in the initial phases would be to replace ngx.sleep with -a version that does a blocking sleep in init/init_worker. This will enable -the usage of locks in those phases.

        +
          +
        • Fix: avoid raising worker events for new targets that were marked for delayed +removal, i.e. targets that already exist in memory only need the removal flag +cleared when added back. #122
        • +
        -

        -

        History

        +

        1.6.1 (25-Jul-2022)

        -

        Versioning is strictly based on Semantic Versioning

        +
          +
        • Fix: improvements to ensure the proper securing of shared resources to avoid +race conditions and clearly report failure states. +#112, +#113, +#114.
        • +
        • Fix: reduce the frequency of checking for unused targets, reducing the number +of locks created. #116
        • +
• Fix: accept any lua-resty-events 0.1.x release. #118
        • +
        + + +

        1.6.0 (27-Jun-2022)

        + +
          +
        • Feature: introduce support to lua-resty-events +module in addition to lua-resty-worker-events +support. With this addition, the lua-resty-healthcheck luarocks package does +not require a specific event-sharing module anymore, but you are still +required to provide either lua-resty-worker-events or lua-resty-events. +#105
        • +
• Change: if available, lua-resty-healthcheck now uses string.buffer, LuaJIT's new serialization API. If it is unavailable, lua-resty-healthcheck falls back to cjson. #109
        • +
        + + +

        1.5.1 (23-Mar-2022)

        -

        Releasing new versions:

        +
          +
        • Fix: avoid breaking active health checks when adding or removing targets. +#93
        • +
        + + +

        1.5.0 (09-Feb-2022)

          -
        • update changelog below (PR's should be merged including a changelog entry)
        • -
        • based on changelog determine new SemVer version
        • -
        • create a new rockspec
        • -
        • render the docs using ldoc (don't do this within PR's)
        • -
        • commit as "release x.x.x" (do not include rockspec revision)
        • -
        • tag the commit with "x.x.x" (do not include rockspec revision)
        • -
        • push commit and tag
        • -
        • upload rock to luarocks: luarocks upload rockspecs/[name] --api-key=abc
        • +
• New option checks.active.headers supports one or more lists of values indexed by header name. #87
        • +
• Introduce delayed_clear() function, used to remove addresses after a time interval. This function may be used when an address is being removed but may be added again before the interval expires, keeping its health status. #88
        -

        2.0.0 (22-Sep-2020)

        + +

        1.4.3 (31-Mar-2022)

          -
        • BREAKING: fallback for deprecated top-level field type is now removed - (deprecated since 0.5.0) #56
        • -
        • BREAKING: Bump lua-resty-worker-events dependency to 2.0.0. This makes - a lot of the APIs in this library asynchronous as the worker events post - and post_local won't anymore call poll on a running worker automatically, - for more information, see: - https://github.com/Kong/lua-resty-worker-events#200-16-september-2020
        • -
        • BREAKING: tcpfailures can no longer be 0 on http(s) checks (unless http(s)failures - are also set to 0) #55
        • -
        • feature: Added support for https_sni #49
        • -
        • fix: properly log line numbers by using tail calls #29
        • -
        • fix: when not providing a hostname, use IP #48
        • -
        • fix: makefile; make install
        • -
        • feature: added a status version field #54
        • -
        • feature: add headers for probe request #54
        • -
        • fix: exit early when reloading during a probe #47
        • -
        • fix: prevent target-list from being nil, due to async behaviour #44
        • -
        • fix: replace timer and node-wide locks with resty-timer, to prevent interval - skips #59
        • -
        • change: added additional logging on posting events #25
        • -
        • fix: do not run out of timers during init/init_worker when adding a vast - amount of targets #57
        • -
        • fix: do not call on the module table, but use a method for locks. Also in - #57
        • +
        • Fix: avoid breaking active health checks when adding or removing targets. +#100
        + +

        1.4.2 (29-Jun-2021)

        + +
          +
        • Fix: prevent new active checks being scheduled while a health check is running. +#72
        • +
        • Fix: remove event watcher when stopping an active health check. +#74; fixes Kong issue +#7406
        • +
        + + +

        1.4.1 (17-Feb-2021)

        + +
          +
        • Fix: make sure that a single worker will actively check hosts' statuses. +#67
        • +
        + + +

        1.4.0 (07-Jan-2021)

        + +
          +
        • Use a single timer to actively health check targets. This reduces the number +of timers used by health checkers, as they used to use two timers by each +target. #62
        • +
        + +

        1.3.0 (17-Jun-2020)

        • Adds mTLS support to active healthchecks. This feature can be used by adding the fields ssl_cert and ssl_key, with the certificate and key respectively, when creating a new healthcheck object. #41 (see the sketch below)
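        A sketch of an mTLS-enabled checker. It assumes the client certificate and key live in PEM files and, as in the module's own tests, are parsed with ngx.ssl before being passed as top-level fields of the options table; all paths and names are placeholders:

            local ssl = require("ngx.ssl")
            local healthcheck = require("resty.healthcheck")

            local function read_file(path)
              local f = assert(io.open(path, "r"))
              local data = f:read("*a")
              f:close()
              return data
            end

            local checker = assert(healthcheck.new({
              name = "mtls_checker",   -- illustrative name
              shm_name = "my_shm",
              -- client certificate and key presented during the active-probe TLS handshake
              ssl_cert = assert(ssl.parse_pem_cert(read_file("/path/to/client.pem"))),
              ssl_key = assert(ssl.parse_pem_priv_key(read_file("/path/to/client.key"))),
              checks = {
                active = {
                  type = "https",
                  http_path = "/status",
                  healthy = { interval = 2, successes = 1 },
                  unhealthy = { interval = 1, tcp_failures = 1, http_failures = 1 },
                },
              },
            }))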

        1.2.0 (13-Feb-2020)


        1.1.2 (19-Dec-2019)

        • Fix: when the ngx.sleep API is not available (e.g. in the log phase) it is not possible to lock using lua-resty-lock and any function that needs exclusive access would fail. This fix adds a retry method that starts a new light thread, which has access to ngx.sleep, to lock the critical path. #37

        1.1.1 (14-Nov-2019)

        • Fix: fail when it is not possible to get exclusive access to the list of targets. This fix prevents workers from getting into an inconsistent state. #34

        1.1.0 (30-Sep-2019)

        • Add support for setting a custom Host header to be used for active checks (see the sketch below).
        • Fix: log an error on SSL handshake failure. #28
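        In the module as shipped in this patch, the per-target Host header is exposed through the hostheader argument of add_target; a small sketch with placeholder addresses, reusing a checker created as in the earlier sketches:

            -- probe 10.0.0.10:8080 but send "Host: api.internal.test" on active checks
            assert(checker:add_target("10.0.0.10", 8080, "api.internal.test", true, "api.internal.test"))

            -- a different Host value per target, e.g. when several virtual hosts share
            -- the same IP and port
            assert(checker:add_target("10.0.0.10", 8080, "admin.internal.test", true, "admin.internal.test"))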

        1.0.0 (05-Jul-2019)

        • BREAKING: all API functions related to hosts now require a hostname argument. This way, different hostnames listening on the same IP and port combination do not affect each other (see the sketch below).
        • Fix: fix reporting of active TCP probe successes. #20; fixes issue #19
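        Since this release the host-related functions identify a target by IP, port and hostname together; a sketch with placeholder targets, checker as above:

            assert(checker:add_target("192.168.0.5", 8000, "example.test", true))
            assert(checker:add_target("192.168.0.5", 8000, "other.test", true))

            -- the two hostnames share an IP and port but keep independent health state
            local healthy = checker:get_target_status("192.168.0.5", 8000, "example.test")
            ngx.log(ngx.INFO, "example.test is ", healthy and "healthy" or "unhealthy")

            -- passive reports also take ip, port and hostname
            checker:report_http_status("192.168.0.5", 8000, "other.test", 500, "passive")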

        0.6.1 (04-Apr-2019)

        • Fix: set up event callback only after target list is loaded. #18; fixes Kong issue #4453

        0.6.0 (26-Sep-2018)

        • Introduce the checks.active.https_verify_certificate field. It is true by default; setting it to false disables certificate verification in active healthchecks over HTTPS (see the sketch below).
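        For upstreams with self-signed certificates, verification can be turned off per checker; a minimal sketch in which the shm name and path are placeholders:

            local healthcheck = require("resty.healthcheck")

            local checker = assert(healthcheck.new({
              name = "https_checker",
              shm_name = "my_shm",
              checks = {
                active = {
                  type = "https",
                  http_path = "/status",
                  -- defaults to true; false skips certificate verification on probes
                  https_verify_certificate = false,
                  healthy = { interval = 2, successes = 1 },
                  unhealthy = { interval = 1, http_failures = 2 },
                },
              },
            }))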

        0.5.0 (25-Jul-2018)

        • Add support for https -- thanks @gaetanfl for the PR!
        • Introduce separate checks.active.type and checks.passive.type fields; the top-level type field is still supported as a fallback but is now deprecated (see the sketch below).
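        With the separate fields, the active and passive sides may even use different protocols; a sketch in which the commented-out top-level type shows the deprecated fallback:

            local healthcheck = require("resty.healthcheck")

            local checker = assert(healthcheck.new({
              name = "mixed_checker",
              shm_name = "my_shm",
              -- type = "http",             -- deprecated top-level fallback
              checks = {
                active = {
                  type = "https",           -- active probes over HTTPS
                  http_path = "/status",
                  healthy = { interval = 2, successes = 1 },
                  unhealthy = { interval = 1, http_failures = 2 },
                },
                passive = {
                  type = "http",            -- passive reports based on proxied HTTP traffic
                  healthy = { successes = 2 },
                  unhealthy = { http_failures = 3, tcp_failures = 2, timeouts = 3 },
                },
              },
            }))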

        0.4.2 (23-May-2018)

        • Fix Host header in active healthchecks

        0.4.1 (21-May-2018)

        • Fix internal management of healthcheck counters

        0.4.0 (20-Mar-2018)

        • Correct setting of defaults in http_statuses (see the sketch below)
        • Add type and bounds checking for the checks table
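        The http_statuses lists decide which HTTP response codes count towards the healthy or unhealthy counters; a sketch assuming the usual layout of these fields under the healthy and unhealthy sub-tables:

            local healthcheck = require("resty.healthcheck")

            local checker = assert(healthcheck.new({
              name = "status_checker",
              shm_name = "my_shm",
              checks = {
                active = {
                  type = "http",
                  http_path = "/status",
                  healthy = {
                    interval = 2,
                    successes = 2,
                    http_statuses = { 200, 302 },        -- codes counted as a success
                  },
                  unhealthy = {
                    interval = 1,
                    http_failures = 3,
                    http_statuses = { 429, 500, 503 },   -- codes counted as a failure
                  },
                },
              },
            }))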

        0.3.0 (18-Dec-2017)

        • Disable individual checks by setting their counters to 0 (see the sketch below)
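        A counter set to 0 disables that specific check while the rest keep running; for example, keeping active probing but ignoring passive successes and timeouts (a sketch, values illustrative):

            local healthcheck = require("resty.healthcheck")

            local checker = assert(healthcheck.new({
              name = "partial_checker",
              shm_name = "my_shm",
              checks = {
                active = {
                  type = "http",
                  http_path = "/status",
                  healthy = { interval = 2, successes = 2 },
                  unhealthy = { interval = 1, http_failures = 3 },
                },
                passive = {
                  type = "http",
                  healthy = { successes = 0 },           -- 0 disables the passive success counter
                  unhealthy = {
                    http_failures = 2,
                    tcp_failures = 2,
                    timeouts = 0,                        -- 0 disables the passive timeout check
                  },
                },
              },
            }))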

        0.2.0 (30-Nov-2017)


        0.1.0 (27-Nov-2017) Initial release

        • Initial upload

        Copyright and License

        - Copyright 2017-2020 Kong Inc.
        + Copyright 2017-2022 Kong Inc.
         
          Licensed under the Apache License, Version 2.0 (the "License");
          you may not use this file except in compliance with the License.
        @@ -337,13 +395,11 @@ 

        Copyright and License

        - generated by LDoc 1.4.6, last updated 2020-09-22 15:00:30
        + generated by LDoc 1.5.0, last updated 2023-09-06 09:49:32
        diff --git a/lib/resty/healthcheck.lua b/lib/resty/healthcheck.lua index c2f9f9a7..64a09792 100644 --- a/lib/resty/healthcheck.lua +++ b/lib/resty/healthcheck.lua @@ -20,38 +20,43 @@ -- - Events will be raised in every worker, see [lua-resty-worker-events](https://github.com/Kong/lua-resty-worker-events) -- for details. -- --- @copyright 2017-2020 Kong Inc. +-- @copyright 2017-2023 Kong Inc. -- @author Hisham Muhammad, Thijs Schreijer -- @license Apache 2.0 -local bit = require("bit") -local cjson = require("cjson.safe").new() -local resty_timer = require("resty.timer") -local ssl = require("ngx.ssl") -local worker_events = require("resty.worker.events") --- local resty_lock = require("resty.lock") -- required later in the file" - local ERR = ngx.ERR local WARN = ngx.WARN local DEBUG = ngx.DEBUG local ngx_log = ngx.log -local re_find = ngx.re.find -local ngx_worker_exiting = ngx.worker.exiting -local get_phase = ngx.get_phase - local tostring = tostring local ipairs = ipairs +local table_insert = table.insert +local table_remove = table.remove +local string_format = string.format +local ssl = require("ngx.ssl") +local resty_timer = require "resty.timer" +local bit = require("bit") +local re_find = ngx.re.find +local ngx_now = ngx.now +local ngx_worker_id = ngx.worker.id +local ngx_worker_pid = ngx.worker.pid local pcall = pcall +local get_phase = ngx.get_phase local type = type local assert = assert -local table_remove = table.remove -local table_concat = table.concat -local string_format = string.format + +local RESTY_EVENTS_VER = [[^0\.1\.\d+$]] +local RESTY_WORKER_EVENTS_VER = "0.3.3" + local new_tab local nkeys local is_array +local codec + + +local TESTING = _G.__TESTING_HEALTHCHECKER or false do local ok @@ -86,8 +91,38 @@ do return true end end + + ok, codec = pcall(require, "string.buffer") + if not ok then + codec = require("cjson.safe").new() + end +end + + +local worker_events +--- This function loads the worker events module received as arg. It will throw +-- error() if it is not possible to load the module. +local function load_events_module(self) + if self.events_module == "resty.worker.events" then + worker_events = require("resty.worker.events") + assert(worker_events, "could not load lua-resty-worker-events") + assert(worker_events._VERSION == RESTY_WORKER_EVENTS_VER, + "unsupported lua-resty-worker-events version") + + elseif self.events_module == "resty.events" then + worker_events = require("resty.events.compat") + local version_match = ngx.re.match(worker_events._VERSION, RESTY_EVENTS_VER, "o") + assert(version_match, "unsupported lua-resty-events version") + + else + error("unknown events module") + end + + assert(worker_events.configured(), "please configure the '" .. + self.events_module .. "' module before using 'lua-resty-healthcheck'") end + -- constants local EVENT_SOURCE_PREFIX = "lua-resty-healthcheck" local LOG_PREFIX = "[healthcheck] " @@ -98,6 +133,17 @@ local EMPTY = setmetatable({},{ end }) +--- timer constants +-- evaluate active checks every 0.1s +local CHECK_INTERVAL = 0.1 +-- use a 10% jitter to start each worker timer +local CHECK_JITTER = CHECK_INTERVAL * 0.1 +-- lock valid period: the worker which acquires the lock owns it for 15 times +-- the check interval. 
If it does not update the shm during this period, we +-- consider that it is not able to continue checking (the worker probably was killed) +local LOCK_PERIOD = CHECK_INTERVAL * 15 +-- interval between stale targets cleanup +local CLEANUP_INTERVAL = CHECK_INTERVAL * 25 -- Counters: a 32-bit shm integer can hold up to four 8-bit counters. local CTR_SUCCESS = 0x00000001 @@ -177,57 +223,37 @@ end -- Some color for demo purposes local use_color = false local id = function(x) return x end -local worker_color = use_color and function(str) return ("\027["..tostring(31 + ngx.worker.pid() % 5).."m"..str.."\027[0m") end or id +local worker_color = use_color and function(str) return ("\027["..tostring(31 + ngx_worker_pid() % 5).."m"..str.."\027[0m") end or id -- Debug function local function dump(...) print(require("pl.pretty").write({...})) end -- luacheck: ignore 211 --- cache timers in "init", "init_worker" phases so we use only a single timer --- and do not run the risk of exhausting them for large sets --- see https://github.com/Kong/lua-resty-healthcheck/issues/40 --- Below we'll temporarily use a patched version of ngx.timer.at, until we're --- past the init and init_worker phases, after which we'll return to the regular --- ngx.timer.at implementation -local ngx_timer_at do - local callback_list = {} +local _M = {} - local function handler(premature) - if premature then - return - end +-- checker objects (weak) table +local hcs = setmetatable({}, { + __mode = "v", +}) - local list = callback_list - callback_list = {} +local active_check_timer +local last_cleanup_check - for _, args in ipairs(list) do - local ok, err = pcall(args[1], ngx_worker_exiting(), unpack(args, 2, args.n)) - if not ok then - ngx_log(ERR, "timer failure: ", err) - end - end - end +-- serialize a table to a string +local serialize = codec.encode - ngx_timer_at = function(...) - local phase = get_phase() - if phase ~= "init" and phase ~= "init_worker" then - -- we're past init/init_worker, so replace this temp function with the - -- real-deal again, so from here on we run regular timers. - ngx_timer_at = ngx.timer.at - return ngx.timer.at(...) - end - local n = #callback_list - callback_list[n+1] = { n = select("#", ...), ... } - if n == 0 then - -- first one, so schedule the actual timer - return ngx.timer.at(0, handler) - end - return true - end +-- deserialize a string to a table +local deserialize = codec.decode + +local function key_for(key_prefix, ip, port, hostname) + return string_format("%s:%s:%s%s", key_prefix, ip, port, hostname and ":" .. hostname or "") end +-- resty.lock timeout when yieldable +local LOCK_TIMEOUT = 5 + local run_locked do -- resty_lock is restricted to this scope in order to keep sensitive @@ -247,23 +273,31 @@ do timer = true, } - local function run_in_timer(premature, fn, ...) - if not premature then - fn(...) + local function run_in_timer(premature, self, key, fn, ...) + if premature then + return end - end - local function schedule(fn, ...) - return ngx_timer_at(0, run_in_timer, fn, ...) + local ok, err = run_locked(self, key, fn, ...) + if not ok then + self:log(ERR, "locked function for key '", key, "' failed in timer: ", err) + end end - -- timeout when yieldable - local timeout = 5 + local function schedule(self, key, fn, ...) + local ok, err = ngx.timer.at(0, run_in_timer, self, key, fn, ...) + if not ok then + return nil, "failed scheduling locked function for key '" .. key .. + "', " .. 
err + end + + return "scheduled" + end -- resty.lock consumes these options immediately, so this table can be reused local opts = { - exptime = 10, -- timeout after which lock is released anyway - timeout = timeout, -- max wait time to acquire lock + exptime = 10, -- timeout after which lock is released anyway + timeout = LOCK_TIMEOUT, -- max wait time to acquire lock } --- @@ -279,8 +313,7 @@ do -- attempt to sleep/yield -- 2. If acquiring the lock fails due to a timeout, `run_locked` -- (this function) is re-scheduled to run in a timer. In this case, - -- the function returns `"scheduled"` instead of the return value of - -- the locked function + -- the function returns `"scheduled"` -- -- @param self The checker object -- @param key the key/identifier to acquire a lock for @@ -302,7 +335,7 @@ do local yield = yieldable[get_phase()] if yield then - opts.timeout = timeout + opts.timeout = LOCK_TIMEOUT else -- if yielding is not possible in the current phase, use a zero timeout -- so that resty.lock will return `nil, "timeout"` immediately instead of @@ -321,12 +354,7 @@ do if not elapsed and err == "timeout" and not yield then -- yielding is not possible in the current phase, so retry in a timer - local ok, terr = schedule(run_locked, self, key, fn, ...) - if not ok then - return nil, terr - end - - return "scheduled" + return schedule(self, key, fn, ...) elseif not elapsed then return nil, "failed acquiring lock for '" .. key .. "', " .. err @@ -341,33 +369,53 @@ do end if not pok then - return nil, perr - else - return perr, res + return nil, "locked function threw an exception: " .. tostring(perr) end + + return perr, res end end +local deepcopy +do + local function _deepcopy(orig, copied) + -- prevent infinite loop when a field refers its parent + copied[orig] = true + -- If the array-like table contains nil in the middle, + -- the len might be smaller than the expected. + -- But it doesn't affect the correctness. + local len = #orig + local copy = table.new(len, table.nkeys(orig) - len) + for orig_key, orig_value in pairs(orig) do + if type(orig_value) == "table" and not copied[orig_value] then + copy[orig_key] = _deepcopy(orig_value, copied) + else + copy[orig_key] = orig_value + end + end -local _M = {} - + local mt = getmetatable(orig) + if mt ~= nil then + setmetatable(copy, mt) + end --- TODO: improve serialization speed --- serialize a table to a string -local function serialize(t) - return cjson.encode(t) -end + return copy + end --- deserialize a string to a table -local function deserialize(s) - return cjson.decode(s) -end + local copied_recorder = {} + function deepcopy(orig) + local orig_type = type(orig) + if orig_type ~= 'table' then + return orig + end -local function key_for(key_prefix, ip, port, hostname) - return string_format("%s:%s:%s%s", key_prefix, ip, port, hostname and ":" .. hostname or "") + local res = _deepcopy(orig, copied_recorder) + table.clear(copied_recorder) + return res + end end @@ -407,8 +455,8 @@ end --- Run the given function holding a lock on the target list. -- @param self The checker object -- @param fn The function to execute --- @return The results of the function; "scheduled" if the function was --- scheduled in a timer, or nil and an error message in case of failure +-- @return The results of the function; or nil and an error message +-- in case it fails locking. 
local function locking_target_list(self, fn) local ok, err = run_locked(self, self.TARGET_LIST_LOCK, with_target_list, self, fn) @@ -429,8 +477,6 @@ end --- Add a target to the healthchecker. -- When the ip + port + hostname combination already exists, it will simply -- return success (without updating `is_healthy` status). --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target to check. -- @param port the port to check against. -- @param hostname (optional) hostname to set as the host header in the HTTP @@ -439,10 +485,12 @@ end -- default is `true`. -- @param hostheader (optional) a value to use for the Host header on -- active healthchecks. +-- @param tbl_meta (optional) a lua table with custom info of business stuff -- @return `true` on success, or `nil + error` on failure. -function checker:add_target(ip, port, hostname, is_healthy, hostheader) +function checker:add_target(ip, port, hostname, is_healthy, hostheader, tbl_meta) ip = tostring(assert(ip, "no ip address provided")) port = assert(tonumber(port), "no port number provided") + hostname = hostname or ip if is_healthy == nil then is_healthy = true end @@ -450,13 +498,21 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) local internal_health = is_healthy and "healthy" or "unhealthy" local ok, err = locking_target_list(self, function(target_list) + local found = false -- check whether we already have this target for _, target in ipairs(target_list) do - if target.ip == ip and target.port == port and target.hostname == hostname then - self:log(DEBUG, "adding an existing target: ", hostname or "", " ", ip, - ":", port, " (ignoring)") - return false + if target.ip == ip and target.port == port and target.hostname == (hostname) then + if target.purge_time == nil then + self:log(DEBUG, "adding an existing target: ", hostname or "", " ", ip, + ":", port, " (ignoring)") + return false + end + target.purge_time = nil + found = true + internal_health = self:get_target_status(ip, port, hostname) and + "healthy" or "unhealthy" + break end end @@ -470,12 +526,15 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) end -- target does not exist, go add it - target_list[#target_list + 1] = { - ip = ip, - port = port, - hostname = hostname, - hostheader = hostheader, - } + if not found then + target_list[#target_list + 1] = { + ip = ip, + port = port, + hostname = hostname, + hostheader = hostheader, + meta = tbl_meta, + } + end target_list = serialize(target_list) ok, err = self.shm:set(self.TARGET_LIST, target_list) @@ -484,7 +543,9 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) end -- raise event for our newly added target - self:raise_event(self.events[internal_health], ip, port, hostname) + if not found then + self:raise_event(self.events[internal_health], ip, port, hostname) + end return true end) @@ -518,8 +579,6 @@ end --- Remove a target from the healthchecker. -- The target not existing is not considered an error. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -566,8 +625,6 @@ end --- Clear all healthcheck data. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @return `true` on success, or `nil + error` on failure. 
function checker:clear() @@ -599,6 +656,32 @@ function checker:clear() end +--- Clear all healthcheck data after a period of time. +-- Useful for keeping target status between configuration reloads. +-- @param delay delay in seconds before purging target state. +-- @return `true` on success, or `nil + error` on failure. +function checker:delayed_clear(delay) + assert(tonumber(delay), "no delay provided") + + return locking_target_list(self, function(target_list) + local purge_time = ngx_now() + delay + + -- add purge time to all targets + for _, target in ipairs(target_list) do + target.purge_time = purge_time + end + + target_list = serialize(target_list) + local ok, err = self.shm:set(self.TARGET_LIST, target_list) + if not ok then + return nil, "failed to store target_list in shm: " .. err + end + + return true + end) +end + + --- Get the current status of the target. -- @param ip IP address of the target being checked. -- @param port the port being checked against. @@ -629,7 +712,7 @@ end -- @param port Target port -- @param hostname Target hostname -- @param fn The function to execute --- @return The results of the function; or "scheduled" in case it fails locking and +-- @return The results of the function; or true in case it fails locking and -- will retry asynchronously; or nil+err in case it fails to retry. local function locking_target(self, ip, port, hostname, fn) local key = key_for(self.TARGET_LOCK, ip, port, hostname) @@ -656,8 +739,6 @@ end -- Increment the healthy or unhealthy counter. If the threshold of occurrences -- is reached, it changes the status of the target in the shm and posts an -- event. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param self The checker object -- @param health_report "healthy" for the success counter that drives a target -- towards the healthy state; "unhealthy" for the failure counter. @@ -743,8 +824,6 @@ end -- If `unhealthy.tcp_failures` (for TCP failures) or `unhealthy.http_failures` -- is set to zero in the configuration, this function is a no-op -- and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -772,8 +851,6 @@ end -- required to make a target "rise". -- If `healthy.successes` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -795,8 +872,6 @@ end -- or `unhealthy.http_failures` (fur unhealthy HTTP status codes) -- is set to zero in the configuration, this function is a no-op -- and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -830,8 +905,6 @@ end --- Report a failure on TCP level. -- If `unhealthy.tcp_failures` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. 
-- @param hostname hostname of the target being checked. @@ -855,8 +928,6 @@ end --- Report a timeout failure. -- If `unhealthy.timeouts` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -872,8 +943,6 @@ end --- Sets the current status of all targets with the given hostname and port. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param hostname hostname being checked. -- @param port the port being checked against -- @param is_healthy boolean: `true` for healthy, `false` for unhealthy @@ -895,14 +964,12 @@ function checker:set_all_target_statuses_for_hostname(hostname, port, is_healthy end end - return all_ok, #errs > 0 and table_concat(errs, "; ") or nil + return all_ok, #errs > 0 and table.concat(errs, "; ") or nil end --- Sets the current status of the target. --- This will set the status and clear its counters. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. +-- This will immediately set the status and clear its counters. -- @param ip IP address of the target being checked -- @param port the port being checked against -- @param hostname (optional) hostname of the target being checked. @@ -1013,7 +1080,7 @@ function checker:run_single_check(ip, port, hostname, hostheader) end - local req_headers = self.checks.active.headers + local req_headers = self.checks.active.req_headers local headers if self.checks.active._headers_str then headers = self.checks.active._headers_str @@ -1022,22 +1089,22 @@ function checker:run_single_check(ip, port, hostname, hostheader) if headers_length > 0 then if is_array(req_headers) then self:log(WARN, "array headers is deprecated") - headers = table_concat(req_headers, "\r\n") + headers = table.concat(req_headers, "\r\n") else headers = new_tab(0, headers_length) local idx = 0 for key, values in pairs(req_headers) do - if type(values) == "table" then - for _, value in ipairs(values) do + if type(values) == "table" then + for _, value in ipairs(values) do + idx = idx + 1 + headers[idx] = key .. ": " .. tostring(value) + end + else idx = idx + 1 - headers[idx] = key .. ": " .. tostring(value) + headers[idx] = key .. ": " .. tostring(values) end - else - idx = idx + 1 - headers[idx] = key .. ": " .. tostring(values) - end end - headers = table_concat(headers, "\r\n") + headers = table.concat(headers, "\r\n") end if #headers > 0 then headers = headers .. "\r\n" @@ -1053,7 +1120,7 @@ function checker:run_single_check(ip, port, hostname, hostheader) local bytes bytes, err = sock:send(request) if not bytes then - self:log(ERR, "failed to send http request to '", hostname or "", " (", ip, ":", port, ")': ", err) + self:log(ERR, "failed to send http request to '", hostname, " (", ip, ":", port, ")': ", err) if err == "timeout" then sock:close() -- timeout errors do not close the socket. 
return self:report_timeout(ip, port, hostname, "active") @@ -1064,7 +1131,7 @@ function checker:run_single_check(ip, port, hostname, hostheader) local status_line status_line, err = sock:receive() if not status_line then - self:log(ERR, "failed to receive status line from '", hostname or "", " (",ip, ":", port, ")': ", err) + self:log(ERR, "failed to receive status line from '", hostname, " (",ip, ":", port, ")': ", err) if err == "timeout" then sock:close() -- timeout errors do not close the socket. return self:report_timeout(ip, port, hostname, "active") @@ -1079,13 +1146,13 @@ function checker:run_single_check(ip, port, hostname, hostheader) if from then status = tonumber(status_line:sub(from, to)) else - self:log(ERR, "bad status line from '", hostname or "", " (", ip, ":", port, ")': ", status_line) + self:log(ERR, "bad status line from '", hostname, " (", ip, ":", port, ")': ", status_line) -- note: 'status' will be reported as 'nil' end sock:close() - self:log(DEBUG, "Reporting '", hostname or "", " (", ip, ":", port, ")' (got HTTP ", status, ")") + self:log(DEBUG, "Reporting '", hostname, " (", ip, ":", port, ")' (got HTTP ", status, ")") return self:report_http_status(ip, port, hostname, status, "active") end @@ -1093,11 +1160,7 @@ end -- executes a work package (a list of checks) sequentially function checker:run_work_package(work_package) for _, work_item in ipairs(work_package) do - if ngx_worker_exiting() then - self:log(DEBUG, "worker exting, skip check") - break - end - self:log(DEBUG, "Checking ", work_item.hostname or "", " ", + self:log(DEBUG, "Checking ", work_item.hostname, " ", work_item.hostheader and "(host header: ".. work_item.hostheader .. ")" or "", work_item.ip, ":", work_item.port, " (currently ", work_item.debug_health, ")") @@ -1146,12 +1209,51 @@ end -- results of the checks. +-- @return `true` on success, or false if the lock was not acquired, or `nil + error` +-- in case of errors +local function get_periodic_lock(shm, key) + local my_pid = ngx_worker_pid() + local checker_pid = shm:get(key) + + if checker_pid == nil then + -- no worker is checking, try to acquire the lock + local ok, err = shm:add(key, my_pid, LOCK_PERIOD) + if not ok then + if err == "exists" then + -- another worker got the lock before + return false + end + ngx_log(ERR, "failed to add key '", key, "': ", err) + return nil, err + end + elseif checker_pid ~= my_pid then + -- another worker is checking + return false + end + + return true +end + + +-- touch the shm to refresh the valid period +local function renew_periodic_lock(shm, key) + local my_pid = ngx_worker_pid() + + local _, err = shm:set(key, my_pid, LOCK_PERIOD) + if err then + ngx_log(ERR, "failed to update key '", key, "': ", err) + end +end + + --- Active health check callback function. 
-- @param self the checker object this timer runs on -- @param health_mode either "healthy" or "unhealthy" to indicate what check local function checker_callback(self, health_mode) + if self.checker_callback_count then + self.checker_callback_count = self.checker_callback_count + 1 + end - -- create a list of targets to check, here we can still do this atomically local list_to_check = {} local targets, err = fetch_target_list(self) if not targets then @@ -1172,6 +1274,7 @@ local function checker_callback(self, health_mode) port = target.port, hostname = target.hostname, hostheader = target.hostheader, + meta = target.meta, debug_health = internal_health, } end @@ -1180,8 +1283,19 @@ local function checker_callback(self, health_mode) if not list_to_check[1] then self:log(DEBUG, "checking ", health_mode, " targets: nothing to do") else - self:log(DEBUG, "checking ", health_mode, " targets: #", #list_to_check) - self:active_check_targets(list_to_check) + local timer = resty_timer({ + interval = 0, + recurring = false, + immediate = false, + detached = true, + expire = function() + self:log(DEBUG, "checking ", health_mode, " targets: #", #list_to_check) + self:active_check_targets(list_to_check) + end, + }) + if timer == nil then + self:log(ERR, "failed to create timer to check ", health_mode) + end end end @@ -1193,7 +1307,7 @@ function checker:event_handler(event_name, ip, port, hostname) if event_name == self.events.remove then if target_found then -- remove hash part - self.targets[target_found.ip][target_found.port][target_found.hostname or target_found.ip] = nil + self.targets[target_found.ip][target_found.port][target_found.hostname] = nil if not next(self.targets[target_found.ip][target_found.port]) then -- no more hostnames on this port, so delete it self.targets[target_found.ip][target_found.port] = nil @@ -1211,7 +1325,7 @@ function checker:event_handler(event_name, ip, port, hostname) end end self:log(DEBUG, "event: target '", hostname or "", " (", ip, ":", port, - "' removed") + ")' removed") else self:log(WARN, "event: trying to remove an unknown target '", @@ -1225,10 +1339,10 @@ function checker:event_handler(event_name, ip, port, hostname) then if not target_found then -- it is a new target, must add it first - target_found = { ip = ip, port = port, hostname = hostname } - self.targets[ip] = self.targets[ip] or {} - self.targets[ip][port] = self.targets[ip][port] or {} - self.targets[ip][port][hostname or ip] = target_found + target_found = { ip = ip, port = port, hostname = hostname or ip } + self.targets[target_found.ip] = self.targets[target_found.ip] or {} + self.targets[target_found.ip][target_found.port] = self.targets[target_found.ip][target_found.port] or {} + self.targets[target_found.ip][target_found.port][target_found.hostname] = target_found self.targets[#self.targets + 1] = target_found self:log(DEBUG, "event: target added '", hostname or "", "(", ip, ":", port, ")'") end @@ -1243,7 +1357,7 @@ function checker:event_handler(event_name, ip, port, hostname) end self:log(DEBUG, "event: target status '", hostname or "", "(", ip, ":", - port, ")' from '", from, "' to '", to, "', ver: ", self.status_ver) + port, ")' from '", from, "' to '", to, "', ver: ", self.status_ver) end target_found.internal_health = event_name @@ -1266,17 +1380,14 @@ end -- Log a message specific to this checker -- @param level standard ngx log level constant function checker:log(level, ...) - return ngx_log(level, self.LOG_PREFIX, ...) + ngx_log(level, worker_color(self.LOG_PREFIX), ...) 
end -- Raises an event for a target status change. function checker:raise_event(event_name, ip, port, hostname) local target = { ip = ip, port = port, hostname = hostname } - local ok, err = worker_events.post(self.EVENT_SOURCE, event_name, target) - if not ok then - self:log(ERR, "failed to post event '", event_name, "' with: ", err) - end + worker_events.post(self.EVENT_SOURCE, event_name, target) end @@ -1285,15 +1396,10 @@ end -- after the current timers have expired they will be marked as stopped. -- @return `true` function checker:stop() - if self.active_healthy_timer then - self.active_healthy_timer:cancel() - self.active_healthy_timer = nil - end - if self.active_unhealthy_timer then - self.active_unhealthy_timer:cancel() - self.active_unhealthy_timer = nil - end - self:log(DEBUG, "timers stopped") + self.checks.active.healthy.active = false + self.checks.active.unhealthy.active = false + worker_events.unregister(self.ev_callback, self.EVENT_SOURCE) + self:log(DEBUG, "healthchecker stopped") return true end @@ -1301,34 +1407,21 @@ end --- Start the background health checks. -- @return `true`, or `nil + error`. function checker:start() - if self.active_healthy_timer or self.active_unhealthy_timer then - return nil, "cannot start, timers are still running" + if self.checks.active.healthy.interval > 0 then + self.checks.active.healthy.active = true + -- the first active check happens only after `interval` + self.checks.active.healthy.last_run = ngx_now() end - for _, health_mode in ipairs({ "healthy", "unhealthy" }) do - if self.checks.active[health_mode].interval > 0 then - local timer, err = resty_timer({ - interval = self.checks.active[health_mode].interval, - recurring = true, - immediate = true, - detached = false, - expire = checker_callback, - cancel = nil, - shm_name = self.shm_name, - key_name = self.PERIODIC_LOCK .. health_mode, - sub_interval = math.min(self.checks.active[health_mode].interval, 0.5), - }, self, health_mode) - if not timer then - return nil, "failed to create '" .. health_mode .. "' timer: " .. err - end - self["active_" .. health_mode .. "_timer"] = timer - end + if self.checks.active.unhealthy.interval > 0 then + self.checks.active.unhealthy.active = true + self.checks.active.unhealthy.last_run = ngx_now() end worker_events.unregister(self.ev_callback, self.EVENT_SOURCE) -- ensure we never double subscribe worker_events.register_weak(self.ev_callback, self.EVENT_SOURCE) - self:log(DEBUG, "timers started") + self:log(DEBUG, "active check flagged as active") return true end @@ -1344,49 +1437,7 @@ local MAXNUM = 2^31 - 1 local function fail(ctx, k, msg) ctx[#ctx + 1] = k - error(table_concat(ctx, ".") .. ": " .. msg, #ctx + 1) -end - - -local deepcopy -do - local function _deepcopy(orig, copied) - -- prevent infinite loop when a field refers its parent - copied[orig] = true - -- If the array-like table contains nil in the middle, - -- the len might be smaller than the expected. - -- But it doesn't affect the correctness. 
- local len = #orig - local copy = table.new(len, table.nkeys(orig) - len) - for orig_key, orig_value in pairs(orig) do - if type(orig_value) == "table" and not copied[orig_value] then - copy[orig_key] = _deepcopy(orig_value, copied) - else - copy[orig_key] = orig_value - end - end - - local mt = getmetatable(orig) - if mt ~= nil then - setmetatable(copy, mt) - end - - return copy - end - - - local copied_recorder = {} - - function deepcopy(orig) - local orig_type = type(orig) - if orig_type ~= 'table' then - return orig - end - - local res = _deepcopy(orig, copied_recorder) - table.clear(copied_recorder) - return res - end + error(table.concat(ctx, ".") .. ": " .. msg, #ctx + 1) end @@ -1432,6 +1483,7 @@ local defaults = { shm_name = NO_DEFAULT, type = NO_DEFAULT, status_ver = 0, + events_module = "resty.worker.events", checks = { active = { type = "http", @@ -1454,6 +1506,7 @@ local defaults = { timeouts = 3, http_failures = 5, }, + req_headers = {""}, }, passive = { type = "http", @@ -1501,9 +1554,6 @@ end -- -- *NOTE*: the returned `checker` object must be anchored, if not it will be -- removed by Lua's garbage collector and the healthchecks will cease to run. --- --- *NOTE*: in non-yieldable contexts, the initial loading of the target --- statusses will be executed async. -- @param opts table with checker options. Options are: -- -- * `name`: name of the health checker @@ -1540,11 +1590,22 @@ end -- @return checker object, or `nil + error` function _M.new(opts) - assert(worker_events.configured(), "please configure the " .. - "'lua-resty-worker-events' module before using 'lua-resty-healthcheck'") + opts = opts or {} + local active_type = (((opts or EMPTY).checks or EMPTY).active or EMPTY).type + local passive_type = (((opts or EMPTY).checks or EMPTY).passive or EMPTY).type local self = fill_in_settings(opts, defaults) + load_events_module(self) + + -- If using deprecated self.type, that takes precedence over + -- a default value. TODO: remove this in a future version + if self.type then + self.checks.active.type = active_type or self.type + self.checks.passive.type = passive_type or self.type + check_valid_type("type", self.type) + end + assert(self.checks.active.healthy.successes < 255, "checks.active.healthy.successes must be at most 254") assert(self.checks.active.unhealthy.tcp_failures < 255, "checks.active.unhealthy.tcp_failures must be at most 254") assert(self.checks.active.unhealthy.http_failures < 255, "checks.active.unhealthy.http_failures must be at most 254") @@ -1554,24 +1615,9 @@ function _M.new(opts) assert(self.checks.passive.unhealthy.http_failures < 255, "checks.passive.unhealthy.http_failures must be at most 254") assert(self.checks.passive.unhealthy.timeouts < 255, "checks.passive.unhealthy.timeouts must be at most 254") - -- since counter types are independent (tcp failure does not also increment http failure) - -- a TCP threshold of 0 is not allowed for enabled http checks. - -- It would make tcp failures go unnoticed because the http failure counter is not - -- incremented and a tcp threshold of 0 means disabled, and hence it would never trip. 
- -- See https://github.com/Kong/lua-resty-healthcheck/issues/30 - if self.checks.passive.type == "http" or self.checks.passive.type == "https" then - if self.checks.passive.unhealthy.http_failures > 0 then - assert(self.checks.passive.unhealthy.tcp_failures > 0, "self.checks.passive.unhealthy.tcp_failures must be >0 for http(s) checks with http_failures >0") - end - end - if self.checks.active.type == "http" or self.checks.active.type == "https" then - if self.checks.active.unhealthy.http_failures > 0 then - assert(self.checks.active.unhealthy.tcp_failures > 0, "self.checks.active.unhealthy.tcp_failures must be > 0 for http(s) checks with http_failures >0") - end - end - if opts.test then self.test_get_counter = test_get_counter + self.checker_callback_count = 0 end assert(self.name, "required option 'name' is missing") @@ -1600,7 +1646,7 @@ function _M.new(opts) end -- other properties - self.targets = {} -- list of targets, initially loaded, maintained by events + self.targets = nil -- list of targets, initially loaded, maintained by events self.events = nil -- hash table with supported events (prevent magic strings) self.ev_callback = nil -- callback closure per checker instance @@ -1622,10 +1668,10 @@ function _M.new(opts) self.TARGET_LIST = SHM_PREFIX .. self.name .. ":target_list" self.TARGET_LIST_LOCK = SHM_PREFIX .. self.name .. ":target_list_lock" self.TARGET_LOCK = SHM_PREFIX .. self.name .. ":target_lock" - self.PERIODIC_LOCK = SHM_PREFIX .. self.name .. ":period_lock:" + self.PERIODIC_LOCK = SHM_PREFIX .. ":period_lock:" -- prepare constants self.EVENT_SOURCE = EVENT_SOURCE_PREFIX .. " [" .. self.name .. "]" - self.LOG_PREFIX = worker_color(LOG_PREFIX .. "(" .. self.name .. ") ") + self.LOG_PREFIX = LOG_PREFIX .. "(" .. self.name .. ") " -- register for events, and directly after load initial target list -- order is important! 
@@ -1660,19 +1706,101 @@ function _M.new(opts) -- just a wrapper to be able to access `self` as a closure return self:event_handler(event, data.ip, data.port, data.hostname) end - worker_events.register_weak(self.ev_callback, self.EVENT_SOURCE) -- handle events to sync up in case there was a change by another worker - worker_events.poll() + worker_events:poll() end - -- start timers + -- turn on active health check local ok, err = self:start() if not ok then self:stop() return nil, err end + -- if active checker is not running, start it + if active_check_timer == nil then + + self:log(DEBUG, "worker ", ngx_worker_id(), " (pid: ", ngx_worker_pid(), ") ", + "starting active check timer") + local shm, key = self.shm, self.PERIODIC_LOCK + last_cleanup_check = ngx_now() + active_check_timer, err = resty_timer({ + recurring = true, + interval = CHECK_INTERVAL, + jitter = CHECK_JITTER, + detached = false, + expire = function() + + if get_periodic_lock(shm, key) then + active_check_timer.interval = CHECK_INTERVAL + renew_periodic_lock(shm, key) + else + active_check_timer.interval = CHECK_INTERVAL * 10 + return + end + + local cur_time = ngx_now() + for _, checker_obj in pairs(hcs) do + + if (last_cleanup_check + CLEANUP_INTERVAL) < cur_time then + -- clear targets marked for delayed removal + locking_target_list(checker_obj, function(target_list) + local removed_targets = {} + local index = 1 + while index <= #target_list do + local target = target_list[index] + if target.purge_time and target.purge_time <= cur_time then + table_insert(removed_targets, target) + table_remove(target_list, index) + else + index = index + 1 + end + end + + if #removed_targets > 0 then + target_list = serialize(target_list) + + local ok, err = shm:set(checker_obj.TARGET_LIST, target_list) + if not ok then + return nil, "failed to store target_list in shm: " .. 
err + end + + for _, target in ipairs(removed_targets) do + clear_target_data_from_shm(checker_obj, target.ip, target.port, target.hostname) + checker_obj:raise_event(checker_obj.events.remove, target.ip, target.port, target.hostname) + end + end + end) + + last_cleanup_check = cur_time + end + + if checker_obj.checks.active.healthy.active and + (checker_obj.checks.active.healthy.last_run + + checker_obj.checks.active.healthy.interval <= cur_time) + then + checker_obj.checks.active.healthy.last_run = cur_time + checker_callback(checker_obj, "healthy") + end + + if checker_obj.checks.active.unhealthy.active and + (checker_obj.checks.active.unhealthy.last_run + + checker_obj.checks.active.unhealthy.interval <= cur_time) + then + checker_obj.checks.active.unhealthy.last_run = cur_time + checker_callback(checker_obj, "unhealthy") + end + end + end, + }) + if not active_check_timer then + self:log(ERR, "Could not start active check timer: ", err) + end + end + + table.insert(hcs, self) + -- TODO: push entire config in debug level logs self:log(DEBUG, "Healthchecker started!") return self diff --git a/lua-resty-healthcheck-2.0.0-1.src.rock b/lua-resty-healthcheck-2.0.0-1.src.rock deleted file mode 100644 index 56053445b27b7ea7e56da06ace5fffe2363fa5b9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 94201 zcmb5VW00=Rwyyb>Im@mWTjG-V60*V3v03ZR3ZfuHmu_v((KmfohFaSUXzyVmh7}7eLI5~UJnwuC}JDVGu zn;2WsGSM;6G0-y7IocUpIoX>StExZ&K)N3-^#5zPxWfQ|K%f2*`Il};BBT5u1478l zccijuW|MAweA!UwkpGaDOWTmR(NcYk4_pPpr|YyFr6}-T|6tCJ+sw@KDsCNlTgU{7 z4tS)u4qR|(m~H;G0xcP|B6pU&^|@-dR!ya-Go`cUV++6^MdtcB(=Jo&nXuXAWaN0) zdgoKFdxC4^TP9`VLrC#5@R=u$`Z9e4agZ^u^JiJy-4zIrckNF!xU0f>y!W91hfpzt zMLh;4=dedEk(iAHly&PQm}c)f2riJJGZiK^{?otRy2&wGvBk3=Jp*8FYHxhCqK40ZclE1TWjoXGjA zYGEAQe7xM8p7v;cb!U~;C!fBy?!(|;e?h%k($%O`bMyK;+I|mMF+Y%JLbWEz5^Iri zGxkYBm}ry#R*#r>My*9^t~YY7Ku~*HRxysyN7qB))3#vO=*GPBBFrhXmsXAVD_EsE zF4P13wj8~Y^D38)#b@q5oorm$v8O?AERRAh>sRO5i5BmZN*D7xptt2H%0Dn2PG* z?GAK~*t7(P3e>HAg#*Sjj7bjEenhgkT&E3133;}JC$43uT3r=#^k^lE_7kk#R61|Z zBB_arQ`OmkIgSz^T*w?`?p*2Mc3Z^!R5yC-*hI)t0(=p+Ni3)b%dZgpS^MPL^vK;- zqKN6eY5fgWU|-R33HOQHW^7HtZ)v6VTE96C*8~RztT@N%trY5CXovMvkET@TA)&*_ zo1f<3EF+PnXDo!NG>z{BlK?RDuO)|=o`aEl?>oIxfIG3Rq@o?RKYcLvV|iYHqPX0X zo=>-jB&IF#zaC&T06iszBo8k0r7=N~Kqob{E+5ZiV{p0-As=@j8+|`1sz<%5I~K5c(Dm!{ted1oP{ErRm#el&rOF;Y+BY0;glaQ#Go-z4=gcuJ0EolN9}AFS zu@pq6ZiEtK;hhT!hQ`L`4UD+`*MrT&P~#{$NP_S}dUc8=8Kq9f&IO_~`~4Fo4n%G# z>5+b@{;lhYA+!5NqN^55tyyXGQ>D8Jh0+|IORTcW5-uKMi=1f_yb$c5IH|e=R&E6# z(vnJw8bMTC9^w|p?liJ;83L60>{u2WW`2#>;c2Q_SelpzuS zdzGyT+SgC&jYv8YNO7lcS=Qf%-4?*j;|{tlI47E!PCRgjtcBPmb9B9CnShKc zVzQ2R;8WC>&YHe&aer^Jq&KynVXB9=@&P$nC&3!;zYM@YKMjeorigt;TX7NL8=2PD zH$XcSQy*6_|I=UcoA9G^e@zLco_Yu5Tbc`q=o}oKU4T+hORa$j)e(ZdZ21TQj@3;? 
zfF3>MIOZ^_ycRa8_Ep#vcC01AznO(vngmxY%SM@N% zKZ&Yj5jyv|2ykY5hli7&cs4z+aX%F{{-=_#UGs%H)^_%%deOM-$kjB^aU3Mn7Xx&X zKOda%Y`?h&5}T?TY`;M;%@ z3lNT3Giz0@?v-iz>4nj%4CsXg*@F&c3aF(!MJ7!{vv+m0OEdS^wOCc-f@7K+YT;lx z_1YT$2!y^zDRO>64KI_h5OK0YmBNBa;;Dz}Yp0Lxlv?meFJ~AR=#(lb)-OnJr}GBX zSY0-F<7A;P3z-V|J8y2rpPDX|%_wO!qU(=xaEej9>H?{;D*ks7tb2i z&I&20OUKB6-1a>3X%Xdff%43`LlrM<2a+^y@5V5Zojh9C^38<7t7H_F?$K;f0h@ea zh(dYp@LvTkCp^uX{c5hA{^|WLU=r@y;<{*QyTC|V%`*bOB-ld>?lT}Jzj&s-*qRcpC+Vcty&or0}^C~_I@@o&}7j`@VSt$qo*gnIB{{ct`_XAtJtyGwYCW)PzXT4 zsB+=y%LU`tsNBV3%WQxWfXXX~;;&&JUDJE)id z-r>n>viQJ9_)|r0ybr^GVZX9r;k-(>kx?(f6LADJ1jfvQA^fz0#%b^ui>73 znPS%6XsL~Gvm0WsXYZwuvwe;RdmS3a*l5wzFNR9(vYn-%AJNw2Hp-@ExL16}D`~r0 zHOxHLSg)*`Zo62nC_!09S?_o21K$2a_VE9_H{F@;f8hN7FZ^CbX8$pQf85*hf1WSN ze~{|`d90O*2d%w{&42QGO;XtAip>#%oqR`5i%@r^?FXXXcFpMB!7#Xo>iX=|(saT~ z;3VbTDtv!`FjeDGp3-h83GYMnGULAx{e(gbA;OC4{lWiey*VwlEj|gW??Vs);Eh?< zHP!aOEdv28&E*d909&|JR`%M^i^UVq1_f4~1V|KDN+?@hJ=YHT5>P5ks1=kbo?20R z&8==5jH-bRGUPq$JDpJeC4wD5A3vqhQFr6FEl2 z)gNuztG5qhGX5MHyQ^=!s=9Hz1^s z{~#LYzyuv%Z~se0%w7(Q1`ts7J1?3=9+U)|HJVV@TY*yrY$0{_H+N#(`qkoZ2WCxK zlJ=8)d39dzPyN`>b-Q6)Lfy}9;2_rc@cDL)Qfk@GdV5++PboDxZ4v-ZgD-c2zy(=# zkRevZa_U`(cG}-Jv(ykl!HS@`K~y_Rr05R=M(nr}ueB-8CYMG4O|5U=))qpVyWqp? z9tUm_a5yTD3rW5zB)yY{sc9soVqEdHR)VaHij^&AIcR*P^& z&>{?E@&Q)@-{|JKyARDNFLsiCtU9`>2xreo8F?1HW2Qpv*d8;feny7{MMkQI_A90RX&b&^FmF;iKQGUfVGzOpS z%HVAo4I+MO+SQe~b@&mLNxO{c@{^g;!gGfQ=?*N2)2Z)t>pkgY0kg0N=aN9~)*d6Z z_ubi?cSGG?#*b}wOAIi?EMetjXKf?C-^LC*@~geZk2*Z8Ryya5G|g8`AGhWDzD@-J zS278KD8etp#5Zw9tat^yf{$#pcG@~HTK=gVY$W1aMFpC>_+aTU2^{&VmoRI~UtG*u z?MoYMHkQpidgkwe!dIIY<}975e+2zauE%KD<&3O`+yIUTb?N9U4_^1082{%GO+&O`bQC6~n-EH* zcbGaX^DYHXkp#5G>m6J!cmjgVk=+9oOwTlux<2e;li3?@heJfO<&A^w0k^);Xn8y} z><`e3*zwl3H3O|gkJTG_hx<4DbDJAT(yt%-H}rpPlqa~)F=>GT0JQ!!%m3&7cmJ=b z8<{vd|9A51lRQydB#t2R<}Gr!fD8toE<(dd7ytrnisw3-zM*eK5KwogrlMH2Lh52o zMUaSV<(aqd*qhZaiVn5uKaS)G3xU%AAx<5B>NuEOEiaWfYTPeG;*pM# z*n_p@R(CfX|7mCXWte>M4gBkeIEt{=A1v}jn!pah^!R6n<0Mu7J57Tu52 z4_NUT)PUGyvwx$KVZ{t_a@={cyhb&nHXR#MyN&7<`BEu{Yw?L*x%S5+j+-&-SXhYe zwMp4rGv{cXrE;6)a6@FjMyI?|se*A%NH`g)O4&QaQgz;Z0*1`4C@|Wl{m!ZOd9b8| zRZXiiwZ{s4^qZW8H?G?7KGoC|oDZg-m`gsS3cr3VpmaIngcUv!py_l`mamZGOoO0S zbJU1y1t8W77fB4*w;0nuuK&J2zLb+0;{T!S9iubdwr$~5Y&#X(sMxlhifub@P_b>> zwv&pDif!9T1(o}*eeSv6w{~lH?Q{RkwETXbG3V%g^genKt8uNRj7M8Oo`m>B4h=xM zKHoN-t%{18!f{XS1i5ZdlDH6c7mnL za{9R;Uq9(SGEGqSAAHux+h90!lY-4iVF_^SSi>stEN?GO1tS38=%f10TE_xaY7(08 z;-9OF$e5y6eS{2;_FAv>fS>9((B7cX0h3SL+kOYzwGw$%(||KfA166aB;kaHA#?_6 zeg&`Etk?VBY~rpzHx<&C=zmqOd>Jl3W!;;5Yi4dYYsrjVRfc@HY^khblet5B5MsOn z_0`4j!z92w-U;1sJ(>ug48OAI-kEDpEB&@0H{bLla}ph&x3wK>jg?WikEsupqko{X zrX=^G6^I-?ALSjft;$t6-~$B#jLX#P6631SvQlMlS837OVMffoxft= zJ~>(I30+V8TA$Ahy62|v?tythMm$)++OWMsv)Y3E-4!(NqZteTyl}ao6{Ys|ab>w1 z;J11Z$eq>_)^%Kiw$($9K;!gMha0?+k{D)$0sd4M-gcX~N7SY^6ic{JTP9fYVD9~n z@b_ZGrUYFO1*V{SgZ>X%At%#+1ycS~E99yyr!c?--~FtS_Or+(Q`xI}fwcFBV}QT# zzFmBWiS%4DS}%U*CYFe11?sZz%#7RBbp(IN-h2n>=9dmV^K{$3-{9OfdJ}9#^d0aZ zRT|7V@cFm_a#TYy3wTw6R72^p*2UmOMm2MpXk4V;sxPLAbocPtV+~7CFPZ`KJmBX) zubL5*P&uLYab|yuyBp+(-0ii5c+*@xpO6`+67{S82vU9^yNPn(L=IAsnrnT*xcBKO z8Sq~T36GA#a9fJL`>t}!HlEmFoe{*WL@C3BjYo1YX;rGyn7pUE2AzyhrOuAjcKS*)LEbUrJ~&Z3aEP^gtS<5e>0{+Y zpFnT%GFnLPm~{NUXmTpnadqY4oy83bd2%vx2UH>!lf~>Cqo2Uvf1#jljI1Kyb?F9- z4E?|RZ(JCendyyfObwm>1D~Lw6q^a;GLJQ)PNd1Z>EI4pE>RBU)wFd9Qd6GY*V5u-kvo2EqqdU4(|>yPo2se;I9@I(A++dZHMW<0$kA6_*2 zj?{%t-dOxQ=fWpUSX!%CQd_B#_cKFnhruoG)azdG$?W1!p|!HO(L5mfqGZ{5+E(UZ z;qoB9?k8aZ8BEJ=pWH|ceKaTc=OIvBt-b>KNc&aXQ|(d%yyeQMZJm!@RL

        dkK+b zmT;-08%+nMUljt|I8T~)N$8v=-5Y#{&=QaBW~FRBX#Cj*^_om34~Lq=aFVsle> zLXpi!$;4EBXlJQ~hm&&xexmN78z&-rwMoFhqO`RzFftMsBNw*Cpj>NV_KRV&J8H1A z>8MN_mv}Nz5o+L`1*Hj5WKcY80UzY*RO(L;c&7EhPh0;=0zxi^4r^*Uf`tss&HoddiHggkr zV}#$2GapGx8#mK^whb3sw@MMTkI$5aI3Ky(6g-k;c1+F{`ksl#-jG6;;ItA%Gsf7g zMxFvWY9t7^+3i?&uy#~kc{BSXOkq1S^CY=vj6QZM;2qCB?3lh(v?MsrMI?-9V%#!Y zt1~8wENUMYB0&}inD)EHq=-d2XD|B{gO#YCtbOnl#4-JVcAunJ$&poM3vg|#?{DmT6?Tbr}07t^H_}zx&G$+ET{rc%hr^3zU z=JDuleQZ>t?!H#c-LGBh_ewzsh{HU1An<7PE& z;BI?#-{o3PMKD={uS4>RE^)gEy0|7t1A1rt)gVGrBMF!hs#J3o$S=LuamxELNe|_$ z22#AO*BiExb$v@@V*+C2z2sx&$$UgUe{6r#)QNUqI(b|aR85#y5}hRZ3T}$aJpY1b z=!iGWYyT5Wyz<@i~0;!DIj%UiI^KL5ZTzSw!6L$CV zf)0Xj$Yt&E^b@YAl$zM3K%+pq_D@)N+xOr~^Wl~QT;~*x^jn9Mf{^Vq%a9K9Ks0!F zDwq!A4K!?9^80BIizuJhw%?T-f(a8^8*LT_ zQC6OK$0P8A7Q4}%rSZwtm_5!MPgh}#cz|!m+x^4`?|VnvpPTEchCeg|TDVH9w|JPVx;-N2I- zVH#SSuz9oTxJCwt<u@HytdC0te3zV!c^=sp!k$^{*!6M?H7uDu-*tenh*f zZSSoXRz({fGbg9*NzeE9n#}5z8yXu7-i`T)n}SRynPx?*_*B$hJ1n#q2wV}(pjRXS;C zoqk8Ai&#bDMdGIp-xf(eRyy`WUl+QE(!rO!q*NeX*&i2*Q}z&nG%y`TF79K-;wiHd z1iW=3RA=5+*e2P;7t>q^2KOdLD+QysdsUKtO$8LrR*!~v3 z1>Tnoe=J$r>%{Nk)S!osLI=l4S1>*62wQ$s*0kJ8snxH-gkEjRj5Q&vF)GxVUt}#4TL;f8 zC2VY!BR6UwpJ#*XER8lwlT278ARi9yrNP2Yvr1FUf0wNUe1p3NCTd(oU70$c`RT*k zdpd1DN}Lp1UoSeJ@Q3&SyvDaR%KXs z7@qUF_Pfg2@i;tU73raLf0{u=C+46OIrT|Yns3_Q>_yP1;)F36ob!=&#Jtsghn8z! zIsigC)~ds_v$cW;u%AEVwxKsV)15krpo2-cO|E|XXu9Jf500Xtg)*b|txO1Rn2{enZuauaBRK_8`p{5rZ zwYZJ2u3`a;pY=|N3uUSX?IwptEa}2vV*e% z4;0~jRKlG5Y_G+m57k(%246m2n-;oOP{vWGcqg!ZTR3;k@s3d+%9S1xAW(BG;*dup zwZTLmv$Dge_ObSk&FEIUcmvlyHVjL;e>-??J8j` zbqEA{?4elGqRpBRN^F@J*A3PX??Lg8sz0pnLHG!GdjM<>KCndgWcszoZ)GH!Z-TG? z#zXA9Rg2OCIS5SZe|}h*SpQos;OuH_Z0h_UtI#%f?SD_~)JZn#we%`@*u!|#HYz?G zxYYuDTe`yi!wk%UuD^R{FQrQpR2zPP=hwo6XK4!iY1P!}B*JqV#zv9qRJRE+u^bM98|0zfEw7Jh3)6ia z*hQvr--mJG#ZQlG6Fz&R69Oil4Uv|7*;1e&{P9ZTZE{09$SQS{>+0i!;2ahAq7`-# zFTTV+PCK(MVV_M52S(v|3TLKpwn%e5ricU%8e5LGUj|v43-XmoPJ3z`% z-CWIxPj)fAF|hJLO}SxKelw^iB$5A-%4X#9(iW~R#o-gE8iiF)s-$nB3rMWFQyFy0 z>bA1Tv{D-Hg|a&wF_9jChI@vaePqIO%y}(QRYvjKGLpyYYhP_YQP<)2lW0^hN92ah z^>IoJt6%AJn`dm{m}{uebt!ILV`(epxm0=)#t$q9ai8p zoMi0Y(c5YJ?ou~!db9;fAWok)M`dU#cP8L@t^pXR!HcAF%9*zFA<(f`qq@APTA6Wb zfG|IXc;{uTg_r)_lCE5AZQcpFB&>%@X8kgvnJG6*UTreoGpE)&xtL;N5s_bO*acGU z?v6%=`CAngE{Dm9F`(I$ZYnj!&=pRKgt-wOvP;+fk`^q%<7aS&^_QG3!HdVI?owfA zS^~eX(O1L9yrVrPGGy30E_PmW&TV+p41zNz6^`RoZ@i6LFy96|x_H4yJvZwKWQ#S0 z2eK|hVruhMJe(GDX@`AXqaawe_{4@O3y}aZs?N6=k-0-d@u>@{T_t;3MQj+1;rS(d zY#4r*idHmwDz_OCSZN-$LE)h=iW@K3>dq#UtCo{v+iPOY?*SsXTJv`qDa%b=M(FJZ zoh65C^o+5)$@6#D3Pp@N2gR19WBd1`MrIYq?L>eY%`p+0vSCE*EV-%(Mv!gqc(;S& z&-MEl-zgret$6dB&>j5C+yqx|{IgH4;opKYY_EG3AfqC!I z3)bIT{l4X;e0^YRu!9c*BJ@wK0sFsN14A2|zqbdb&iZEdPWl%1&MtO_Kp^>#28Bya z8T(8@`muJS1)!;Y@xO~4s6h~=S9aT z#lRzy8Nv?z7(HszNWDloXn3XdewWKQg$*0tz{8U*s0Ip_ZC`1SH5H9pEyG$^vfhzI zD~$^Oo?5v_3)jI3B6)+Gk1sgx&crvJYU^&a!in#;3ktrkB)CZv#(q;%*YekgCZUEZ~*%IN7IhKK?t*36ji}EB(R55uuJ?|sc zt*HP#?d8ruo93T823HTQ3>^DghYe26%ZVC7irfVElHI7NL)D1zLoGAr2DjIA5$$RO za;dD@K{?DKlvsPk98>z)go0LP9nPq!MzN@9AABtzHWI-Zw~KSCN1#n z$mc`Ofk7;)9zh6s_q z?lml2Tpa#qUB9-{f2k8|vBi?1-IG8qU6)li%Fom~OZhfyRIj+40{zQ~rO-;zt6#V6 zFTkOZ&1G;q>Z~;iY6&ZOLgt&LV()m*oh9XGI*7^tgc4;f3R+F#L@R{p{TUoiHqVt6M3s+3^>as7 z2(@oaVz@u^Ct*iAAEilyWwQ%>->$~*_)_hgPoSzGf^Np8qXjDo)= zvTP^};ms7hT{;9l-=0llH&1z1*pYPuksOY?jR^_8X!BNk-q&nv&H4Jl4{vu3IosT3 zDl~WjLt{G-*bl5k5?9{?gtlz(nalDCN0i=wgLqO;g;f5C4{5wyTUmszT0Dj>Q~_`FjYBe0~cjcyT^UAot&DL8zTPqf>o#6<7%vk-ip1{$lxks|X*L^PX| zyGA>6G`qRA|vyLiXjLVAButXL6XoKm7*ZAX9*hI)S@znFzX_KHrY?Pi> zMmq&P+{xKql2h%94a$~@9UbNX$oQUQg=ZtoJ9bnU);z~J3!Eu^9t;#kmxuA{0{;dg 
za*Rpr9@CAn)i%^XdrCt|tv6<@81kwf(^3$wW^$?}<05&>Po$L~g`IGGNZJmy=>)|Z zXnsUU{hN0llxz})FNFXzb}@MiY#B8A-70Yyjat-Ht$uQYgyd5s9PEvfKqYk9{Jw9( zf$K}ie`vK;&GNLyemXIv`ybIFupUAl?E(})w>5?ocHM)wF??$Swr4PW1<>Dno~spK zbR(`r7t0ZTVo6pa^gUY*<#&4tH7H-*Bv*EJ@61-0Tp7tN$WI0DpfkSN(PPfLe{ODU zY;9JHTsu>5F&dt$8Goy+&rYqoTuLgE)cxW1WxKL%xaugwzHUtC9)UgG+Rfg)MJMsx zOje5Hn={HcGY=4*3rnUO;WW8AxG_Yhkh0^;uU3K%H5vGe7iDG=?5d58mZw6jJivdy zDmSV)1C+nf@rye)_u15|>n>lviI3>?5Ux~`7dglTl%*^UR2rHiYG+@^RL@0K1bPPQq8Xf#bbZh2pNba(7pT!tY`E4{(wDn`FOdzxjpaWZbJUnWNd1ncJpf( z_o@=TM!AiKl233hdUp z2>!#J&hg)Ox{I;H|4?zRX&bu-3?BVU^{Gyxvr6Z$ehK;OtckCgutKGHzw=NIJ{;O@ z$&o!&{{1;78^lm*PPeAB+KX8kWF|5fs5p;JFo(yaQ|bA1#JHd7`IG{;4)OaOJ)Im^ z^3n*pQREatK{QWMgKjrtapwJ?l=}-q;!OHMjwu5%Bx_g}aaZ8jb6LY`zgh&r(GSG0 zwq#B)8ojj1TQEIQ8dRPhFGgQt$V$nI`!Z^8btEo<=$Q7bAGUNuA|oK_GF7dwcY|Cv6th|vCP_LG{$%FQdEbK zu36N>hI?|ujr&E=Wf2e~yqhKw6}**NC&cOFhXE4cm(JktMa z@093%OmuCs1cmquJl<&n!DETtc}ltK#KIb-cp5H6vIMRu{=_01_OfFgzk4jJO{Ry)-}>ZB1@iJU}XUd9y7GWBOm%@fQ#~@*DLuNSFh$ zW4ZMyu8pF~`T`I;S|9_lW5CW=Aa>L;xBVA(L<_7`Rhz4u6#`<%VEJ6Hg_ixNCpU0S znYPdT9WV%pL#x>8sJwZ#6sRu)MzfCc>-m>!VqBjb4$IT;+k(pjHllJLp)rzpyK;QCcIqstY)9GWL3WAkft!r){YO9G{_bP zk;hNmwFu4FA5xp@6_ES$Ok@$`7%DNBa=tDjFSmbTM}ANON>W7@b2+{<^64(i%5N9# z<&vn!E^gNiyVBMJmIQ8|CKoc@_&-v%Dgt^|bdj#OGHu5*?Xo|NGzHc0IAu!!^PcgQE9|A~edQMgra-fTgDf$8qIcZ)=}PDKA@q%$2+P6nGZ6xt`TQmFIS z0cxRTX#B3EF9ZP%g+~pP3^hB`?4>~-7+%_eYtjDR=cUrfzhN z!uOL7y|#|0eRwK6Us$mqm(DB~q`J*{TDWwJnG1l~_P4i|TLHtT{Z!_8#K)Xfj-ead zZ*QNNIDP>8Pga3NoSrCU+==C*gf#)IgEmn(>euh;8kGF=h3O-ha zrTp8tkEi-pXO3Kto~~yrD!Cc}u4jrXucv#Oi(|&Ww~gLZJIB+&E5-v41cd0H+D0aN zJ5%@ng!eZ8;-&n6E|ySrtCKGP8vr4wV`)We2Y=S ztjax{V(DhR!*_t&T#N|bUMb@AD=w$13C1Kc38A$3Q-E(Tl1xMlhVzeBUYx!mizcMiYw8p@d$qx8#e}@JCPjl? zNZWdoped&#(7PM;6!!ucx_C-A9QybrN*;v)L{IB`A=CQ#Y83TKg1jZNv^ir`^(Q2% z2N9I^7gosy5|`d(LVl02j~H7;9bNBfxCX*^SH9x1aq2`G%eX1kEGXZI!DO?d__p8; z)wrsxzQtcSU2)#gj6HBmcpD<{mE zB~%u_OZ$*pT%n%W6gr=Y%s581k8a!;uK!qOZ}qiYwi-#{*uv91mTDq@3G#1d5+4-{ zX0MI0Y=k?M&9)cGrm;P7$U(5a@>HD3TIM;LY?q|khpP}1Mr|Q+gL0sl`9>6rw>YVq zGxIDfuk0CgOq0kw=|m_PtR`@+NnTVJko5s2V>iTktYzfx%{71i``5Y4E&$ef>vB^J zVc5x*`A>d-w!Yhj63dvs1WL(*H+Mo%2nE{;6+~vK#q`DR>$(x+i>lc-)o27HHIpHF z{*H?f9yjwre6Q#)94#aB>ndDxH@vEqpBy?zQ5wcwR0CPRtfH1c>4%IjG=eT7VPh6? z)ES&}(yZ+7&TD!GgWOh=Cp~1#Ii@b!OR}n{q2|CvZ{uy`gIBwS+h%Y^&L)s7*-Sbr znGmWp3&-g;pT}l!_jnrsVFEop-1LGLV(Cj%gLt>^9$kjKAu{ypI5ZSH#;H1MAI zfcfW{64N(&pqu#bq|ra$8bfNE_G?Uk-5QSLH1syCwiR;Pfh(%6i%(3K#a-GIT#}-7 zftPUwK! z%ysS5Q;&WTJQO9FEuW zeq>U;Zmipl2W9HOjxVU8S&Z~E1eYkAxcYRnefOIBob$K3@d(|Tj3HIdpAA94nlziq zJ{0xcuFv@HKfqEV-gR=bQ)yt?Tuszu>d8~sj|LZ{oE4*;+ZL4(P? 
zqZ;uHX~i&!t39w&MXO5=GiRxt^UKm=V!lB79chDtdNu4ALhT4{_R}Ktv>r|=B^-ql3vpeGV5Nv_q4naDwGfI37N?s18ga|7t4zA?5u~OE(pOSfw zAj)JX&8&!Ar=T?Joa*#sN0uG9@cx9G-Y~VNSV~BZmz{7Vj2#B5!EYXAC@Bq80hmeN zC{?x{W(ife|#wi|E1 zneuMi*5-=QaaR>f;AXVgyEZBp^D5#ZFq|&9$XKb_Ihy?ojgjX})CI*-#;doUGO}lz z{sf!U!rU@&pQjG~dr)@C+P8CGpkhuLXds~chj*Eoi~hd`xAbnN|3R2lt-5WW$%O3t ztnX-)EL@Zna%G67#Hyy-%qI)5+@gnK_yNjZFGp2z*Lw|5jUY1@Y8ium(0P5;^LDsK zR5iOo^_qQjM;?)*)+N^QrvgH@Xits!z@*QrpN}2p0$0E;;WVIL{FL5mKh%XfxW24C zp=UmcenT}#7gUHRyn=1xy{W}Z)hV&GX!TMLXO`Nkj~2yVEaIUV$7A}TW^~zo8&sJA zbIS-ML)JppBVh!SE}lXNMt@@BN5dhXSPUR($>k?v`ou=h=@Pwr$a%nANRINc>`=iE z$nf%E-E3=c1(iYelgoguQVwirV@2)WKB6n-Dz=`0!dCEWdy1_nD#h+6bR(F1qM!j6 z2eSFppGV}u$V<611}YOPM>P##2iXQ81$A0k#PNh_uM#Dc($@)&r9yHcl%s-1`@J!< z__RtL-#u#1+GV>}aRQtUOR1JK;p!N|h-W>cYNC?P32G}gPsdtrh)1&>qicncj>)#= z=|2H=kr~dXnoiiQGtptx$h3a^>a!1O6;uo{5o^;R&F4?`*YT35cER$$UiCaa$RfDko(DHNX9g$y%6}VR_S1vE%p9@Ts)!S{vB8N2f9(b@E!w;1$<6FwBFj9H=LV$zFG%fqJ`bB9WJf zFHd3}U8b>2hg%8nT)Gi1uCnI2ywZ}BJ;=!9N>+<2LyFH8ZuwOa6duXs2$>j71ut(_ zSDGFGc!K(S+v)yw=qM5RdDp@Hvz3vVlit+L)XqiU!qCpd=0APmm#RwE>wkUWwVbkq zS@9i?*fyLVg)XK5Y=G8Qu+g>=%7_ygR}J;=Yo0O7GcvnYAt*dmNWAUM(zh#@SF30y z3ERDwb>$>?$9LTl+M?}Rwp>p(Px}mbcDXqARf{9fXjNKO?yw65wHs9Ud^R#dLBn96 z;>eO9Y6qbU7zPiAg<%^9vI`a&-y9IhIIPOi@J^g^uxfL~m#4OnrN*m!OFp-Ju%msV zMvSRJDB?72_U8h*%(;fiI9)Q@STKaGOCJfUN_*+ntZgV^W$h<^yprqKS3im9dWGeC z>tS9-Db%!W-&_%Nuo)J!w(kY`6}W~@T6m+y?Bx3UX%%`C^aK(=D6qn4YKmC44iCu> zRGDNtBOE8-uJcbKIel$Hj%9HdF)>_xHDYEVWzxidxjb1UQF92VEc~PeK1Z%dhN;3T zw8C-}v3c3c-R?1}BO^SUQxEI~$kMhe1*R?Ka1q+s2lca{xMXGha`fl{wPmHYE04Sv zURQ+vZ6!2vvj!{iWf)t5eEs0>^YeEjL}9@NE%5+U@j5&NrqOsT3I%7f5Gb0y9O;=; zB>(KtjGTv77#W*@eWS~UV>{2aK9s{b!dx*$;wnPZP-mh^^88A$V=kRoWzv?HqICui z^{X1ik`HWnGVY+xweKEo3Y{@;A0{VPr>d^M_r@Ukj+cxl#>$c=Iu)_P)ReJe^4_mE|e{=A{dBNC$0tEpoKtksq*9UO^A412{*3{nhKPc_~ zTFBw@ONkGV>oUDp8aqUvt7UBHf1DLMu{culh+wFUk)V?2w;($hLI~7mAgEg_ zgHm3^I|f<9b3lt9WvzhIu?qVFcmOhCfh^hbhHlTkkcB@@D2gb!wV30rWn)6PJxy-F zOjN1gXa3q2fbC3Tn2ru8DsP-RCxL$Y&Fv<|{Ckuv$t#jm3AimFR(8=XlB9?m<*T(z zG3#P`g(r5r>@jM0$Uyh}FtwjO9gxX!{wlW&nh#@FJ@%22*0EW*d1pbmpCQ~gtJV<8 z8xOA&ZmRues`b%Ob>)wi5+ZepG07-&kl+LEEUn@Yi!_KY7(hjqg0Lv!2cGE*7LfK! zb;NKIc^5H4L#Uj3lft~RuR!74(0dy8nn{Z^t46k=<}^BpaicFBfcXWMcJeNv zh|WlnRvS3+?)0|9*MFSt*_O_&BB=Q)N8R(XQ<+phIe@a%+AV>G;7nZ4h?Z@jjmxU+ zkNLwM37ViGM2S2F%wR66!pZ`IH6pCUsjulcW?102$UDyN0xQxmh(`jV;6xVNR5v3G zdxsrDh%+ym{|&D=~DoKtGU-#e}3DCa8M=L1lxS+`nA|*Z?879 zZi=(w^*GHFGsF3v0#(%`Z%@0_jVaX#g3npV3(|UKX&>#a1!P#n6W8GU1?NY!F5V9A z2tJ2{PNU_cIt45hfdZsh)YoR62lu**juSGxdQM6pRhV6pkBD>Gc#Y&zs@;-7kxZ}G568ro~&dyozwK5BET3GzOENa$HTwz#&3Aya-}z_{t~q^`$3RbyTcv+Ct9q-!dh!`! 
ztZpt-S6K^`&u79IH_yQ>499N79%9z07v+Fj`cNt*hg3>fYdlN;p<$V;TAqPFNlxCO zji(b0d00N$;aXPGnv}=|QR7=<=39M={UXO!X3OiFb`jV?SxK!8@{{xW~qpD%>uiN@Kc6HH^wgTwv_0> z&4%-(+S~)$#m$N40OLw@6p#k~ARe2(o{DqX6){SI4dAGer* zg{0=4340qP+La!5?Y%=1*(L^xflpT>@=KnZ?)I|)rsa6`EYA1s$l+EY$=oo=*~={u z6D7o-XRrXGuQ)Ay6({) z4LrIuQVU-Q35Oc3U;eGhGzNI#a{x!;Q?wu;-2a53*#5;?xLG>6xEk90rEU3t;V&+= zwCwjJ|7tp^wBngMgJ@+y#V@Q_dDc8NtJQJ0s)w(BVC=iCT_UrN`{kasa77`>8nJ|0 zkj3y<^j27D_~?of2U8;iTmIp2|1h6_@4tYG5crH9)r@p_*dO;FNj9v_I?|k1j}lNM zz~%c*dJ~2Osu9Zc8;uiz3I&t=so$dC-e8h938UgVkm4QDN2ft&KL9VN zHT-ZW-X?##c}C(#Mrv^l?n#Uw27QT~BXW(tbm*+zZfG6XvLnWfyn663>mbYHPdM7< z6B&-60yhG}Of+NA8vUX?BlYIGh3LB(ZA}t}%`pA=FnsN6V6|mQ|M@VVO8p?4reX-w ze<(o}7FHBe#m zy@dl0y(+NAaEJ6*$TR4$)X@+X@H2y{r4&^a93Oj+#)UwPq2erlafZ@7=LM(DnCjV> z>bz3Sc%~v9Y}&OLA<-;f2+IYSlhCJm?Hv->n6n*+*3H0FGsFN0@*K?-xCrQ0#VGLc zt-PF8&FDN^$~U7meq9_(Q`7q~WtvJEjgCJ$_NHceBujtjJawE(%H?icMBI=}NwwP) znjAFQ5A9sN982ZBnVi_yfG2`zar&PF0f=sC+sJh!Ju3#0Y*ZK+;w^H& z)|F@Un441f@<%@j+Jf;lp9ddrObGlLGMz-5PZ`@uAzq$>;cz5C z8=;O!fwtv8zq(&C^mt+SzjklsL2$>{e;^m{Zm!+d(#E9}|H^?zR(Dbj0 zo?>4;P^l4agqd@3>cl-LX{Ji(2Cicglflm#7uIGvajWO>ta`Y|IbGH`%JDEPXP0U# zC%GMQ8qhm=z5QuE)d+TJ(TB+Ho^3^L;e-c3AlW~c9z(UD0WrmItni(;Ge@ohQY1r4FfQJYf@|$k+Y~!z0 zT6W8o$J=rxpMns!f{LO`7_s7A9?WDXU9}OvaPu2A`nof>rIQ_$5ArPa(5gm2bx(ll z7~#yzp0mYBwz;-fb?<-FskbxEeovl zE;i2k|1ax})c^mM1qcdcl49|uh`BO8)$X7-t9q!e{uML#Xk)S8*V(%FTJY5sEfK0m zpih~rp5J)x^Aw?tRk@*irbJ)Af-7ioN_f40zsXlYepM8&qwB7V`Ytj;{f$eayer3Z z6fGy9!~&uaiAjn|)sx->$_Eq+L+22k2Nd=(3ZO38_FCZjC$3n9nbf4++wlh-HRpsU z=kd291^hs3;m+_h7lBuB#Lz2;^D-j_W}|rMW;uCAOovvWO+mG)WYQs+8ig&UR#Y65 zx^aP?ZJ2*E_8P}|RJ;%+iZFM07Vbyr_bOzkbUDRl!yz;jAz5;&()rUBd%CJhAyBY3 z*Z|$&rP2^vSH{~%J*WMurL!1z}4)c<5*Lq(xTqLkD|7bNOH-duin&i?9(w;4!O zgYI}I{cvss7tUy4X9|85i?!1tgbXLON%FFG-Dh3tFBHZ5%MqKDc|-YjN%cvvm3oDQ zirQgjUZMU0F_JCiE|_|Wt&wOheVzXB^f99BgMkzB5x?(E44iFja~oim&+(II2?6fj;2 zW4+;XvH3b@vjv%CHob&~bwg#8F@a+>bE%8;zIFMt7jXStV{dr{6`0B1hDPu!X0Buw zxi5(l7AaQVe5OYl+4;H#qKJ?j3%4XasPEeeFt}=c>1*iS_<9{Nt~I-?WewaTZP9mn zCQGSE2Y|%dZx@)Zn9E0H5SZgFWGzby99sbuHV!3 zo%`QOxP2UGq*WkHct!*PA^xZSh=bl1SattA#<^5e`fH5iJ5$RkmW)}#UbRl3j+|Lh zvqsR-o#Z}2rcuWfZKA09SBFPWNdf{J>6iwSysa`M-~uXey}0&QJV_Q1(E?1fQ!k(|i_g0A4$ zAWc<=g3});ioOR<4gEjI+^rwgO0EWzNG1+NK$=YY(vQZn6ypC}!V9Oar5eqBqN+G> z!iVx0@b5#aq(lsQK{nO@P>6Kq`Hq#qfiHByZuTQA+Nx=F;~ zGyMX!9m*i80DE~_A`|h3H)EGvvLZ<_t`25^MQ{kZs$(@%4Ms>bS+C4~( z0Vy=$G6gS$5FPwj{!Qo0*QM^f;DD41`B6wDdgE4%mOL~s0U-{ldZ8Tg{7WqL(Voa5 zT&xE(YuJnCqM7=$@e)PSc5*<7&YBjBNX1c%T@&@X9+M5AIW{kUkk6#5E-Hg?n0^SW zhA7{fI={rmkbLnTL_FF2O>w!Co=Mvh$x(O7O`2h~r0S{`UlA(?nAD^Pt*gViu#2fs zrDabR^>n3g#B$Ac zpPjS$W)_{57vXVNL`&o0GIUZLp9B7WiRLVC@e>k9tA!@`cFo8EZf3QPIgJGS*c{DG zI$-OuXQDF?-Nl@ZgR8{Xt&P}=wW*hd6}RS1@>9(DchI8}>l3;8fx54Q1f$b{9gj^; zS+r)m-9OD`n3(^X z%Q(3JPkV>|P&u0aX8;=Uip(yVzga{bCa0!hi!`q{zRs#e(Kbi|bY)}|RD8Zo%m27p zY_Q7cVDKf1xgL1tdGqavvxXEYkwf>%b#TNITV}~&@%~7e?@29uXuuxmg5ERQI0NVr zFD6mrKU~P9D1U*hW`pMsFv2MeVZx;pg4+^glKz$Ic7KoA6-`_<{(O zu1v6QX4zdGGogud7w$Nsuc)aWEA(=toZ$s*;-xAP6E`NZ2&tg;xwuq)>;5#51Ehrz z_0T%})_cX4ut06q&=Hpgymv4NrXdUy-IrH9aaHROWLV4;p@WFcz^P@B_sfMB+a2s+ zVRWji5Izr!%Wxt?RyL_DrYHvtX~ke=wAZ(&Z18WKWt}h(v;lH;ZqVF~20j339ohG% z7)VfJS9$>2Q(3#*&0hZ%0fR*SZtF$fN({>i1aF1Oh#4qS1^)$P``7lD{mjeciyFI{ z3qAmsEttyFOGRK*g>qSC#F%cG%5Xy-UQROzTwdN+ndTj4u*5pG4GG|KTL*Wj4(J1y z&sW5zygSSkbGZt1v;lJuoN{atq@n-@r{%O0R{V`&=Z`CHu4SwSDgm^-a$f*Q{v3O0Bm(+aToPDiTk$wI znTO?!&YB(9>};D04^L&3H;n2J6P3r8Zo9=V9&EM~ucZL^*6zp(M(XETIrljxt@-S) zkFKs^qP}mH=ZlInq(S=y^_NQ-o?Eq@G}>C6)+?okxgkEdPoCVApWgbcZt-3K9c)3HC%?P=9B#?-DxjkFhcLW4>N;M(<_v50t6gA6UY$@=Fsrr$JCJ1j|qF*#7e(!YZ~kR 
z;jz8!8z)cD92TO_Z=ddHMy>uJW7jPBUxdA5jA&7_Cfv4d+qV03_i5X|`bNt5#NJCwo_|2d}}13bv=68&>cg&N*JhL97%pui13o;)bGc zB(aXIf@p?Z-iZw(ng17DE1i_>C+W8wYrl*5e=G-`v5~oxt;7GS%6}?}aa12vKLf&_ zo40U3_C7S8uw8PudkWIZt77fQjQTaX#|!E3$Ni_N&a|v9!gX|wHT zGZN7z^osbo)Q@iF(ZGmpPy-qsH{ndVMRx;;X!|$Piaa6u5+J2tvy3p51(>ho7v#VF z9O*oCsjBlF$;$h@GR$S8Qn{}+ey6H0rL=Jy8$ne0Ek z016_4f22izO?&^-X}dJ6fBkw9zo&Km75Ork;>^^W4M)${sZ;elLo9S$}>KnqEw)3P}?00qHBUa{@s|D$B+&S20M(zRQza zgT+M!>q{UvXdJ)qx4;IGM=@s3;strAry0EQ?*;dYy3)c*ibTdu$sZKuQ44(-ddPxC ztGHugcO8#h>Kd2Wpjk|#yHHJ7N`01|$LajSyY(bX9$`Y2B-Mc^_*OPA_!H8E)E<>r zu(q}yyYuvq39KWTD)Paq+%!oZyoY__R9LD9A@%+-XSz)x@H$s9K6{6b6^6b=pMJB7kY+3iS|4Q!=%7 zC_uXXG0wj1W@%dWkx~^DnT&n2S@tW>^+2e6pb;3XS=8r;tLi2z+h;tpIEK`~9^33je+GCncOR>RmXfQQ=-#gh(aw8*3X zI4LaYL+7;c!z@y}%jfN^MCXLn8dWiYo+EvEgWZ+LE<<=QLX<8?QK$gzt?OOX_^Ug% zpOG?o9;$`r&14I%+h&WdC(v!<>bjF=5j4>a@cZkAh+R%ntTJcPQPYPyIUxTh!fgA= zW$c(VLO%@?2^AEnF`7!SzJGs*v`qUPNzAeP!Sp*#!8T%Ra5IO>&GGz4@lhK|c}+W+ zp1CAp3U^H-_5EXImm1Y87(jJ=SQH&4M$nFXrMVCKSK)$|8p_xY7p)Y;`B{`ALP$e$ zXG3iuoCkSc%4rPcFBo*;9o39P0&$I0nGGBGq%k0e!BAJS&;ln}4(6{ps=~JX$`kd7P2RctC%|OO7q1XjW_WT*sTiBQ}lBDK7 zLcDn(d9A}4%1x6L_hMi;wMI1aUtna+ohd1uJ{Dy2J08~=>pTPArNdkg|4+>n%=~w- z%F9MW+W|SzAt=e*&k&-eIyy*De-oN9rYOKbYhVxOrMGPb*evKlwE}^HzlMf%7Bcd* zBF~+_!;rqel%}UVlFr409Thxi;&`giW}$C(#lb~4dfT%ZnkOOLd(P|x98w^A2o%~y z1~CV*t&J5lXvnPK#iG1@Yg`d$k1BS4i(N9Ru?QA~Oa(1Ha$tHKG)U?a2oxz2#@mT< zl#k7&R+xJ5OM~3l;Tvr~5-eycMTJLFeQNP@U~!QYC#yAtSp9SV z3$hGJOZqND9+L5nkcq4g--}vkWl&Im+D$>qdBEt%IeESsfN>~h+eGWw02ScddT_=t zTVEvD0Mfyt|L$Ih##E52UZX`$SAn+6KRI<-E9sOIik(C?mIr#N18qtluuI?MplvSP z7hp5*MG42Z{TZ5Y($t>&hFKELQH3OPSw#y$(QqE1Hn97y9q>0#A@|kn7B<=vu9lcB zJUZobIo2lD`*t(;kpkT|R9*9A4uzk{KK5`e#t7ZiydD!p0tXp6bOtmNQXLob4kBjFXvx}b9ab<{F?g!2oDm7`V1u!+YP~vaP;03 zdhD35H9cYe^DXnz13kq`{p=TalLsNW{UOae@k{Oz(DrZ!vC6fsF1XaL1{^^U`NY6j1Q zykwDN8}?LVDAZwRXsEnQ0luLPPb|o4mr=9E;iwLyu|%irUGQu;gYt0R+1Ob5WLtp# zR=5C8S+?tWq#nWwRw7vu9nea&H+Z`n6AsL~nCyoNYLSN2trrId9NVJ$aMd@R=bHGEhLhkZX;Z1BK)KwH-<3~$&X(-hwIT>dsBWkv z*dIKS`uNo2RTC`JOrDL4$OKcbV4qe37Kq(27IE_sNgtKC;mXuXuWt%Mij9T+!95>x zxD|4`6RHC*zk-`4*$7s+8~HNy6@2mLhc6>;TN$FBoO31K6I(l)MdQWsWEb4cL{^VV z=+p?8)LSEdi%Nr*_I`8%z$((x0j`S@zg?KXwEkBvi63jE;>p$x8-R~&j z1n<|2xm7_eIB>Gqys!D;ja2vJ#?DB~%Vgwk&a3@u)6AiQ&mo>Hf{ATty}mXrH~qOW zUs{KcRvGNofZhBWhcL2`RGt`;{&awG0B1M=PkLYRNM*%ym-q%j(BJZIIB;}HhSGp0|Po#_B4Z8}AghuC8SyR0fkfWTyPjl-|IMj*- zKJSnSNKcuX^Vlo%ZO?c$qso95EWEI>LzKYZz>gQ!Tc*Nw@kASoU#fnlOV7cvkUM?)WA=PrU3%&k^ROGyJV+||ap1eq%uXr14*th1uM3}+50M42r} zW<&jfNS*Mgm+!AtwWDR%?}>1t!N20DG=2j%1`MyPzyws?ydSO5Q(8bn-P5o+F3DMs zwQTC3D6lqI7BJ{twSqbBJG5()6A%@ib|(kemq4L`hbArHcuTu*p48?;jRXB~h47*$ z6i9-8c0rr|)lGB>MAkm|Q|^N`1jg{HWahrL^=Rh1Z+;T)F!?$_sz-*R*^36r@4ZhW1BHc=h_?< zaP4lJA$=B)!hRS7u^m_8LTz>e4*mZ^sAc+UA@WoUE3k!lkHMa`KXqn1#YVJzCr>t@YS59JWytm$UYTYTRsjMFv}E%K|Yg1*Z!_ z%wW1t3MMWeM2S8km(L9xeA7EDA>nqn|FWK`9i>6>_<7a^gY3EXPJ<>7rR2j26W50Y z83k39f>{~shXDqqB33IlF(fdXg#AKYk??%~ROPkSBVM%@P1w)h{xd09HnyIAcCUII z1zdYK0%w}B>~%Pn>2{b7Rz!*tZ7Kg@@zcCe0i7#G33+<%CT@ULLR;!+ z^@E*R?$}kbNxd-DUTyPApTtc zqcrt;S1;h9!kVPRYmZ8UOndsOic6BFtkXAX6Ck54c;+V{(L!HGRch||0%4Nd<5_z^9pBPm6J{hv92SK zP%q^~C${ggh)7Q_wo;&Td+~I&0iR0Af_-;B@H<4^X-lDkGc6jVIjh%O#tVKV44Ye%f=szjIY(BpEkBjoLI#ylj<0X9;QMc_ZtA zQ0SU1>WO_9&79qG_4!y)K%WAc;g4jk_~Kv84fC%%DP8LLCb;-G3wri>Ruy{~3ZMg! 
zzP0_Yso#Gaf`(AYB>WPj0RC(J&vNc`rslsIT22n;2F^~#j{m6>rJ{2purjwXbvAc& zqP5ch4Sq%Sc$JLZoS^>Cjeqz1-w)RR*za4J8~i`D>HqsM|2yGt@IO<&{wt@0v7?jw z|1;I>UU3q^v61T+N-n>)C zt~rE7Ql8gsljL|w7~58@YOaM?lI|QyEXxx+kVK_q>YD_|W2_w8{=9?cGQum&-|ugd zQe{X^14M}y%$c#YV{>z7ylu=8_Tna0wOzfed z^Ng8>jonjGN@u5?lQBjxb<0Tkfb!4JtMDc6b02fy?W9uCRVR#Z7A=&uh1nuMBw?OJ zR7!+9=siofVZ4gEqLwU(#1UXCsVFCQ$6y0d9VOT{J5^5sU%T1>%C9_2IDc}hLC_EQAcEJ??a5Q8u8GdXE{g$0#{ z6oust5=s8l|1FkGkcqF9sF^o1pi5cx&`4d`$#7$$tR?*8lZ~DiwP{6?pUJr4$bJP~_A*{@S$}Nu66&)^y@K%CN6c50tWTBA;@|SUsAA=5hFXGO; zAt;4_v&%KrVELJnCn$*yrWQ0-I1B#T0;p3Y+@oc3kU=yv-vgv`*E(vSdB0E0hya6A zyDF`PiDPB4kZA708-&NImu?V8nU1?d1gp(0l9@aUguu`ytp{g^J^LpA63pvEcwL1*49O1ySf&fK7#8Hi1zkX>Fh|cy^C}_RO-0?Lx??}F520Xv@zWC zvu}rEt!u3*LacjMREQFh4gfI9aYtiknCbBq5TEItk)Dk0e2#=85R1foPHNIOgY<|v z*yKG@FfN2I!{)|%`tgCAMKr$`gBV#yH-106`*RLGe`i9-t8%uF(7j?{82T#aO zfw>Q@?QRB7jh`_z0dHrm?vXTCjACl9*u6-0v@eyvcC=pmmvJ1LIDJ0&08_U_{x0h}9{icTk^h}tq#H+i(A!h2ZifU6ez)%< zYh+bGjPlr=GE_oFnIehsC+Q8{f&@DRzcwpL`Oqa_Rca(e-~Ir6k(0U%D*+973;@PN zTxF#}?{@U7FHR3PZdT{d-ay5{+zWp6&+Ge)9OpeuxyfG==k3lP6;xhD(&;E#U?LOT zR$pfH^QG(igcwC~ORGVPA3iLlJr*z<#|_ zHiXK2E^Gm+O~TTivej>&HjW!QQUz zoejkrS=~9!4-b6F)0LVXykD2HMBN)^C+@JFZl1S=(-?Z)p0~XLM%31Pe$||9_PUPt zdH|iSpS`WKyO6n#6x*0Okm!N5^Lb9VGOrl0XHu8bO!?^B4qsqz&Xnmtx7uC1ItV;( zaP<6pYL|Y85(4IaL2xXgoTsR@dO$Wz%ryx#l>1a^v5y1!==FNAtYQP_iDV3V91ba^ zaku-*baWH;as9~ZV;EGDj&q>#A8u^mGVM~uZ(FfVsPo2F<~mq%o?$^}$M#{cy(qwE zS$qFLIMf6iapKO8iL!KICPAxj(JRZediS;G-o?H+*G}{Mp+S&^21#gbT%xa_H38tJOm@u1cdkkNCMYLf4f_E2i)UR8dDzfJFSt))i@P?=k`YV&6-uw;f1HOl;Uy z%#08V7EaEDa7Hkq+yhmx9gwd}N)N>NUUc5yNQ0~baD(tF00t-i$Te9KA8v(t0&6t8 zjn+ynQ66Z4|CM%j+z)9IqRY+=91FY);<2gRaX#O47Kj~-)hUuGO$XNQfTSI$46DoR zBf?<=;hN&z_Q0GAk!!bA^h;Q8Rvjh_g|{UH;T;0l6QmuC>DEk-T#qc=7QQaj?bR&I zi8;*2jcRU2?ipmpf^=kogw9U=DnlF&J{87vk*m7%(HL4DnX7Af=x1w!y&z#BrKA$T zizri(NEG(~R?l3%qRA_n$Y<7Qg4!*dUQu*&9bfMt4ZS`v{m7)7;<%Q&xt)&YhnNm9 z5C<4|19$XK%)2-Kv0M`clcOv!niY(dGdxrZ+zg}gp2bT({tKLsCmL)>G-CEm27S{u zpDcM-_gjDGY2ah3pg0BiPU-eG&AH(R803uhCs~`8R>v?1VoqN{t=3>=Y{nzkg#L{~ZO=>w=sNy9T$mL&q8TeH#mr;P zDq@e7N9xB?n;WvBZ~J$Jkv0gS>m3yrky+_u$B6-$h*JWr{guWbj(14A8+~S2H4^{wq3jPqQCX z0}$an5b$YU5o!bAwUU8;{`?(YS$B`G7UDD|s|V=pHppTRzX(DH!Ulrmb!8EGfB(4* z*#rB271cxM8E{tydo$kyzdkbec4mKgWZi-6Ab?mQ=L8gvID3f$BUBw!b%src#l^%~ zEu30~+u?<>VJ)+3_vYZH>3aGX`RgC4B7J|EVP$#`u!3N@vjwkul4&`{wTieKC=7%v z|H7*@q?0cLiOU#?bD$<(B(<&A8`#T#^K2lh%~WV%#%S-cm^>cT4*@QfH$CpYq0C(Z zzjpyps~g3>MUb}R?(tG;$9D&VR}6vGT%U zjcC{N29!lTDg!~Eg=Q4INpLysRQ)}VLVZ@#gJ{L+ho<7?@qXLeI9Qm=SRpS_Lw3cK zo1Ze^CS9QXYdaYDlC#)oKw)ed9_X%1-7k8`5ucMxIXS%o`zsLicErcGjo#$4D)p z3t}B>-^v}G9x{N|*GLCOO=xD{(-A$;KqOY4ENZPVT5nL}0?kd(i`Fwzzo-HOL=t>s zj*IlTG-R@gh)5bHO(0sqSqHbv{pVcPJ7!Jm0#EG~0TrH$8j@k5i}=g%8YMQm`xHyw zkR(H~(o)4kgmG&B41#-lCzfUtkylX5R3n_*K-(WK&vY6s14?DFV7m$CPqg7xl2G2q z9?^iWNBC^|bm4rYzBH{|HtxMiVCkqQbfae;0ro8!sOTY4Qj_FFvnpYX+{>6a1BR%Y(NkI?8?_Wm z+WZ>y(7N~QTup@eY#nI97B@eluDf~7f1WyCN=AD4{yb0JTbYJE zJ2p$5s3&b2Mv(le)byLg;vrL8FL28?m_hP%No~@r^_3q=hbNDUhVk zA(q}Z-{Buvpf!SdF6$lndkqNzc=y#?z-fOh&=Pz|53P30x*g4d^2y^^uID9rBTNvvq)b5D9#Y_8;b2Xg22eEomS(bThkJ$4s35)yO^t8TG_V#;gWP z(DQa?T253jc$?^GGnib>_g%2;`JVH(ln4n1Fqgt$K5$~m3&-L3FfuSeViAJPa3Omy z2(P_e&Y?Yq`cJepyfi~VCI0h{c$J9IDiu_XG8%dRNUJk|L8I_{xrxR-M^l9WDr&E{ zUK^tKe*i@91AGDS8wT0?92z1>naSa9drt^4;N5^p>BFhfVSVZ*jTv>iZ9%qLPSPcJ4zSix+G-J%!Z+WO!=3hLEATZdCJwMKi`9 z=7C1g6X11zQ`?dwyyzgWJ8o)n{!9d#6JyCxb%VN6<7S?Klm-d2nBH7+V_ircg% zh*0QybJuF~&7FJ2EYLLCNL_=2uQgVzyDkz(!J$Kf4p&%lTY481Qx6gn2dD=b8DJuE zQ{g-lC7$V;fZ^cOm+29DY84&}m2H!_jK9S2OC`cAHx5uJdeA!aMMzwt6Faj8eXJB> zZ6ySd>O6C36aM8C`{T|_JMjs4{BmvH6julNNhadcVwC;PlrBnm3KATO%5p`UgUKZ( 
z>gw-*aLmqlN+XN_AC08vo#ivMlZ2;&i*fc|V%MH%#YWQZ@+oQs=cs{$_R$rcE5jZr zx!`I`nD@13(IT@skyi}Ko7vl9QbnuQWv^1U(u~=wt_5evV8d4}LHDjZ(+8+dPx1L#it*?RmjD@|U;jGU5Kvq0BjHm@%)5x;?D z@9-dg_;ykVK0P~41z%spXF8%dFge&Ie#gPH#0$+U#SMpa)}8<=nc73q?F|)Hby+DAQxSce?HgI(hLij66p?m?f%`mHiRHpdf*J$G;{+aQjEoB%8dr z->boj3ocry#D~lqDS!r|14OWZp?jpa?bP}%b)$YjPD!TM7cj|)N&<;}24RZ5;7aN^ zla_PtwtCOne`zyO2+SpKhyvgL_@JXFFcHvTT6Mz-(a^4H7{u)TG}dMn zH#Ej8Dt#6&WqZgKM;R+G#Q|3w@j#Lh+j&`Fh96`w-oAf->lg#gHu#e~p(?VZksN|< zo}DhGf~+q7O-BpEj@0gW&EJN@`7VUZokJak$3LvPLGG#@hc93fmpo*b&;;^slc+%l z6`PDDFhU9j1gLV5nHz=o3G0wHZ2eFdO@RDpUzl4|kaXg8poezPG- zl0=Oco@QD^c}535veM__xL$_as38-RUfF>Z&NEb-G5ht1Y(99x0Vz2R2Z; zXkjVTH6fiGDil><^GOyhRmUm|cKNjg^s6IFhDLBiS8Hmme6(I~BjVL+fJjz>P|rwQ zBln|<+e9hglUKHO5-aIZj>+GkS7jeAw5y0u1p)gdz)(}0cFcu7xdHaEe9Z6O4FX9v zZT0n_vJ>^!-kUyu8DQMza1hLn0qXLqI76DcRVQl>vEKy+gI9lmtNyFu(P17nRkh?r zj%9JlPaz7_7ol()!86D8z+KOI>{6Z?J%4lVNf6X&$5O0rIu!dhw0a9K8x(Z$4l-fz z^LvzDGaXKb3gqCXJ+jI2UKlg)uUgx;<)j4*Ll68xM?f;RN%>L~HK?BUg9`}$qL+02 zqL|dUVZ!;zPCIEKF1&)4N@$!7dLWyQ-7FaF4@Jy!TlM-GX2Cdr?{pPu6)QhD`IpzL zIcYOzT+@XX8)xqB$0f0_nVu1P zBlLoBU5IDBM-^4;W3hOdQjlgD7a<|SuJ zQ|Qr}%S;J?Y6$NUDS19IQ-ipUhuO3Chlz%NS$w@~yO9|TX~03Cf|?ms`I&vJntUDP zhwC8vxE%XrI}!8zy7H>3jHmH}Vg|z&K|g6L{bb`UJEn5Wz$(Gmx~!-XS%!gd9U#up zOMAz1ZSj-`9e#7(DS`!a%O&pELDoj25S5^lnB0&iD6`!AjzGQ6826v%;82gz(2tNh zVG}+H^(BiY`v!JrlZg$lnF%For=dfimRP|f&xleAMve@~2VD;`UpC2QGaRj<2a43S zdb%W+!nmee78@qjZ%gGKk@>_;kf3FOyU&a{U*E6gydsHT4I(wwrb1RxtS8$d4hd{c z#N3)@6;uQs z4(AXm7rfum+NfpeVPIz=eQCn$L2Buw9%k`wW?wkEMisy4NuDG1i}=N6UDF&|f)Gm_FvTfNWNQsFH{CA|4Y zL}0AVUzV%7vc)g)T6(0b=v&HyX;+QV!%?v`aLIy5hU_o~*&e7`M0u`VXyUH%EfbZ6 zZn;H$a3`9e%G%QZ>s*8vubcrd2XAf(+cguE%L zZX&RgRDAGqFr?m*C}|rz5^0~mm+-U>jxXn;?0)TJk=CY7*1j9E%UUyk4M;4sq-~Vv zk%3pQ|ACx+$^8|t-&aqnh}*SAoid#e2zQ!?A=S+h!$GgG48N&f6CrZZj>acTStPo; z!^*~$Q)CXCnQ{qKY#SRFuYg%WIzc;4(}j3j^Qp5aCJP74h#6e*hVXuMHh9>Mp=cks}rLj z7VD&p-zWn$oxsUS0`niFGkN!}0HGQcvXdW~oPKviJ1ELTu2mhkNYe-Qy~&cz!(?t* zR<1`?<}JVkmXUJnnWcJAWy0WKVp}dbpwj+yPAa(NeBItm9Blo6PtQsx6`byiE6~5j z_qKynfT7oVL`fIO{o{4%%GdUllNaVV;~w-r#j2^U40qkXI$IJgy(B@*;c6h*LB}E<)ajwa8!5ovNb6W@AnM5s_uq;G z#tl^|0E;j2Dt7#A)p<$2F}cDVsFer?mF;BZ&wG{-B?$@a!+A5q>1Z<=QZg&&V%!YS zC^bTVyJ5?~d2g6eW(xHQwoFdyGQ|D4G)a6h8BJ4gJBR6?d<3DuJq1@*Qy^(mYopl38R7OR$9B3(#_=OoYR|o$lCq2*h|+2OOwVz4|2~ zS8N123?39B$S%N;roE%WBxTlszDLo&`}Nhk_K)!;SOOyyPr*fu4m!hw2${uwQG?J_T3bv)pvVl*Q|#*GAT2-D(j<@67z1 zwR)(q_HAH}Lfm<5h27e!rrdH_vP$@euQeYwq6dM^qbGDf>`=NNL7<#=oa1c60Qb)j z&yzYm^^bkRKUEOSPG-m(#mk{<8)Y4{_gR8b#dag-cB>r@!hdlBr)T)ywk{9 zMq7uwTuIj0sR$}>%k?Nu=uQ2}&EBjd+~0^%-j>#S>~({N0vXlJeGvzuY$3e2kvAY^Uz{JQu<#kq-0f#|dPY3J}&Y z8=b40yftN{ade+fWHrOBfiY730)4?BM6$DZ8w5y(Y6sFcis24UQWmYJ|G@G4?i6>m z+ha+J^5vjSB(M z29$BgroY_x9|<+sXlo>L+UgZYey2fno%#TNput(d?bQjEJioP0a#Fv<82XKO8f@vNb3Eza!->@c5+upFFJ}c zl_$&^*BSG6qA70=LpbubzPDN29sx6m-fmLu-vcDjl?n4Ypo86Z>L__elGFM?Z-KJ$s$}uSrt0s0hSq@X~a7Qt&8pmIeGYo6j`_Kgbn?Q`Jg? zy3Jh}TI%kfy(*z*YZMO*T=c|1?lG#QVfQ~msVN3wSC0$v1^d`x#7igJKmBwXGb(a1-haY+G=YIVC-)fga~{ z+xD4?oY|2RS?_eTTe3;`7^Zg5fkFOBo4&0~eOfz2(i3%*jMN|oY-lD?!g+UZw7@;} zfEUIlAR`J?`^(ePwV;wKANQ$M4N7i_Z=n<3@ba-_Wb#6{6(`5;=s@m^F7dXQlP?l< zxK1@DJZXJ3S$&l+MoFy4_-ecikaAx((7uSD0c^uWor|n)d_5#v_bgS8NHnMvgpM~( z0!qv|6O*qd@L23X@PXm!d||=~?f_pbZU*#gZ1EIVraN?%Ag4%Y#n#}pyqw?D--WB? 
zJk1pu`^4h+CHG+Z7~#gHuQ>)1XH+*%a?@F#OKWXoIOrd<_mRrBbKPFLzxu%?4~Tg@ zDAnM!QbWob*9KnjHjn99Qeb;94|LWwzr_Q$Fr+Zg)ee$6rYnU_y*?-Y&croI-w=7v zL^FtaX~#JpW5im4x%p>}`0}3Qqg+&@?^V#-{7Uc=i?twk!O3ZANJ&>xLDa_vIuF}t zzBPqoXQenBzr0J!iVbnYnys6)GgwGC5TL%CttO_eM~Z2G-EKcpk6hFG%M1Fzsf&wad{089MWbI_WSf8M!8rA-pYuV- zOGb>(%bO4V*@t%2C|S@z8}hX@Z{ZYfZXg1WuO1p0sw!)%Idrc8#8h_O5-1&G+ip6F zzN3v#>VQ^yEsmp3BUFEQ+ld}*N8F}mLjiuSx@WzYt(+3!464GV5j4LtTMCM8(cqTk z*|-yg8=uomI;a7}J8Zb){lkv)sCuP64TUmO1zVC%rU|(v7WbWWt+gc|R;~DCvH)u+Z0j&- zLX8PTifYPbLRZomR48bfKr^gG6?WA{0-;+s6rM^DeCkes+aYrGg^nGozq+Z=R5K|kr7;TMd4XPA=FPF=~% zwU_1>#-(n}^&NBIT2hek!tdqgjB}66zO{G0-44Pz<=UIG5riCJ+;42jQ^A5ch&jS?uUl&PGKoK9Clg1h9WqV<|TaIjK+y$_5YbaZ0jd=0JbnA7u>Lr2ZOsuZ@b{n~%t$FiS5z^qC@=55FhyTQQOK7(5dusP{#)Ae-GHn`L}GpJ@P zl60nblRIAXY{5m-KD{URR`{3;u?*}rcDK}DlF_xv!((%KzT4cYaKcO}EulLZiylj&yVSooSL|AJSNKD~tyhgxkp>_YB zlOubq_cV;%?05jrj9}K-arvsF* zkECI2U9{~h2~whiOJQ%U!YO352PibsTZhgsrSpY4PCjzw42)lzVGo#y*WegHkDo75 z(a3&(=@T2tGuZG;2lpV758$rLl*5H86eV1#XhON_*Al zm>{YvoZ3h?Db-!}B;E3D5a8ov)XK6sD1NXiLM zm3e%cYh4}>mByzE3MCGwPoRj_BGa7w0}aJ`}e8*>lLvWARiy=ig}|MnQ~1# zh_%vn0+OvBn^9>aG;0rA>2Cuwy>$=GI$b;2#yKxI;@6l+qTK~$gq5>1Z!juu0+~#6 zcwL767X>f3gg2JzRk~h?_su9M-2z0F3LDS^d%y)sr>-`o)^X_`(+fVUEDwxQ_|Q!# zMYfNqKI+N_{&mLz8$I7z05&_9pMmlTD;TGehtVfxJxZX!_LS_Rk?F z#F%kx7J(SDv)1<7=S3rfz>gj6k~MEyWy)BX!w^vVIXeIy`UnnP(-K>m$U!-IaF0p* z;A;sQ3REgsSKYn zkh%b(%B6t{V`Ql#mF58j3rM(}i7KrZI_S+9O+ro)u-#a`0pLJk*So8`Gtd!$QUNlj zx5);CJ(qHY-}-zXMNJ^Pc+P?L^E*-vNcU&JV5G{hOtD#3 z*`^i}EOdcrV@1NV2XXideVM%0&b7R^Cu(xlW0CrSJqrfXjghh9miqO%p*+>Z%5FNt z>W9;HeXR-bsx_uOM46$g__O*Drfj7`6qsEmx5|L?g-;8-2QFaJ%`&oSO@O zkUOl=K=JuFp+~_#@|>`$y|75u7UQQ8aO+{nh5hPF|E8MIrEDsgz0pn?e(Hl|6Wx)% z^U?SPm3+1w&aQg5W%?*>F~Bq$7!`RKpdcE2JU_7U-ndq$rof@!98;T|!Po(9vo$r> zS1~$V5KNFisW52|tkhMVFT1SoNZ)y%;`g3z3A3!lrTJnxa_x0uWkMvUoporu5i+Jc zm4@(<+ceVFRmDg3p6n?rG#`{x3hQ;tKaR#hSd}T*wKZrL@#KTD@23i=d=*$dxCbp` z<&#J*8m)+ggj<&OQnRA&Y^f=ImvXB zRN9}LW#U8#0cgqEXXz@qXYs`Fr_CO0k9R?TyI`{#>g#8;+Ns`#lR$LcGcR3Q>E@Zg zJ2sEG`#B3XtsTvtW0e+(4UVzGH%~(OA=GFQ)|H$ts z$#5PReZ~DkFECoh1#qXGG(SO9YwtRvk71s9ijd!dsoaE9?2FLI`Kte2U1`guzM+z(nJJi){%i`px9!F9p*x{U(xUSEa)rT zxAdHU457)tc%1P~1>xbdByg%aJpkZIpxIbFCh-~$Xq=p#-p5`w^Xvlzra7**W$6I6WPgNM{kEtJYf|alhsH@KU`4Q z_9Gd%LDoAHXR((71@24*u1(DuC!2%;Zr`W;o-p31ehWZKuDfRLx z5p9L6ar<5FeSDb>uUxp;$8+^J{#+qg{*m2_&ydFgeM`>SC^j>O@aX>lYVI%5#MX~& zDBWG5JRt_PqGMIUxEZBeqqra=u03KZm(-H*H0wIBkoDqwhZBV|;=UUS+sDY`_tv)> zb?^lpG`QC4uc=$MZAD5er>BZlw~Kloa3jw%GZ6(t0X*BuoE)_GowK~rE(@rNJCo8Eeh{IVD1VS-mto!+1=W=HjGLyw)&l2r)qr;;rI9z-VTazq5C`)@`%+Q0TjnWlBNu zt+QvKA@<_sPH=q!;uxkhW^I}vhR(re`1v(V@8zoAxO@eLO;q_%Am9IX6oRirTPWih zLTeQOC%#5m%~tGqOxvW%p;NS|>ts}8+Q)hm_WdDS@RQ5|eyoB-CuMj?nwD%SSnCIh zeXj|sNQ!6(=5h=AP}|Z_lADwk%NtPQi#;y6L&rGh&z!_$RJ0>mWc^wPk2rr?Q^CJV zgO1@e4FHy~ai>?*f28zVfYj73?_y^x3h(DOgmFbF7?cauQ<-&`#1B%IJ*h)yIHPlL z)4;vx6`Qs5x|QsW=XWkA`t{6+R>(sI6g<1t_ z(L+70SSt}M)5&Sez`($;No|VhdF;R=>(A<1yL}RZf?l}hc?Hq!kMe> zawx3Ig{@W!Qi^fH_E|IH3q(fEJu4`W!`K8j#ViF9pcl*~%Wtp;EC=AGya1W;9CuNd z&!fNcVX%;*!GtENH~axUrZ?P9yH)T1zuK-lp6dPoAA1#<*)p^Dj)=%kh?J2%kG)q} zDcKQ12uVbQ%C77R$*#*Yh>r?@!V9 zghhcS&e3j`lZuWH*S9_8+K^0M;soBlcnaQW+W8GjaCri}m7|&jU>NhowL}^ z7glvU!yjZMUg~Q4R^$B|uN&`R&=CKl@Kv?Yc3=7NK6d#@;c4C0hQ=CVHkX z@7}BW05^0*P+Dmus3JD%ec z{r&C7FEV&oH%%{v9Um!v639F+*`*^+>cbwd$3;)T+}lYmB5?M^*)w6msc1EBIOUL_ z=n6@6(dG<(zu*}%1FWkL+;vl5SWRF-pP?t;%&!Zj5H1)mP#m0OZO~Mr-e)g85mkIl zE2dbmbAxnhXeSaiTaQt6x}E1?&8KhoFIXCBOOs5;#UDj7Ao`hE1X zvFXuMgRfPNRc%y-#?;7hT+q8hNZxZfV_t@~}U-AqQ zd0%|iAuyKHZMMPjS!$uj?G?4cL$kr-lMKjj3bC9w?O6c&UBKP#9WGus7ZVQ~S6*+Z 
z{cftv(S^d^;Q}9abDq-dwkgT#a8Jo+ShqrWos%Mj^j%B|X;N18K$-VN@vJ7<#bL0tQeiFB-`fB80(d;qmd9dqSS@>QTA-+<_!$a zvIZg?MLF3C$&oXd7a-DgMmAT5=zmCKzstqzaOTW5;%gldI9{Oi4O_ER>lrzRI?L%5 zdY*%IHa*3exi#nN?#C1#d0x!rBj$UPm?J1qE~ml&o7$H*!LA=$ zHPbVlUbx9jnXr9i)6>$pa?95D?mH+Zhw`P$aSd6avmOm3sT`g+hvJ3KS^VhqtP=6C zvl0~?CsM$jX|nrXn2hsU@qpmBhM1tJ?{jO4FX9AJW%J92OkM=JO&k+;8z&y)8 zGn2y8F6=dPGnR&t7yW6hgm;2g?&<2nhn2Z{3*R1m(Xak6kcmk#vWji$8jTy)dF%_i zPT2lqg~v{&Lk!8N*it*y7UcV~D?+6t$`bDl^sc<5jyw1Iyx0Nd`cXBl*LIvX^^FOg zwz{$pbNrfXlkv=qixR3uu1J)G9S;%cV=$fb4u} zZs!Ybzx<->A6#&|AN4898F=G9V6BQfUdP+|<=^{dL0*1deiS`(Xq>>wXZ#03m)66z zEpL<>#>CvIzf^y*{;d0>`V*$z#gYDmcMtsVXw%?OKp%PXEN#fMy?wEd?*8FNp{*uL$Dt8%BNgBGQtxu%mEiJu9iS8nv2@bW;Tm+sGv!1mCbjPZLfD=wGK zZ&OT3CDGu0lSvV(glGToIhKCy00teJi5n*sI($>T&_6&w~yn)Ln zlM}i<>2uKHyicPi;*$4kT#eNpJ-9L49f}dks&4)Pd$u_A<-UUBmTofq_Kqfo#)(U- zc`rTj^-Fk9zF~uQ_2M2~F=ac*8mWJz$-kV@PLEv2-#gv^dtL=pCI7K?x1cXu_4BHe z$gX)U^~h6Spl~Me@K<(Z1X z30EhY*NWHJ7@fnhOJ}04UAG{7p!D8)b>qhyjk>uY(kG;)qGks<=E_p=c-qI*#&V## z;VoWzob=tNq}`gzm5(+RUk(s~#ANaGCre~6`O8q<{Fb3uJ8xW-vQqh$eoQ3w=F zlF+x9u{Bmt2-s+>^w-5{+!AoG6ue62>weuswK zdCtj`Xsl>O1c~IfE?LCPtBsCP^Us!K=oXIrFwo|VO{|gZ9O5b*x@Zuy{8P;$m8pLHo53{9rt7}p&Js3bx_z5 z999iK{`e7v_mWmg%Aga+fk>8(Xc=WJ+IDebDOo!qk604pisX*3QfeQtzGlfzHsf08Lia=E#u^Hfx8oW ztC_=;xxJR>j;Ac&e&UdCXGE=T6TvoXSuUySt++=|kBszL{vgcbuCU zommVHoid2rmqaO%`wmlpTJ`MtCNsR|$_ZRG^pJ)rdps}2OT_PJ=sXAugBqWlVfMIm z)jUnqAxNoRfA*;>wQi{*v*R3Dl`0D!wkW{_ojEN5B~**mN%Hik2gdBvCjxq7%b(m^ zjyF0l85Ke2*Jm^`JOV;A0IO;?8`5*9ipA&Tp?TGUeQLA#TT_Y z@lj&1?DKgqwY15#uJw5bYe$!J`_`-}Yzedlutlo6DkIEjkKVXP41Jo5ol;ilb5FS8 zXi~D?g9d%I*C8Wu4KnL0Og|RU=&f981^4&KUy;ATjPn$aDYT*aVr;DTTeSmO#o1UM zQXel?r9{;omrbhDxPI}3`S>gUPLr`=ChO@45!2jCr5>9xY~1;HOg^bxlVf(mU#X)X zc*e3ksk3CN4va8E%ZlX0(a0V3n=X=+&4Po=0JAn6o|lb7+M0?lhvk(B+S z0q)sW3~gl8XPa9vXV>T#C&3HG!{^YZLpoT^7AoWUTy5fl1wRb<8IIu?4Holyxqtqq zQxi5Z5NBJYrShX7`;AsYU*9s3>^XdVHf^JZ1sY`YkEvTx_4{k{7vKfHe^2J`I-z?A zs{*T8bMpY$!w&VmhTs4=u$4+h3q^h^?}r_L zHt{>b6*GiTQh$6i-j#O{Z~#tNeH00pSZ$e2#THq~x$xCt($U3t-VuZ*~(bZ zj?MQJa1MkJ?e`EV@`egG2qY!s_m6Vlws_*y(hxnvN_^A*;tbbghGgvN%<-6>=0K-F zB@-dR^XBi#JW1=VxoraJqV;1uUlS+c0{7pt>Hdj;`+tkM>HZI1fw})cqqyPFPr~-r zk&1;r2Gv(t(6+2!M_)WG{98X?>6a7hMawmoRCYB#8-gsCI8C$1$c%2!1@uQKD6QpZ45l@ zW03T|qU~%f?a5YFO1*{lvT`ijFzFdLiNwl7nT>MNd9NPQr$VKoQ@p05s$yB?K>1Oh z%Jl`+pF!fsJg++(S85du=rexh| zF74td+m-&KWT~0Uchd@QKYUpxkkqIYC}!7vqV@T$u^h4sE9c^dQ=(^CGgi-9*U{V3 zx+u|};)~%gWEo@WNqy~ru76E|`Tkfj=KFWLN_Jnb$}JmuoG^`N~2KK7Nh>mw`sYsb(xJ{zS8p7wU<+^05lYWMH=PgtQ*zS17)R;y#dma>jEZYS43 z)_U4tW81UznZl6a|3w%O22Nlv^iPsnoDmGOli;uvW=TAXw8>Jw79*~JG2q#x{9G_h zpT(tg?ZP_r@UU0Im0`TIDaBXgmwR4@T>s>Jdu~9KtHHdAi~PibZd!%*8UD^v^H~kO-#$cVQIZCJiD5 z7z|&2ZLA#Rc=Tza>=(P8DZnRJyk_5VBnrTVKmk%VDZtPJm=h=fP5^!ult}j3Q*<6_ z7N_7J$W|G4$!oK!)$EJ@VSHi$`{Agz5BWmV#kh;tBptR`;0%lfa{Xk1UO`ztSsFNA z#%MT<`})tUooIcinG&C&_E==7XE`X*(}BzA-D*|WjLD^%+bfmvBr-G;#X1(EqH7o( zy+Pp|@m9`j+rj`nuZ2v=|9}XH0v3qs2|y$Q#(c72x3sc|(PCxpW0Sa!B@hR}b@>V~OQ- z`=ax-tJ4Qg%v@Szd)UwsZJAllPj069%(eQtc;iTQKlLj%KkNJYQ+In+w5NlUy+zKY zHVCUz^6LLsD`I)d$1vQa_QmQpnlHPvk}F6Bk7eJdq~aHJ2~XL~1EQK}e+GhUut0Em zpL7M0q&=+xW@H-31KbY6@&HKSO?V*S24}!%%z2SF4&(u9*<$cK;M9k@37ypFfLq>2 zzFwfSp?e#Caw4I7$TA+2wkyC8kqQhQP2~_Sjk$(5JOZeI2A#)j8N@?wJPwu#AW(rL zd!Yh%K8D|cC4qmJ2?+c_*1s(iXrTa9fPDNX6#!gNIf4CP@1bp6@ZuQN;h@9T&_-S@ z5DiGr-fT*4bbV7mc5SXu=WSlFq?;%7nqQO|v9#E)B(UipFW?BgfI6ZVkolY&WR^OZ z0wsoyhGAFUEBuuS_D^!dGC_{fB9I9Ntl$n|JZ1<5RG^VDtUlH$@VK*x;Ca*csUE+m z0PCTd+syoU2PldjuntpEDY`MZ^+`O!r^Ydh?4#n1b02tw$F$ZObnxcu`?o}>y|3mD z8!{>L-`7`~IW%^r&iWc>;Ojie^ffsX8Q2M+ft`TUUB>7t=wF5?5ShTkXsd9#dw>bF 
zpLEhBnykSMvo8qHo@LeXz-x6n*ADfa(eZ97*Xk~-)?OpG9Jjk$tHM!S9P6NZ+S2T` z48))3PLiBAac>ba3$(`In}V@GhQDG0{D1|bTJTATla7AFzfab41OKTj&*AFN7x%rx zf2)~c7ZZ5>%(=_lk4Zi-@q8VMj&psquSGNZhnEjab;i*#C8vu$7Sl|uHEyJtW6CG9 z+o2D%)I^uHinV>gH)f&NAD_>5)?- zNN%PxHngbvgo-A8(ygotS{Rv`on`2yolnovF4U-Mv%cORH>`Jg){JrELB~amF7fxQ z&Mc{nrwr~zTm#8p3MMGbE5eh%r4C$s^bm=pX%BOzuV3<;Gjk31!grGR5z!*3Zvy@s z>!xzk?&@bYG!^SK9^-b$F6d>{TaHOj>m}b)mGkZy>J2*Cdiw3qwx|A!V?5ga7X<*( zKhXV_>wkQ`h@&G@7T=94CR4We+5Xaofu}8TOs5EW6$Y#0r?bTj@O7A5HpWhBp}|ss zrH+9J!|eL46hNPYZOkxF{7(C1hrfNwdso`4&O+oln=J`OTuuoS@lsx|45NFUVwrD_&O;}$+oBtj^`5*0$g5}qH6M#}uDH06+@nKHq&7dM)>7+s+9#rs}qJZSCB{I`5_ zpS3+X-62kCSKsCoTkyJ3{lJ6(fsvQSv+y44_dGX)Jg>lVK*s7|SPrOu7gHm5(r=L< zbgg;AJsax8bu>rs)CB@|yhpxp$4>)tKrxU59%g7KJ94(A1=DzD1Xk`~q6O8T(@zpL z`&rqaL8Tvlu0HjF?v2Xn-bIZCy`;>dymii;`bNbHp3}*Z>Kl;xvJ6xF4+;hw+mw$H zEkg6Z@cDmY_=w7PMfCY+;rajXd_L9!)d}G94L5y0vE(`viSvC+SsyrvRKgx_Q2gkFseZv+Z<0$}Kv87fMuwbrY=V`p;i`w(Kk%QZ0@r>yJHY-zv7iIcR&uovoLt3`k;Ac*oF^Uo~6 z4O0~DDXp3ZrW9ka;>NMCwBY;<=lgBpX(LSfjs*;iokiSUBiY{o$?`MfB#wY%@*5O{50f&5p^&X_hv>J zRi)CEqIYW6-t<<~d@!%ySQ$5W=lxU^Jlb=tbVFpL%I^rdD$T402ugiA78x*6>m|0} z`B=KqS(IxbA&)ZaMnHKOi1tSCXrF>z$y2l~+VeD|Z(L_t{>Uta<6=wzDnF6(gjbkH z(65Zzn%C7gOMGU#{OU|6`jIph*+Ih-=SdE68(FE=2?<7w502k=bs`rgY@{w0GEjg= zd;fOiXixmRx-Y}`+Njdax^H(ndN}07!hIeC-8(IjiV@Q^wGy2~41AkrKRbs<-*v=Y zsxAy)9M`0|aZ)D7Z-1FGUTgfhZ_vmhFYny)(e8Wgu4~dSI`r~tW8;_mm6m=5ef7Pkqz#|eqiR-Qtni;bA=3E_&FwbAoFi;Qk{Je#5&tJOb z`Hw|gNi#~x***Be#8%@bWc&P^_?VYN3b`56j7q~Xf6XOe{_xtb@jta+?V*$PuJm2w ziR=0B+V7NjTd66i{krSCZeJ`M-q+yy85a4CXo77b$h;ot`e#>hOM261p{%f;C25@7Rn?F;Ukx63C_F$IiK5Ikn^4IJGo+fxc_I)r`-{E zG3+6z`tGlYt#7WG%V%Djor@@bMxLfN`wh^6Z9kR}8H;-N4*KIQ1);VE9=A4xbuArhWl857faT0R3Mb z0)BYw2h~F$?Fc?!f5~$#Jow{fmm546!yK?A$;NHH#INXG;a4bme=3k>x%QKi#AO^` z_NG;@FV#zwPEsSs?}znR0`7OzQz-L;cdCt4*-I4xDsTie{Ix5;Jcb_u^_hQ;K-tXU zXPZYL7aV~^#PYxKs*ck@eV9wd*+J1nMQ!RVyej2r^O3%^VI^?tQ}=FTLXqhe`j z2%SMwz$I(4wFN9-s%&HGY|4mfCYu;Fafw8LYnB+pNw^0=^b zMAsZE`*Uc8=+e==uY}N?yXWwlpr2MmUVt2b{kH6tr2U=`jX~0n_uyG&QN{=J0oyV@Y0KSue-k?J-XB5?m~;fa zKTm|_Us2Tj`(K@(DQViS!|r+^@}j!6h@Bs?T{=I%%>G}UAJYjey#Gz~-&@HK-tjFJ zVCqnOge(jkA3#4RziQ@Jdvz10q1Q^qxuE- zW6xXFf9zNfKM&yess76GZ}$9AwSH+wc;gqI^M5eoia~#eZjKwX;DiA!E_W(G=6!HPJ2$?u7swLxzL8g z)4omae|qZG*LYu%5PcaenaO;ENav(!l?r&)Uj)zkWy;_^f4hA-j02(Y;{UgZIfMFW znB9lqwiA3$l*1qdJ^w^d>A7^=xlB;wXQhXZDv*v4RC?|*-^V-f@q7vMFdh~5eEO!{ z`%V?~O|-`?G_DXcRm1F_=FBVo;Hrk#Bz~|;Pg=|Lp(*{k@i@-Fv0};er)tjwIyWR+ zz*jiaJM-_<^0DJBpLtZ!5yMabOZc3#(;08Fdz8i{;?+pkMONr}D)d=Ry=uYmB?@v*!1BnTG*&pj2Wjx@) zFLhgEA;6ZQnr0y7Erqp9;d?^e)WtmV=+cBW1pIu*EKhDIQ-=O^scgl>L<=K9a1wo ztXjt`I6r=L{Jygjv9Jf={jUwdodahKOdl0E@6P~LzR0HjN~O5-=Y?ViWgCkxzEVuU z$klc}x^m^?XIQrH4$t;=FQkBuujZ;d==`V`ikX_fS?-PvSarJH(IKb7b}`JiFaL;p z-;-YSKDQK4xpUu>t10S^u}ko7Vq+L$dRi_45MjU#@V{ z&t#N2^Rx9QU!^&1&B{kg8||c--X%}G2nv38ho`g<>OE-#DyJVVwk_!O8TH23D8uT! zUqH$S%lRvQKkspv+(OFde?azO9bZ&^esL72^Zp$6nHd0sK5(*6ZJxo}L&2na_xNL_ zt5v=n5@F9{_H`?*|5(1JbKJi%c^Fd7CC<|7M@1pmbk#^Hebpf=9y)t^?$*(~#F4TY z7k1^|TeWnM`1iIRCQ;wNiML%(6y%`mnL9W8Z8_$MY3J2<*7j-*NnJm3H#D6aZ|8bk zVwz{yK-^fbE2Qn zv51~n;=Z{v7+>in(NoY&_TLsqH;Oryb)j>l@rinexV}|Gn_FywSWkP*xE^uUY*pa? 
zG?8Xssit8e<)FaNDj%^+Zem67=2MHO-Ech|dEDUPEbA@G!03|V>t~;;%z1p?sOYJI zX}#zk%;|e(4Kb#1>@ST?L?gtx6Xugp-s`~Gll+iojn29D`I87XZ?$RjMzB(nY|D!fu}PzMMIm-mX~@M)>o+hXN;^#QJLWdml)iZph0{=SZ z!*u_5)1B}!kGGAbuQta#K7%ojcPLMHX8DY7mimad(!AYDePu|cKG4{6*c|cv+1UFz z;<<`zjPclU=QJ-(1JJ!fq{f~jLYi0jv$5BVT+YXk{ivz;D!0SxRXRRI>%PPh!S=aAD9$spNu8P8 z#avH$-)nD7$4@-aJeL$MaAzcE5KMW^zOs+BCOsBvpBE`4~vCo5pfrvtv*sriQjZ) z@(Sr;n+Ee8Za?dM4__+6<~tgX`Rfq9^M7+Z{b|xw*ob$~%U8+Xk?F8nPcFRP?<2M# z_E`Fovnb6%XdY!SsP}tPHHlY?)GZ2b<_>A4$xZjPhc zW#Kg#^uuw#v*iUqv1jyIvfK$8Fy{M>nYJs;Cy$XTe?w>1f@hk$)v3(t-5|~B!D1dA zcj$~xKq%D;$z+k7ioH(f=btm4#w43Fp4$ljnbJIh0O0@sZm@%&7dG63YV1d>^YOtt zyy)9Iyg%#ww?Uo%I6{XP40-;(!+UbFu+naOhxhG29p3lbI=n000qo+96J`Aruh?pA z?!TG5@w3vKe6I~w=}lx>UpW~!92Pyx6SHdcYuFDDJfWb+jKQYZW+QeiqA`vFus`&# z4wC5CASQE?=g@D)njBwJ+-&pl_gsdx`Enyu4t2UxRL!PdBER>l`J5Ei3<+R=FXeo2 zsG86fQ*`iBHM+k~AH-jc9op*f;(-pYUKlL;t2+q8qCd#_R=jS5=&!-%<##c7<<6I@ zPN2|Ri(eD+PsX3XInxI+{<|wNGIK{(2m&qN1Hp_<%prKLh?m$j(1NUw!C*5&dD8 zdG_M`jrq=XkTvUO$X_fBqp`kP7J7~K#4Q7Z;3{ACer3l;WOo$fHk#72EvMVX4@k*E zNks#hjBmtteC>3~qF%UGIlg%^nR?k}+y`-=bP$#{l zN&(vYS7x8%c-?4OdjZ<}tb{gEw77>alTk2ZC@N|=1L_Y~ei8;u`J`wjUnTX?>0>ib zEGdUY24Br676Quu0#iQCp$=CPnDViJ^6jMB1{LBkF;cF~V+%%MMTp*|g^l>WIw|Z3 z8}YpdxBQY7XGP_@5&z%k`;J8S^*kr<2Nz-8CzVT#=$=8$_Zd*l_tDVlF}9pP;(abQ z2qpZ>XuuypvIAlNi|XwyEC@#Ryp6HD7Xbtfa{>H)_$?AmRT$RqKf6Zp6AJ=40Qr3E z75FQ^#R50ecKhwEb$mH8ecGM~0x{%906=;quwv4kz|>9b zEG%v8El|a{agySDFu-^X{xBk8tb&dG{hq-cC*KLi z+|dknD{(h~E#2d)hkxWEfk3EDArMxib?{Q`3IG&i;l*q12D?v;8q|aLv~O3jAdph< z!^}ufkJxvGvIi^THTy69|9;bc0c~#D0lr;GxV@`U$_O<#)Qf!n-*-+5CZp*!m=DJ_pO35r;tdk(Mc; zhYF61g^4-Tf){Fz!sh;db(Bb}bFkS7)eU8FGlT`W)sZ1ke+p9O%aq7Vot5+Ih(E`Sm|CT4Cn z9u~$)auK$Ji`;+l2T5q0v{R=ze!Tbu!$3si_V$&husqcUz!h*D<`~L-DXW`B3 zWC2C>XtYR;Q}KdM={c~0+rm%ie=Nq#!o}@RpePTccO3_ezz=X5#6^7$ti1dmP%akm zXou>eLWamwv%#V~KwSOhnE){0g@)lfBMAubnAuyHxct{i*oxf=OT?je0Ouii`2+c= z8%FJd!_Q-7;$jLu8EocgZ*O7t2e!5U#$`7~00;$Kmlx?=@Wkx`D9Gb#;r7#zja|V< zj@|$BxNj|Tc=g_K9&kD+IJPgcJYi={e&JvC?CNf22Dl^YyZWTJC({@#a+nSRIfk@I zr5if~3W&lNX<~1Wyif~QV@pRDV{1oOHwP0C22ggIkNd87+5sE%Re*gB88NwFy1BC$ z{Gz|sZSCgf^v9b`O`>&l1+2Y_emm%5)-Ir8zd+s0oc`Q_`mqmxY6GBN;QQEzkXEXH zXBSX@9tR6g)c13>^XwyBfU<_S{pRtWJs>m{0Njn%zp|dWqMb1X zBzQKBgvZ0;54?dVZNtZF0PH>>qsVcnqhuE_aUKf~3kNr2YZC`^`#-@W@WFFH2ZpH) z&I+>kX)4wtP9``Y2B zyWk7+xY@fJ{}Hv=^0i}BxJ3P6hnEPqC$*;=cfl0nfdWv}&0=sm^d1pdS}fRAQ z2+|M!{K_~tBR+21P9VG%<~DAQE}K}W0{y@@?BWL6A!XaXnw1@av=ojVS62WD$nGFp z3^R;f;~MbA5ZE6?rj#9c;oXj_v9bYy-Oa_u)ZNYEKPoj_NC4oUf2e;dXOqh=?}%h? 
zWBMON_V>U4E3|C>zS`hUD}nt!JTv_dlr6K7IaR-Zk{<#oc)2}l-bMihG5ZGvqTQ9W z1&aK%AEJNyPK35Y0qVD9see86hgvlU`Pn%{{CC7V;Uhglw;K#98ss7031;gBEmSuX zv68EZ7W){`;M--B!G%SmZ~e5I5HA;D4|9_poyISvH&Fh%GYIWI=v=0Z|rj=Do|Z{Urv}WI=ud08tiy*1gIC#)h_y z3!)|q@}+b{S-cMKRhEBWQAbS{kdND*bR=lL61kcZ1rlLh%YBcd!0e0!H=`=~2wvLN58LzKl;V6U?L`;HxIvLIjc zLX^cpXz#LYzx0KgEXdc65M{9y*{dv@@#xR}F7gE!L|N>`_AU$T3CusYBjjrth_aYU z>{XUc{_^Mhf_y&!Q5K6MeZ#IoQLX9ZPS(&}c0z13NLsh8R z5%NecqAa?{{zew$5g*iKL7px}ltoW|ud={~&GsS-@|Yu{EN2z=F3a|zN7VKOdF&2R w7Tptjmu3499%`~64}l@dqNBW5SvGkcD*govl$9Y64e)Oi3j}gn6;^TjKl=0QM*si- diff --git a/lua-resty-healthcheck-scm-1.rockspec b/lua-resty-healthcheck-scm-1.rockspec index 610a44b3..309958b2 100644 --- a/lua-resty-healthcheck-scm-1.rockspec +++ b/lua-resty-healthcheck-scm-1.rockspec @@ -1,8 +1,7 @@ package = "lua-resty-healthcheck" version = "scm-1" source = { - url = "git://github.com/kong/lua-resty-healthcheck", - branch = "master", + url = "git://github.com/Kong/lua-resty-healthcheck", } description = { summary = "Healthchecks for OpenResty to check upstream service status", @@ -15,8 +14,7 @@ description = { homepage = "https://github.com/Kong/lua-resty-healthcheck" } dependencies = { - "lua-resty-worker-events ~> 2", - "penlight ~> 1.7", + "penlight >= 1.9.2", "lua-resty-timer ~> 1", } build = { diff --git a/README.md b/readme.md similarity index 70% rename from README.md rename to readme.md index 89ac5b2d..78065a1e 100644 --- a/README.md +++ b/readme.md @@ -1,8 +1,7 @@ # lua-resty-healthcheck -![latest version](https://img.shields.io/github/v/tag/Kong/lua-resty-healthcheck?sort=semver) -![latest luarocks version](https://img.shields.io/luarocks/v/kong/lua-resty-healthcheck?style=flat-square) -![master branch](https://github.com/Kong/lua-resty-healthcheck/actions/workflows/latest_os.yml/badge.svg) +![legacy version](https://img.shields.io/luarocks/v/kong/lua-resty-healthcheck/1.6.1-1?style=flat-square) +![Release 1.6.x](https://github.com/Kong/lua-resty-healthcheck/actions/workflows/build_and_test_with_resty_events.yml/badge.svg?branch=release/1.6.x) ![License](https://img.shields.io/badge/License-Apache%202.0-blue?style=flat-square) ![Twitter Follow](https://img.shields.io/twitter/follow/thekonginc?style=social) @@ -88,61 +87,14 @@ programmatic API using functions such as `checker:report_http_status(host, port, See the [online LDoc documentation](http://kong.github.io/lua-resty-healthcheck) for the complete API. -## Async behaviour - -Since this library heavily uses the SHM to share data between workers, it must -use locks. The locks themselves need access to `ngx.sleep` which is not available -in all contexts. Most notably not during startup; `init` and `init_worker`. - -The library will try and acquire the lock and update, but if it fails it will -schedule an async update (timer with delay 0). - -One workaround for this in the initial phases would be to replace `ngx.sleep` with -a version that does a blocking sleep in `init`/`init_worker`. This will enable -the usage of locks in those phases. 
-
-
 ## History
 
 Versioning is strictly based on [Semantic Versioning](https://semver.org/)
 
-### Releasing new versions:
-
-* update changelog below (PR's should be merged including a changelog entry)
-* based on changelog determine new SemVer version
-* create a new rockspec
-* render the docs using `ldoc` (don't do this within PR's)
-* commit as "release x.x.x" (do not include rockspec revision)
-* tag the commit with "x.x.x" (do not include rockspec revision)
-* push commit and tag
-* upload rock to luarocks: `luarocks upload rockspecs/[name] --api-key=abc`
-
-### 2.0.0 (22-Sep-2020)
-
-* BREAKING: fallback for deprecated top-level field `type` is now removed
-  (deprecated since `0.5.0`) [#56](https://github.com/Kong/lua-resty-healthcheck/pull/56)
-* BREAKING: Bump `lua-resty-worker-events` dependency to `2.0.0`. This makes
-  a lot of the APIs in this library asynchronous as the worker events `post`
-  and `post_local` won't anymore call `poll` on a running worker automatically,
-  for more information, see:
-  https://github.com/Kong/lua-resty-worker-events#200-16-september-2020
-* BREAKING: tcp_failures can no longer be 0 on http(s) checks (unless http(s)_failures
-  are also set to 0) [#55](https://github.com/Kong/lua-resty-healthcheck/pull/55)
-* feature: Added support for https_sni [#49](https://github.com/Kong/lua-resty-healthcheck/pull/49)
-* fix: properly log line numbers by using tail calls [#29](https://github.com/Kong/lua-resty-healthcheck/pull/29)
-* fix: when not providing a hostname, use IP [#48](https://github.com/Kong/lua-resty-healthcheck/pull/48)
-* fix: makefile; make install
-* feature: added a status version field [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54)
-* feature: add headers for probe request [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54)
-* fix: exit early when reloading during a probe [#47](https://github.com/Kong/lua-resty-healthcheck/pull/47)
-* fix: prevent target-list from being nil, due to async behaviour [#44](https://github.com/Kong/lua-resty-healthcheck/pull/44)
-* fix: replace timer and node-wide locks with resty-timer, to prevent interval
-  skips [#59](https://github.com/Kong/lua-resty-healthcheck/pull/59)
-* change: added additional logging on posting events [#25](https://github.com/Kong/lua-resty-healthcheck/issues/25)
-* fix: do not run out of timers during init/init_worker when adding a vast
-  amount of targets [#57](https://github.com/Kong/lua-resty-healthcheck/pull/57)
-* fix: do not call on the module table, but use a method for locks. Also in
-  [#57](https://github.com/Kong/lua-resty-healthcheck/pull/57)
+### 1.6.3 (06-Sep-2023)
+
+* Feature: Added support for https_sni [#49](https://github.com/Kong/lua-resty-healthcheck/pull/49) (backport)
+* Fix: Use OpenResty API for mTLS [#99](https://github.com/Kong/lua-resty-healthcheck/pull/99) (backport)
 
 ### 1.6.2 (17-Nov-2022)
 
@@ -174,24 +126,6 @@ Versioning is strictly based on [Semantic Versioning](https://semver.org/)
   serialization API. If it is unavailable, lua-resty-healthcheck fallbacks to
   cjson. [#109](https://github.com/Kong/lua-resty-healthcheck/pull/109)
-
-### 1.5.3 (14-Nov-2022)
-
-* Fix: avoid raising worker events for new targets that were marked for delayed
-  removal, i.e. targets that already exist in memory only need the removal flag
-  cleared when added back.
[#121](https://github.com/Kong/lua-resty-healthcheck/pull/121) - -### 1.5.2 (07-Jul-2022) - -* Better handling of `resty.lock` failure modes, adding more checks to ensure the - lock is held before running critical code, and improving the decision whether a - function should be retried after a timeout trying to acquire a lock. - [#113](https://github.com/Kong/lua-resty-healthcheck/pull/113) -* Increased logging for locked function failures. - [#114](https://github.com/Kong/lua-resty-healthcheck/pull/114) -* The cleanup frequency of deleted targets was lowered, cutting the number of - created locks in a short period. - [#116](https://github.com/Kong/lua-resty-healthcheck/pull/116) - ### 1.5.1 (23-Mar-2022) * Fix: avoid breaking active health checks when adding or removing targets. diff --git a/rockspecs/lua-resty-healthcheck-0.1.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.1.0-1.rockspec deleted file mode 100644 index 2954ba83..00000000 --- a/rockspecs/lua-resty-healthcheck-0.1.0-1.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.1.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.1.0.tar.gz", - dir = "lua-resty-healthcheck-0.1.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - license = "Apache 2.0", - homepage = "https://github.com/Kong/lua-resty-healthcheck" -} -dependencies = { - "lua-resty-worker-events == 0.3.1", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua", - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.2.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.2.0-1.rockspec deleted file mode 100644 index 7b71f84a..00000000 --- a/rockspecs/lua-resty-healthcheck-0.2.0-1.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.2.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.2.0.tar.gz", - dir = "lua-resty-healthcheck-0.2.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - license = "Apache 2.0", - homepage = "https://github.com/Kong/lua-resty-healthcheck" -} -dependencies = { - "lua-resty-worker-events == 0.3.1", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua", - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.3.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.3.0-1.rockspec deleted file mode 100644 index ac5eb885..00000000 --- a/rockspecs/lua-resty-healthcheck-0.3.0-1.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.3.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.3.0.tar.gz", - dir = "lua-resty-healthcheck-0.3.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - license = "Apache 2.0", - homepage = "https://github.com/Kong/lua-resty-healthcheck" -} -dependencies = { - "lua-resty-worker-events == 0.3.1", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua", - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.4.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.4.0-1.rockspec deleted file mode 100644 index 1461e642..00000000 --- a/rockspecs/lua-resty-healthcheck-0.4.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.4.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.4.0.tar.gz", - tag = "0.4.0", - dir = "lua-resty-healthcheck-0.4.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events == 0.3.1" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.4.1-1.rockspec b/rockspecs/lua-resty-healthcheck-0.4.1-1.rockspec deleted file mode 100644 index 1cfe6e6b..00000000 --- a/rockspecs/lua-resty-healthcheck-0.4.1-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.4.1-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.4.1.tar.gz", - tag = "0.4.1", - dir = "lua-resty-healthcheck-0.4.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.4.1-2.rockspec b/rockspecs/lua-resty-healthcheck-0.4.1-2.rockspec deleted file mode 100644 index c39aedcd..00000000 --- a/rockspecs/lua-resty-healthcheck-0.4.1-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.4.1-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.4.1.tar.gz", - tag = "0.4.1", - dir = "lua-resty-healthcheck-0.4.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.4.2-1.rockspec b/rockspecs/lua-resty-healthcheck-0.4.2-1.rockspec deleted file mode 100644 index a1f96a60..00000000 --- a/rockspecs/lua-resty-healthcheck-0.4.2-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.4.2-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.4.2.tar.gz", - tag = "0.4.2", - dir = "lua-resty-healthcheck-0.4.2" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.4.2-2.rockspec b/rockspecs/lua-resty-healthcheck-0.4.2-2.rockspec deleted file mode 100644 index cc20fd7b..00000000 --- a/rockspecs/lua-resty-healthcheck-0.4.2-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.4.2-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.4.2.tar.gz", - tag = "0.4.2", - dir = "lua-resty-healthcheck-0.4.2" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.5.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.5.0-1.rockspec deleted file mode 100644 index 751f9a45..00000000 --- a/rockspecs/lua-resty-healthcheck-0.5.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.5.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.5.0.tar.gz", - tag = "0.5.0", - dir = "lua-resty-healthcheck-0.5.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.5.0-2.rockspec b/rockspecs/lua-resty-healthcheck-0.5.0-2.rockspec deleted file mode 100644 index 1f75c99c..00000000 --- a/rockspecs/lua-resty-healthcheck-0.5.0-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.5.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.5.0.tar.gz", - tag = "0.5.0", - dir = "lua-resty-healthcheck-0.5.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.6.0-1.rockspec b/rockspecs/lua-resty-healthcheck-0.6.0-1.rockspec deleted file mode 100644 index 66faf5d1..00000000 --- a/rockspecs/lua-resty-healthcheck-0.6.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.6.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.6.0.tar.gz", - tag = "0.6.0", - dir = "lua-resty-healthcheck-0.6.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.6.0-2.rockspec b/rockspecs/lua-resty-healthcheck-0.6.0-2.rockspec deleted file mode 100644 index 73e2a024..00000000 --- a/rockspecs/lua-resty-healthcheck-0.6.0-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.6.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.6.0.tar.gz", - tag = "0.6.0", - dir = "lua-resty-healthcheck-0.6.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.6.1-1.rockspec b/rockspecs/lua-resty-healthcheck-0.6.1-1.rockspec deleted file mode 100644 index 4c30c81c..00000000 --- a/rockspecs/lua-resty-healthcheck-0.6.1-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.6.1-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.6.1.tar.gz", - tag = "0.6.1", - dir = "lua-resty-healthcheck-0.6.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-0.6.1-2.rockspec b/rockspecs/lua-resty-healthcheck-0.6.1-2.rockspec deleted file mode 100644 index 62da40ba..00000000 --- a/rockspecs/lua-resty-healthcheck-0.6.1-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "0.6.1-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/0.6.1.tar.gz", - tag = "0.6.1", - dir = "lua-resty-healthcheck-0.6.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.0.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.0.0-1.rockspec deleted file mode 100644 index e1d71c58..00000000 --- a/rockspecs/lua-resty-healthcheck-1.0.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.0.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.0.0.tar.gz", - tag = "1.0.0", - dir = "lua-resty-healthcheck-1.0.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.0.0-2.rockspec b/rockspecs/lua-resty-healthcheck-1.0.0-2.rockspec deleted file mode 100644 index 3a043c50..00000000 --- a/rockspecs/lua-resty-healthcheck-1.0.0-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.0.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.0.0.tar.gz", - tag = "1.0.0", - dir = "lua-resty-healthcheck-1.0.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.1.0-1.rockspec deleted file mode 100644 index f30df58c..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.0.tar.gz", - tag = "1.1.0", - dir = "lua-resty-healthcheck-1.1.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.0-2.rockspec b/rockspecs/lua-resty-healthcheck-1.1.0-2.rockspec deleted file mode 100644 index 17a48085..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.0-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.0.tar.gz", - tag = "1.1.0", - dir = "lua-resty-healthcheck-1.1.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.1-1.rockspec b/rockspecs/lua-resty-healthcheck-1.1.1-1.rockspec deleted file mode 100644 index 3c3843b2..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.1-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.1-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.1.tar.gz", - tag = "1.1.1", - dir = "lua-resty-healthcheck-1.1.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.1-2.rockspec b/rockspecs/lua-resty-healthcheck-1.1.1-2.rockspec deleted file mode 100644 index 051380d3..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.1-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.1-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.1.tar.gz", - tag = "1.1.1", - dir = "lua-resty-healthcheck-1.1.1" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.2-1.rockspec b/rockspecs/lua-resty-healthcheck-1.1.2-1.rockspec deleted file mode 100644 index 03f4dcf5..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.2-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.2-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.2.tar.gz", - tag = "1.1.2", - dir = "lua-resty-healthcheck-1.1.2" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.1.2-2.rockspec b/rockspecs/lua-resty-healthcheck-1.1.2-2.rockspec deleted file mode 100644 index d936377b..00000000 --- a/rockspecs/lua-resty-healthcheck-1.1.2-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.1.2-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.1.2.tar.gz", - tag = "1.1.2", - dir = "lua-resty-healthcheck-1.1.2" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.2.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.2.0-1.rockspec deleted file mode 100644 index 82cc1b0a..00000000 --- a/rockspecs/lua-resty-healthcheck-1.2.0-1.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.2.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.2.0.tar.gz", - dir = "lua-resty-healthcheck-1.2.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.2.0-2.rockspec b/rockspecs/lua-resty-healthcheck-1.2.0-2.rockspec deleted file mode 100644 index f643454c..00000000 --- a/rockspecs/lua-resty-healthcheck-1.2.0-2.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.2.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.2.0.tar.gz", - dir = "lua-resty-healthcheck-1.2.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.3.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.3.0-1.rockspec deleted file mode 100644 index 80125d5f..00000000 --- a/rockspecs/lua-resty-healthcheck-1.3.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.3.0-1" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.3.0.tar.gz", - dir = "lua-resty-healthcheck-1.3.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2", - "penlight >= 1.7.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-1.3.0-2.rockspec b/rockspecs/lua-resty-healthcheck-1.3.0-2.rockspec deleted file mode 100644 index 92c0a27a..00000000 --- a/rockspecs/lua-resty-healthcheck-1.3.0-2.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "1.3.0-2" -source = { - url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.3.0.tar.gz", - dir = "lua-resty-healthcheck-1.3.0" -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 0.3.2, < 2.0", - "penlight >= 1.7.0", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/rockspecs/lua-resty-healthcheck-2.0.0-1.rockspec b/rockspecs/lua-resty-healthcheck-2.0.0-1.rockspec deleted file mode 100644 index 0be1cc87..00000000 --- a/rockspecs/lua-resty-healthcheck-2.0.0-1.rockspec +++ /dev/null @@ -1,27 +0,0 @@ -package = "lua-resty-healthcheck" -version = "2.0.0-1" -source = { - url = "git://github.com/kong/lua-resty-healthcheck", - tag = "2.0.0", -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/Kong/lua-resty-healthcheck", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events ~> 2", - "penlight >= 1.7.0", - "lua-resty-timer ~> 1", -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - } -} diff --git a/rockspecs/lua-resty-healthcheck-api7-master-0-0.rockspec b/rockspecs/lua-resty-healthcheck-api7-master-0-0.rockspec deleted file mode 100644 index ffc080ff..00000000 --- a/rockspecs/lua-resty-healthcheck-api7-master-0-0.rockspec +++ /dev/null @@ -1,29 +0,0 @@ -package = "lua-resty-healthcheck-api7-master" -version = "0-0" -source = { - url = "git://github.com/api7/lua-resty-healthcheck", - branch = "master", -} - -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. - ]], - homepage = "https://github.com/api7/lua-resty-healthcheck", - license = "Apache 2.0" -} - -dependencies = { - "lua-resty-worker-events ~> 2", - "penlight >= 1.7.0", - "lua-resty-timer ~> 1" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - } -} \ No newline at end of file diff --git a/rockspecs/lua-resty-healthcheck-iresty-1.0.1-2.rockspec b/rockspecs/lua-resty-healthcheck-iresty-1.0.1-2.rockspec deleted file mode 100644 index 9c81148b..00000000 --- a/rockspecs/lua-resty-healthcheck-iresty-1.0.1-2.rockspec +++ /dev/null @@ -1,26 +0,0 @@ -package = "lua-resty-healthcheck-iresty" -version = "1.0.1-2" -source = { - url = "git://github.com/iresty/lua-resty-healthcheck/", - tag = "v1.0.1", -} -description = { - summary = "Healthchecks for OpenResty to check upstream service status", - detailed = [[ - lua-resty-healthcheck is a module that can check upstream service - availability by sending requests and validating responses at timed - intervals. 
- ]], - homepage = "https://github.com/iresty/lua-resty-healthcheck/", - license = "Apache 2.0" -} -dependencies = { - "lua-resty-worker-events >= 1.0.0" -} -build = { - type = "builtin", - modules = { - ["resty.healthcheck"] = "lib/resty/healthcheck.lua", - ["resty.healthcheck.utils"] = "lib/resty/healthcheck/utils.lua" - } -} diff --git a/t/18-event_handler.t b/t/18-event_handler.t deleted file mode 100644 index c203cbc5..00000000 --- a/t/18-event_handler.t +++ /dev/null @@ -1,111 +0,0 @@ -use Test::Nginx::Socket::Lua; -use Cwd qw(cwd); - -workers(1); - -plan tests => repeat_each() * 4; - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - lua_shared_dict test_shm 8m; - lua_shared_dict my_worker_events 8m; -}; - -run_tests(); - -__DATA__ - -=== TEST 1: add_target() without hostname, remove_target() with same ip:port ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2112; - location = /status { - return 200; - } - } -} ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1 - }, - unhealthy = { - interval = 0.1 - } - } - } - }) - ngx.sleep(0.2) -- wait twice the interval - local ok, err = checker:add_target("127.0.0.1", 2112) - ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ok, err = checker:remove_target("127.0.0.1", 2112) - ngx.sleep(0.2) -- wait twice the interval - } - } ---- request -GET /t ---- response_body -true - -=== TEST 2: add_target() with hostname, remove_target() on same target ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2112; - location = /status { - return 200; - } - } -} ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1 - }, - unhealthy = { - interval = 0.1 - } - } - } - }) - ngx.sleep(0.2) -- wait twice the interval - local ok, err = checker:add_target("127.0.0.1", 2112, "localhost") - ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ok, err = checker:remove_target("127.0.0.1", 2112, "localhost") - ngx.sleep(0.2) -- wait twice the interval - } - } ---- request -GET /t ---- response_body -true - diff --git a/t/21-run_locked.t b/t/21-run_locked.t new file mode 100644 index 00000000..aae19330 --- /dev/null +++ b/t/21-run_locked.t @@ -0,0 +1,347 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) + 1; + +my $pwd = cwd(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + lua_shared_dict my_worker_events 8m; + + init_worker_by_lua_block { + local we = require "resty.worker.events" + + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + + _G.__TESTING_HEALTHCHECKER = true + + local healthcheck = require("resty.healthcheck") + + _G.checker = assert(healthcheck.new({ + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0, + }, + unhealthy = { + interval = 0, + } + } + } + })) + + 
checker._set_lock_timeout(1) + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: run_locked() runs a function immediately and returns its result +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local flag = false + local ok, err = checker:_run_locked("key", function() + flag = true + return "OK" + end) + + ngx.say(ok) + ngx.say(err) + ngx.say(flag) + } + } +--- request +GET /t +--- response_body +OK +nil +true +--- no_error_log +[error] + + + +=== TEST 2: run_locked() can run a function immediately in an non-yieldable phase if no lock is held +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + local value + local ok, err = checker:_run_locked("key", function() + value = "SET" + return "OK" + end) + + if not ok then + ngx.log(ngx.ERR, "run_locked failed: ", err) + return + end + + ngx.ctx.ok = ok + return value + } + + content_by_lua_block { + ngx.say(ngx.ctx.ok) + ngx.say(ngx.var.test) + } + } +--- request +GET /t +--- response_body +OK +SET +--- no_error_log +[error] + + + +=== TEST 3: run_locked() schedules a function in a timer if a lock cannot be acquired during a non-yieldable phase +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + ngx.ctx.lock = lock + + local t = {} + ngx.ctx.t = t + + local ok, err = checker:_run_locked(key, function() + t.flag = true + t.phase = ngx.get_phase() + return true + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + } + + content_by_lua_block { + assert(ngx.ctx.lock:unlock()) + + local t = ngx.ctx.t + + for i = 1, 10 do + if t.flag then + break + end + ngx.sleep(0.25) + end + + ngx.say(t.phase or "none") + ngx.say(t.flag or "timeout") + } + } +--- request +GET /t +--- response_body +timer +true +--- no_error_log +[error] + + + +=== TEST 4: run_locked() doesn't schedule a function in a timer during a yieldable phase +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + + local flag = false + local ok, err = checker:_run_locked(key, function() + flag = true + return true + end) + + ngx.say(ok) + ngx.say(err) + ngx.say(flag) + } + } +--- request +GET /t +--- response_body +nil +failed acquiring lock for 'my_lock_key', timeout +false +--- no_error_log +[error] + + + +=== TEST 5: run_locked() handles function exceptions +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local ok, err = checker:_run_locked("key", function() + error("oh no!") + return true + end) + + -- remove "content_by_lua(nginx.conf:)" context and such from + -- the error string so that our test is a little more stable + err = err:gsub(" content_by_lua[^ ]+", "") + + ngx.say(ok) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +nil +locked function threw an exception: oh no! 
+--- no_error_log +[error] + + + +=== TEST 6: run_locked() returns errors from the locked function +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local ok, err = checker:_run_locked("key", function() + return nil, "I've failed you" + end) + + ngx.say(ok) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +nil +I've failed you +--- no_error_log +[error] + + + +=== TEST 7: run_locked() logs errors/exceptions from scheduled functions +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + ngx.ctx.lock = lock + + local t = { count = 0 } + ngx.ctx.t = t + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + error("LOCK EXCEPTION") + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + return nil, "LOCK ERROR" + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + return true + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + } + + content_by_lua_block { + assert(ngx.ctx.lock:unlock()) + + local t = ngx.ctx.t + + for i = 1, 10 do + if t.count >= 3 then + break + end + ngx.sleep(0.25) + end + + ngx.say(t.count) + } + } +--- request +GET /t +--- response_body +3 +--- error_log +LOCK ERROR +LOCK EXCEPTION + + + +=== TEST 8: run_locked() passes any/all arguments to the locked function +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local sum = 0 + local ok, err = checker:_run_locked("key", function(a, b, c) + sum = sum + a + b + c + return true + end, 1, 2, 3) + + ngx.say(ok) + ngx.say(err) + ngx.say(sum) + } + } +--- request +GET /t +--- response_body +true +nil +6 +--- no_error_log +[error] diff --git a/t/lock-failed.t b/t/lock-failed.t deleted file mode 100644 index 9837ee4c..00000000 --- a/t/lock-failed.t +++ /dev/null @@ -1,79 +0,0 @@ -use Test::Nginx::Socket::Lua 'no_plan'; -use Cwd qw(cwd); - -workers(1); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/deps/share/lua/5.1/?/init.lua;$pwd/deps/share/lua/5.1/?.lua;;$pwd/lib/?.lua;;"; - lua_shared_dict test_shm 8m; - lua_shared_dict my_worker_events 8m; -}; - -no_shuffle(); -run_tests(); - -__DATA__ - - -=== TEST 1: acquire lock timeout ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2116; - location = /status { - return 200; - } - } -} ---- config - location = /t { - content_by_lua_block { - -- add a lock manually - local resty_lock = require ("resty.lock") - local shm_name = "test_shm" - local name = "testing" - local key = "lua-resty-healthcheck:" .. name .. 
":target_list_lock" - local tl_lock, lock_err = resty_lock:new(shm_name, { - exptime = 10, -- timeout after which lock is released anyway - timeout = 5, -- max wait time to acquire lock - }) - assert(tl_lock, "new lock failed") - local elapsed, err = tl_lock:lock(key) - assert(elapsed, "lock failed") - - -- acquire a lock in the new function - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local ok, err = healthcheck.new({ - name = name, - shm_name = shm_name, - type = "http", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1, -- we don't want active checks - successes = 1, - }, - unhealthy = { - interval = 0.1, -- we don't want active checks - tcp_failures = 3, - http_failures = 3, - } - } - } - }) - assert(ok == nil, "lock success") - ngx.log(ngx.ERR, err) - } - } ---- request -GET /t ---- error_log -failed acquiring lock for 'lua-resty-healthcheck:testing:target_list_lock', timeout ---- timeout: 10 diff --git a/t/req-headers.t b/t/req-headers.t deleted file mode 100644 index c0c2f6b9..00000000 --- a/t/req-headers.t +++ /dev/null @@ -1,116 +0,0 @@ -use Test::Nginx::Socket::Lua 'no_plan'; -use Cwd qw(cwd); - -workers(1); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - lua_shared_dict test_shm 8m; - lua_shared_dict my_worker_events 8m; -}; - -run_tests(); - -__DATA__ - -=== TEST 1: req_headers: {"User-Agent", "curl/7.29.0"} ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2112; - location = /status { - return 200; - } - } -} ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1 - }, - headers = {"User-Agent: curl/7.29.0"} - } - } - }) - ngx.sleep(0.2) -- wait twice the interval - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - } - } ---- request -GET /t ---- response_body -true ---- error_log -checking healthy targets: nothing to do -checking healthy targets: #1 -GET /status HTTP/1.1 -Connection: close -User-Agent: curl/7.29.0 -Host: 127.0.0.1 - - - -=== TEST 2: req_headers: {"User-Agent", "curl"} ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2112; - location = /status { - return 200; - } - } -} ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1 - }, - headers = {"User-Agent: curl"} - } - } - }) - ngx.sleep(0.2) -- wait twice the interval - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - } - } ---- request -GET /t ---- response_body -true ---- error_log -checking healthy targets: nothing to do -checking healthy targets: #1 -GET /status HTTP/1.1 -Connection: close -User-Agent: curl -Host: 127.0.0.1 diff --git a/t/with_resty-events/00-new.t b/t/with_resty-events/00-new.t new file mode 
100644 index 00000000..03bc1f97 --- /dev/null +++ b/t/with_resty-events/00-new.t @@ -0,0 +1,229 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) - 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: new() requires worker_events to be configured +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + events_module = "resty.events", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +please configure + +=== TEST 2: new() requires 'name' +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + events_module = "resty.events", + shm_name = "test_shm", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +required option 'name' is missing + +=== TEST 3: new() fails with invalid shm +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "invalid_shm", + events_module = "resty.events", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +no shm found by name + +=== TEST 4: new() initializes with default config +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + }) + } + } +--- request +GET /t +--- response_body + +--- error_log +Healthchecker started! + +=== TEST 5: new() only accepts http or tcp types +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + }) + ngx.say(ok) + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + }) + ngx.say(ok) + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "get lost", + }) + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +true +false + +=== TEST 6: new() deals with bad inputs +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + + -- tests for failure + local tests = { + { active = { timeout = -1 }}, + { active = { timeout = 1e+42 }}, + { active = { concurrency = -1 }}, + { active = { concurrency = 1e42 }}, + { active = { healthy = { interval = -1 }}}, + { active = { healthy = { interval = 1e42 }}}, + { active = { healthy = { successes = -1 }}}, + { active = { healthy = { successes = 1e42 }}}, + { active = { unhealthy = { interval = -1 }}}, + { active = { unhealthy = { interval = 1e42 }}}, + { active = { unhealthy = { tcp_failures = -1 }}}, + { active = { unhealthy = { tcp_failures = 1e42 }}}, + { active = { unhealthy = { timeouts = -1 }}}, + { active = { unhealthy = { timeouts = 1e42 }}}, + { active = { unhealthy = { http_failures = -1 }}}, + { active = { unhealthy = { http_failures = 1e42 }}}, + { passive = { healthy = { successes = -1 }}}, + { passive = { healthy = { successes = 1e42 }}}, + { passive = { unhealthy = { tcp_failures = -1 }}}, + { passive = { unhealthy = { tcp_failures = 1e42 }}}, + { passive = { unhealthy = { timeouts = -1 }}}, + { passive = { unhealthy = { timeouts = 1e42 }}}, + { passive = { unhealthy = { http_failures = -1 }}}, + { passive = { unhealthy = { http_failures = 1e42 }}}, + } + for _, test in ipairs(tests) do + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = test, + }) + ngx.say(ok) + end + } + } +--- request +GET /t +--- response_body +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false diff --git a/t/with_resty-events/01-start-stop.t b/t/with_resty-events/01-start-stop.t new file mode 100644 index 00000000..152355f7 --- /dev/null +++ b/t/with_resty-events/01-start-stop.t @@ -0,0 +1,182 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) + 1; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } 
+}; + +run_tests(); + +__DATA__ + +=== TEST 1: start() can start after stop() +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:start() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +[error] + + +=== TEST 3: start() is a no-op if active intervals are 0 +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0 + }, + unhealthy = { + interval = 0 + } + } + } + }) + local ok, err = checker:start() + ngx.say(ok) + local ok, err = checker:start() + ngx.say(ok) + local ok, err = checker:start() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +true +true +--- no_error_log +[error] + +=== TEST 4: stop() stops health checks +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +[error] +checking + +=== TEST 5: start() restarts health checks +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.say(ok) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:start() + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +checking diff --git a/t/with_resty-events/02-add_target.t b/t/with_resty-events/02-add_target.t new file mode 100644 index 00000000..0815f613 --- /dev/null +++ b/t/with_resty-events/02-add_target.t @@ -0,0 +1,183 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 4) + 3; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== 
TEST 1: add_target() adds an unhealthy target +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 11111, nil, false) + ngx.say(ok) + ngx.sleep(0.5) + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking unhealthy targets: #1 + +--- no_error_log +checking healthy targets: #1 + + + +=== TEST 2: add_target() adds a healthy target +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +checking healthy targets: #1 + +--- no_error_log +checking unhealthy targets: #1 + + + +=== TEST 3: calling add_target() repeatedly does not change status +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2113; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +checking healthy targets: #1 + +--- no_error_log +checking unhealthy targets: #1 diff --git a/t/with_resty-events/03-get_target_status.t b/t/with_resty-events/03-get_target_status.t new file mode 100644 index 00000000..f85e7d3a --- /dev/null +++ b/t/with_resty-events/03-get_target_status.t @@ -0,0 +1,106 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 4); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen 
unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: get_target_status() reports proper status +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2115; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2115, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true + + checker:report_tcp_failure("127.0.0.1", 2115) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- false + + checker:report_success("127.0.0.1", 2115) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true +--- no_error_log +checking healthy targets: #1 +checking unhealthy targets: #1 diff --git a/t/with_resty-events/04-report_success.t b/t/with_resty-events/04-report_success.t new file mode 100644 index 00000000..06952980 --- /dev/null +++ b/t/with_resty-events/04-report_success.t @@ -0,0 +1,316 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 28; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_success() recovers HTTP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, 
"passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2118)' from 'false' to 'true' + + +=== TEST 2: report_success() recovers TCP active = passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2118)' from 'false' to 'true' + +=== TEST 3: report_success() is a nop when active.healthy.sucesses == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 999, -- we don't want active checks 
+ tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2116, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2116)' from 'false' to 'true' + + + +=== TEST 4: report_success() is a nop when passive.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2118; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 0, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 0, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '(127.0.0.1:2118)' from 'false' to 'true' diff --git a/t/with_resty-events/05-report_failure.t b/t/with_resty-events/05-report_failure.t new file mode 100644 index 00000000..229c20a0 --- /dev/null +++ b/t/with_resty-events/05-report_failure.t @@ -0,0 +1,261 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 26; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_failure() fails HTTP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy =
{ + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy HTTP increment (1/3) for '(127.0.0.1:2117)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2117)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_failure() fails TCP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy TCP increment (1/2) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 3: report_failure() is a nop when failure counters == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + 
local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 0, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 0, + http_failures = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- no_error_log +unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy TCP increment (1/2) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' diff --git a/t/with_resty-events/06-report_http_status.t b/t/with_resty-events/06-report_http_status.t new file mode 100644 index 00000000..6686682b --- /dev/null +++ b/t/with_resty-events/06-report_http_status.t @@ -0,0 +1,499 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 41; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_http_status() failures active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + 
ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy HTTP increment (1/3) for '(127.0.0.1:2119)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2119)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2119)' +event: target status '(127.0.0.1:2119)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + + +=== TEST 2: report_http_status() successes active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (2/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (3/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (4/4) for '(127.0.0.1:2119)' +event: target status '(127.0.0.1:2119)' from 'false' to 'true' +healthy SUCCESS increment (1/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (2/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (3/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (4/4) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'false' to 'true' + + +=== TEST 3: report_http_status() with success is a nop when passive.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + 
server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 0, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' + + +=== TEST 4: report_http_status() with success is a nop when active.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' + + +=== TEST 5: report_http_status() with failure is a nop when passive.unhealthy.http_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + 
http_failures = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy HTTP increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'true' to 'false' + + +=== TEST 6: report_http_status() with failure is a nop when active.unhealthy.http_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy HTTP increment +event: target status '(127.0.0.1:2119)' from 'true' to 'false' + + +=== TEST 7: report_http_status() must work in log phase +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + ngx.say("OK") + } + log_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +failed to acquire lock: API disabled in the context of log_by_lua diff --git a/t/with_resty-events/07-report_tcp_failure.t b/t/with_resty-events/07-report_tcp_failure.t new file mode 100644 index 00000000..f6dd4898 --- /dev/null +++ b/t/with_resty-events/07-report_tcp_failure.t @@ -0,0 +1,242 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); +
+workers(1); + +plan tests => repeat_each() * 18; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_tcp_failure() active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TCP increment (1/3) for '(127.0.0.1:2120)' +unhealthy TCP increment (2/3) for '(127.0.0.1:2120)' +unhealthy TCP increment (3/3) for '(127.0.0.1:2120)' +event: target status '(127.0.0.1:2120)' from 'true' to 'false' +unhealthy TCP increment (1/3) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/3) for '(127.0.0.1:2113)' +unhealthy TCP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_tcp_failure() for active is a nop when active.unhealthy.tcp_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 0, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + } + } + } + }) + 
local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2120)' from 'true' to 'false' + + + +=== TEST 3: report_tcp_failure() for passive is a nop when passive.unhealthy.tcp_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 0, + http_failures = 5, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2120)' from 'true' to 'false' diff --git a/t/with_resty-events/08-report_timeout.t b/t/with_resty-events/08-report_timeout.t new file mode 100644 index 00000000..b418baa5 --- /dev/null +++ b/t/with_resty-events/08-report_timeout.t @@ -0,0 +1,244 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 16; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_timeout() active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + }, + passive = { 
+ healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2113, nil, "passive") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2122)' +unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2122)' +event: target status '(127.0.0.1:2122)' from 'true' to 'false' +unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2113)' +unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_timeout() for active is a nop when active.unhealthy.timeouts == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 0, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2122)' from 'true' to 'false' + + + +=== TEST 3: report_timeout() for passive is a nop when passive.unhealthy.timeouts == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "passive") + checker:report_timeout("127.0.0.1", 2122, nil, "passive") + 
checker:report_timeout("127.0.0.1", 2122, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2122)' from 'true' to 'false' diff --git a/t/with_resty-events/09-active_probes.t b/t/with_resty-events/09-active_probes.t new file mode 100644 index 00000000..b307c847 --- /dev/null +++ b/t/with_resty-events/09-active_probes.t @@ -0,0 +1,536 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 59; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: active probes, http node failing +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + ngx.sleep(2) -- active healthchecks might take some time to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 2: active probes, http node recovering +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS 
increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + +=== TEST 3: active probes, custom http status (regression test for pre-filled defaults) +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + http_statuses = { 429 }, + } + }, + } + }) + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +--- no_error_log +checking healthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' + + +=== TEST 4: active probes, custom http status, node failing +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 401; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + http_statuses = { 401 }, + } + }, + } + }) + ngx.sleep(2) -- active healthchecks might take some time to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 5: active probes, host is correctly set +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + content_by_lua_block { + if ngx.req.get_headers()["Host"] == "example.com" then + ngx.exit(200) + else + ngx.exit(500) + end + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval = 0.1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up 
to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false) + ngx.sleep(0.2) -- wait for 2x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: #1 + + +=== TEST 6: active probes, tcp node failing +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 3, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + -- Note: no http server configured, so port 2114 remains unanswered + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy TCP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 7: active probes, tcp node recovering +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 3, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + + + +=== TEST 8: active probes, custom Host header is correctly set +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + content_by_lua_block { + if ngx.req.get_headers()["Host"] == "custom-host.test" then + ngx.exit(200) + else + ngx.exit(500) + end + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval 
= 0.1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false, "custom-host.test") + ngx.sleep(0.3) -- wait for 3x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + + + +=== TEST 9: active probes, interval is respected +--- http_config eval +qq{ + $::HttpConfig + + # ignore lua tcp socket read timed out + lua_socket_log_errors off; + + server { + listen 2114; + location = /status { + access_by_lua_block { + ngx.sleep(0.3) + ngx.exit(200) + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 1, + successes = 1, + }, + unhealthy = { + interval = 1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(1) -- wait for the check interval + -- checker callback should not be called more than 5 times + if checker.checker_callback_count < 5 then + ngx.say("OK") + else + ngx.say("BAD") + end + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +[error] diff --git a/t/with_resty-events/10-garbagecollect.t_disabled b/t/with_resty-events/10-garbagecollect.t_disabled new file mode 100644 index 00000000..885e5a48 --- /dev/null +++ b/t/with_resty-events/10-garbagecollect.t_disabled @@ -0,0 +1,105 @@ +# This test is disabled +# +# We need to understand if it is a false-positive or lua-resty-healthcheck is +# actually leaking the event module memory when deleting a checker instance. +# +# Please rename this test if a solution is found or remove it if it is a +# false-positive. + +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: garbage collect the checker object +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2121; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local dump = function(...) 
ngx.log(ngx.DEBUG,"\027[31m\n", require("pl.pretty").write({...}),"\027[0m") end + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + assert(checker:add_target("127.0.0.1", 2121, nil, true)) + local weak_table = setmetatable({ checker },{ + __mode = "v", + }) + checker = nil -- now only anchored in weak table above + collectgarbage() + collectgarbage() + collectgarbage() + collectgarbage() + ngx.sleep(0.5) -- leave room for timers to run (they shouldn't, but we want to be sure) + ngx.say(#weak_table) -- after GC, should be 0 length + } + } +--- request +GET /t +--- response_body +0 +--- no_error_log +checking healthy targets: #1 diff --git a/t/with_resty-events/11-clear.t b/t/with_resty-events/11-clear.t new file mode 100644 index 00000000..6fce9327 --- /dev/null +++ b/t/with_resty-events/11-clear.t @@ -0,0 +1,298 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 27; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: clear() clears the list, new checkers never see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + checker1:clear() + + local checker2 = healthcheck.new(config) + + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +initial target list (0 targets) + +--- no_error_log +initial target list (1 targets) +initial target list (2 targets) +initial target list (3 targets) +initial target list (4 targets) +initial target list (5 targets) +initial target list (6 targets) +initial target list (7 targets) +initial target list (8 targets) +initial target list (9 targets) +initial target list (10 targets) +initial target list (11 targets) + +=== TEST 2: clear() clears the list, other checkers get notified and clear too +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + local checker2 = 
healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 20000 + i, nil, false) + end + checker2:clear() + ngx.sleep(1) + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +checking unhealthy targets: nothing to do + +--- no_error_log +checking unhealthy targets: #10 + +=== TEST 3: clear() resets counters +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 21120; + location = /status { + return 503; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.2, + }, + unhealthy = { + interval = 0.2, + http_failures = 3, + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 21120, nil, true) + ngx.sleep(0.5) -- wait 2.5x the interval + checker1:clear() + checker1:add_target("127.0.0.1", 21120, nil, true) + ngx.sleep(0.3) -- wait 1.5x the interval + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:21120)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:21120)' +--- no_error_log +unhealthy HTTP increment (3/3) for '(127.0.0.1:21120)' + + +=== TEST 4: delayed_clear() clears the list, after interval new checkers don't see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10001)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + ngx.say(checker2:get_target_status("127.0.0.1", 10001)) + ngx.sleep(2.6) -- wait while the targets are cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found + +=== TEST 5: delayed_clear() would clear tgt list, but adding again keeps the previous status +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 10001, nil, false) + checker1:add_target("127.0.0.1", 10002, nil, false) + checker1:add_target("127.0.0.1", 10003, nil, false) + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10002)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + checker2:add_target("127.0.0.1", 10002, nil, true) + ngx.say(checker2:get_target_status("127.0.0.1", 10002)) + ngx.sleep(2.6) -- wait while the targets would be cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= 
nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10002) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10003) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found +false +target not found diff --git a/t/with_resty-events/12-set_target_status.t b/t/with_resty-events/12-set_target_status.t new file mode 100644 index 00000000..9576f7d0 --- /dev/null +++ b/t/with_resty-events/12-set_target_status.t @@ -0,0 +1,207 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: set_target_status() updates a status +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:set_target_status("127.0.0.1", 2112, nil, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:set_target_status("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true + + +=== TEST 2: set_target_status() restores node after passive check disables it +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:set_target_status("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true + + +=== TEST 3: set_target_status() resets the failure counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = 
"resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + checker:set_target_status("127.0.0.1", 2112, nil, true) + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + } + } +--- request +GET /t +--- response_body +true +true +false + + +=== TEST 3: set_target_status() resets the success counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + checker:set_target_status("127.0.0.1", 2112, nil, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:report_http_status("127.0.0.1", 2112, nil, 200) + checker:set_target_status("127.0.0.1", 2112, nil, false) + checker:report_http_status("127.0.0.1", 2112, nil, 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:report_http_status("127.0.0.1", 2112, nil, 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +false +false +true diff --git a/t/with_resty-events/13-integration.t_disabled b/t/with_resty-events/13-integration.t_disabled new file mode 100644 index 00000000..0e7b9274 --- /dev/null +++ b/t/with_resty-events/13-integration.t_disabled @@ -0,0 +1,207 @@ +# This test is disabled +# +# All the test steps used here take longer than the request timeout because of +# all the ngx.sleep needed to synchronize the events. Running them invididually +# seem to work, so the solution is to split the integration test into smaller +# tests. 
+ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: ensure counters work properly +--- http_config eval +qq{ + $::HttpConfig +} +--- config eval +qq{ + location = /t { + content_by_lua_block { + local host = "127.0.0.1" + local port = 2112 + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0, + successes = 4, + }, + unhealthy = { + interval = 0, + tcp_failures = 2, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + timeouts = 2, + } + } + } + }) + + local ok, err = checker:add_target(host, port, nil, true) + + -- S = successes counter + -- F = http_failures counter + -- T = tcp_failures counter + -- O = timeouts counter + + local cases = {} + + local function incr(idxs, i, max) + idxs[i] = idxs[i] + 1 + if idxs[i] > max and i > 1 then + idxs[i] = 1 + incr(idxs, i - 1, max) + end + end + + local function add_cases(cases, len, m) + local idxs = {} + for i = 1, len do + idxs[i] = 1 + end + local word = {} + for _ = 1, (#m) ^ len do + for c = 1, len do + word[c] = m[idxs[c]] + end + table.insert(cases, table.concat(word)) + incr(idxs, len, #m) + end + end + + local m = { "S", "F", "T", "O" } + + -- There are 324 (3*3*3*3*4) possible internal states + -- to the above healthcheck configuration where all limits are set to 2. + -- We need at least five events (4*4*4*4) to be able + -- to exercise all of them + for i = 1, 5 do + add_cases(cases, i, m) + end + + -- Brute-force test all combinations of health events up to 5 events + -- and compares the results given by the library with a simple simulation + -- that implements the specified behavior. 
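+            -- Example: with the passive limits above (2 for every counter),
+            -- the four-event case "FFSS" plays out as follows, starting healthy:
+            --   F -> http_failures = 1 (success counter reset), still healthy
+            --   F -> http_failures = 2, target flips to unhealthy
+            --   S -> successes = 1 (failure counters reset), still unhealthy
+            --   S -> successes = 2, target flips back to healthy
+            -- run_test_case() below replays each generated case against the
+            -- checker and against this counter model, and reports any mismatch.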
+ local function run_test_case(case) + assert(checker:set_target_status(host, port, nil, true)) + ngx.sleep(0.002) + local i = 1 + local s, f, t, o = 0, 0, 0, 0 + local mode = true + for c in case:gmatch(".") do + if c == "S" then + checker:report_http_status(host, port, nil, 200, "passive") + ngx.sleep(0.002) + s = s + 1 + f, t, o = 0, 0, 0 + if s == 2 then + mode = true + end + elseif c == "F" then + checker:report_http_status(host, port, nil, 500, "passive") + ngx.sleep(0.002) + f = f + 1 + s = 0 + if f == 2 then + mode = false + end + elseif c == "T" then + checker:report_tcp_failure(host, port, nil, "read", "passive") + ngx.sleep(0.002) + t = t + 1 + s = 0 + if t == 2 then + mode = false + end + elseif c == "O" then + checker:report_timeout(host, port, nil, "passive") + ngx.sleep(0.002) + o = o + 1 + s = 0 + if o == 2 then + mode = false + end + end + + --local ctr, state = checker:test_get_counter(host, port, nil) + --ngx.say(case, ": ", c, " ", string.format("%08x", ctr), " ", state) + --ngx.log(ngx.DEBUG, case, ": ", c, " ", string.format("%08x", ctr), " ", state) + + if checker:get_target_status(host, port, nil) ~= mode then + ngx.say("failed: ", case, " step ", i, " expected ", mode) + return false + end + i = i + 1 + end + return true + end + + for _, case in ipairs(cases) do + ngx.log(ngx.ERR, "Case: ", case) + run_test_case(case) + end + ngx.say("all ok!") + } + } +} +--- request +GET /t +--- response_body +all ok! +--- error_log +--- no_error_log diff --git a/t/with_resty-events/14-tls_active_probes.t b/t/with_resty-events/14-tls_active_probes.t new file mode 100644 index 00000000..d9e44902 --- /dev/null +++ b/t/with_resty-events/14-tls_active_probes.t @@ -0,0 +1,155 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: active probes, valid https +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "badssl.com", false) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "badssl.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- timeout +15 + +=== TEST 2: active probes, invalid cert +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "wrong.host.badssl.com", true) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "wrong.host.badssl.com")) -- false + } + } +--- request +GET /t +--- response_body +false +--- timeout +15 + +=== TEST 3: active probes, accept invalid cert when disabling check +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + https_verify_certificate = false, + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "wrong.host.badssl.com", false) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "wrong.host.badssl.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- timeout +15 diff --git a/t/with_resty-events/15-get_virtualhost_target_status.t b/t/with_resty-events/15-get_virtualhost_target_status.t new file mode 100644 index 00000000..2921a397 --- /dev/null +++ b/t/with_resty-events/15-get_virtualhost_target_status.t @@ -0,0 +1,322 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 5) + 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: get_target_status() reports proper status for virtualhosts +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2115, "ahostname", true) + local ok, err = checker:add_target("127.0.0.1", 2115, "otherhostname", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false + checker:report_success("127.0.0.1", 2115, "otherhostname") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_tcp_failure("127.0.0.1", 2115, "otherhostname") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true + 
local _, err = checker:get_target_status("127.0.0.1", 2115) + ngx.say(err) -- target not found + } + } +--- request +GET /t +--- response_body +true +true +true +false +true +false +true +target not found +--- error_log +unhealthy HTTP increment (1/2) for 'otherhostname(127.0.0.1:2115)' +unhealthy HTTP increment (2/2) for 'otherhostname(127.0.0.1:2115)' +event: target status 'otherhostname(127.0.0.1:2115)' from 'true' to 'false' + + + +=== TEST 2: get_target_status() reports proper status for mixed targets (with/without hostname) +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, "ahostname", true) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + checker:report_http_status("127.0.0.1", 2116, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false + } + } +--- request +GET /t +--- response_body +true +true +true +false +--- error_log +unhealthy HTTP increment (1/1) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'true' to 'false' + + + +=== TEST 3: active probe for virtualhosts listening on same port:ip combination +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + server_name healthyserver; + location = /status { + return 200; + } + } + server { + listen 2117; + server_name unhealthyserver; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, "healthyserver", true) + local ok, err = checker:add_target("127.0.0.1", 2117, "unhealthyserver", true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2117, "healthyserver")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2117, "unhealthyserver")) -- false + local _, err = checker:get_target_status("127.0.0.1", 2117) + ngx.say(err) -- target not found + } + } +--- request +GET /t +--- response_body +true +false +target not found +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for 'unhealthyserver(127.0.0.1:2117)' +unhealthy HTTP increment (2/3) for 'unhealthyserver(127.0.0.1:2117)' +unhealthy HTTP increment (3/3) for 'unhealthyserver(127.0.0.1:2117)' +event: target status 'unhealthyserver(127.0.0.1:2117)' from 'true' to 'false' + + + 
+=== TEST 4: get_target_status() reports proper status for same target with and without hostname +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2118, "127.0.0.1", true) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true + checker:report_http_status("127.0.0.1", 2118, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true + checker:report_http_status("127.0.0.1", 2119, "127.0.0.1", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- false + } + } +--- request +GET /t +--- response_body +true +true +true +true +false +true +false +true +false +false +false +false +--- error_log +unhealthy HTTP increment (1/1) for '(127.0.0.1:2118)' +event: target status '(127.0.0.1:2118)' from 'true' to 'false' +unhealthy HTTP increment (1/1) for '127.0.0.1(127.0.0.1:2119)' +event: target status '127.0.0.1(127.0.0.1:2119)' from 'true' to 'false' diff --git a/t/with_resty-events/16-set_all_target_statuses_for_hostname.t b/t/with_resty-events/16-set_all_target_statuses_for_hostname.t new file mode 100644 index 00000000..1385e611 --- /dev/null +++ b/t/with_resty-events/16-set_all_target_statuses_for_hostname.t @@ -0,0 +1,233 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: set_all_target_statuses_for_hostname() updates statuses +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local 
healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +false +false +true +true + + +=== TEST 2: set_all_target_statuses_for_hostname() restores node after passive check disables it +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +false +true +true + + +=== TEST 3: set_all_target_statuses_for_hostname() resets failure counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, 
"rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +true +true +false +true + + +=== TEST 4: set_target_status() resets the success counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + } + } +--- request +GET /t +--- response_body +false +false +false +false +true +false diff --git a/t/with_resty-events/17-mtls.t b/t/with_resty-events/17-mtls.t new file mode 100644 index 00000000..c0d0afc3 --- /dev/null +++ b/t/with_resty-events/17-mtls.t @@ -0,0 +1,145 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 4; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: configure a MTLS probe +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local pl_file = require "pl.file" + local cert = pl_file.read("t/with_resty-events/util/cert.pem", true) + local key = pl_file.read("t/with_resty-events/util/key.pem", true) + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing_mtls", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + ssl_cert = cert, + ssl_key = key, + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + ngx.say(checker ~= nil) -- true + } + } +--- request +GET /t +--- response_body +true + + +=== TEST 2: 
configure a MTLS probe with parsed cert/key +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local pl_file = require "pl.file" + local ssl = require "ngx.ssl" + local cert = ssl.parse_pem_cert(pl_file.read("t/with_resty-events/util/cert.pem", true)) + local key = ssl.parse_pem_priv_key(pl_file.read("t/with_resty-events/util/key.pem", true)) + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing_mtls", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + ssl_cert = cert, + ssl_key = key, + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + ngx.say(checker ~= nil) -- true + } + } +--- request +GET /t +--- response_body +true diff --git a/t/with_resty-events/18-req-headers.t b/t/with_resty-events/18-req-headers.t new file mode 100644 index 00000000..696f5600 --- /dev/null +++ b/t/with_resty-events/18-req-headers.t @@ -0,0 +1,285 @@ +use Test::Nginx::Socket::Lua 'no_plan'; +use Cwd qw(cwd); + +workers(1); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: headers: {"User-Agent: curl/7.29.0"} +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + req_headers = {"User-Agent: curl/7.29.0"} + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.1 +Connection: close +User-Agent: curl/7.29.0 +Host: 127.0.0.1 + + + +=== TEST 2: headers: {"User-Agent: curl"} +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + req_headers = {"User-Agent: curl"} + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = 
checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.1 +Connection: close +User-Agent: curl +Host: 127.0.0.1 + + +=== TEST 3: headers: { ["User-Agent"] = "curl" } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + req_headers = { ["User-Agent"] = "curl" } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.1 +Connection: close +User-Agent: curl +Host: 127.0.0.1 + + + +=== TEST 4: headers: { ["User-Agent"] = {"curl"} } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + req_headers = { ["User-Agent"] = {"curl"} } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.1 +Connection: close +User-Agent: curl +Host: 127.0.0.1 + + + +=== TEST 5: headers: { ["User-Agent"] = {"curl", "nginx"} } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + req_headers = { ["User-Agent"] = {"curl", "nginx"} } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.1 +Connection: close +User-Agent: curl +User-Agent: nginx +Host: 127.0.0.1 diff --git a/t/with_resty-events/98-get_target_list.t b/t/with_resty-events/98-get_target_list.t new file mode 100644 index 00000000..b916c519 --- /dev/null +++ b/t/with_resty-events/98-get_target_list.t @@ -0,0 +1,164 @@ +use Test::Nginx::Socket::Lua 'no_plan'; +use Cwd qw(cwd); + +workers(1); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path 
"$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +no_shuffle(); +run_tests(); + +__DATA__ + + + +=== TEST 1: healthy +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local name = "testing" + local shm_name = "test_shm" + local checker = healthcheck.new({ + name = name, + shm_name = shm_name, + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 0.1, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + checker:add_target("127.0.0.1", 2116, nil, false) + checker:add_target("127.0.0.2", 2116, nil, false) + ngx.sleep(3) + local nodes = healthcheck.get_target_list(name, shm_name) + assert(#nodes == 2, "invalid number of nodes") + for _, node in ipairs(nodes) do + assert(node.ip == "127.0.0.1" or node.ip == "127.0.0.2", "invalid ip") + assert(node.port == 2116, "invalid port") + assert(node.status == "healthy", "invalid status") + assert(node.counter.success == 1, "invalid success counter") + assert(node.counter.tcp_failure == 0, "invalid tcp failure counter") + assert(node.counter.http_failure == 0, "invalid http failure counter") + assert(node.counter.timeout_failure == 0, "invalid timeout failure counter") + end + } + } +--- request +GET /t +--- timeout: 5 + + + +=== TEST 2: healthcheck - add_target with meta +--- http_config eval +qq{ + $::HttpConfig + + # ignore lua tcp socket read timed out + lua_socket_log_errors off; + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local name = "testing" + local shm_name = "test_shm" + local checker = healthcheck.new({ + name = name, + shm_name = shm_name, + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 0.1, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + checker:add_target("127.0.0.1", 2116, nil, false, nil, { raw = "host_1" }) + checker:add_target("127.0.0.2", 2116, nil, false, nil, { raw = "host_2" }) + ngx.sleep(2) + local nodes = healthcheck.get_target_list(name, shm_name) + assert(#nodes == 2, "invalid number of nodes") + for _, node in ipairs(nodes) do + assert(node.ip == "127.0.0.1" or node.ip == "127.0.0.2", "invalid ip") + assert(node.port == 2116, "invalid port") + assert(node.status == "healthy", "invalid status") + assert(node.counter.success == 1, "invalid success counter") + assert(node.counter.tcp_failure == 0, "invalid tcp failure counter") + assert(node.counter.http_failure == 0, "invalid http failure counter") + assert(node.counter.timeout_failure == 0, "invalid timeout 
failure counter") + assert(node.meta.raw == "host_1" or node.meta.raw == "host_2", "invalid node meta") + end + } + } +--- request +GET /t +--- no_error_log +[error] +--- timeout: 5 diff --git a/t/19-status-ver.t b/t/with_resty-events/99-status_ver.t similarity index 67% rename from t/19-status-ver.t rename to t/with_resty-events/99-status_ver.t index d6e9da39..ce3192c3 100644 --- a/t/19-status-ver.t +++ b/t/with_resty-events/99-status_ver.t @@ -1,10 +1,11 @@ use Test::Nginx::Socket::Lua 'no_plan'; use Cwd qw(cwd); -workers(1); +workers(2); master_on(); my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); our $HttpConfig = qq{ lua_package_path "$pwd/lib/?.lua;;"; @@ -12,13 +13,20 @@ our $HttpConfig = qq{ lua_shared_dict my_worker_events 8m; init_worker_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + ngx.timer.at(0, function() local healthcheck = require("resty.healthcheck") local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + events_module = "resty.events", checks = { active = { healthy = { @@ -30,16 +38,23 @@ our $HttpConfig = qq{ } } }) - ngx.sleep(0) - we.poll() local ok, err = checker:add_target("127.0.0.1", 11111) if not ok then error(err) end - ngx.sleep(0) - we.poll() end) } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } }; run_tests(); @@ -63,3 +78,4 @@ true checking unhealthy targets: nothing to do checking unhealthy targets: #1 from 'true' to 'false', ver: 2 +from 'true' to 'false', ver: 1 diff --git a/t/util/cert.pem b/t/with_resty-events/util/cert.pem similarity index 100% rename from t/util/cert.pem rename to t/with_resty-events/util/cert.pem diff --git a/t/util/key.pem b/t/with_resty-events/util/key.pem similarity index 100% rename from t/util/key.pem rename to t/with_resty-events/util/key.pem diff --git a/t/util/reindex b/t/with_resty-events/util/reindex similarity index 100% rename from t/util/reindex rename to t/with_resty-events/util/reindex diff --git a/t/00-new.t b/t/with_worker-events/00-new.t similarity index 88% rename from t/00-new.t rename to t/with_worker-events/00-new.t index 31cb8a44..d8d11c6b 100644 --- a/t/00-new.t +++ b/t/with_worker-events/00-new.t @@ -119,7 +119,7 @@ GET /t --- error_log Healthchecker started! -=== TEST 6: new() only accepts http(s) or tcp types +=== TEST 6: new() only accepts http or tcp types --- http_config eval: $::HttpConfig --- config location = /t { @@ -130,41 +130,19 @@ Healthchecker started! 
local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "http", - }, - } + type = "http", }) ngx.say(ok) local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "https", - }, - } + type = "tcp", }) ngx.say(ok) local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "tcp", - }, - } - }) - ngx.say(ok) - local ok, err = pcall(healthcheck.new, { - name = "testing", - shm_name = "test_shm", - checks = { - active = { - type = "get lost", - }, - } + type = "get lost", }) ngx.say(ok) } @@ -174,7 +152,6 @@ GET /t --- response_body true true -true false === TEST 7: new() deals with bad inputs diff --git a/t/01-start-stop.t b/t/with_worker-events/01-start-stop.t similarity index 69% rename from t/01-start-stop.t rename to t/with_worker-events/01-start-stop.t index 88b59de3..6ca00f9d 100644 --- a/t/01-start-stop.t +++ b/t/with_worker-events/01-start-stop.t @@ -43,52 +43,15 @@ __DATA__ ngx.sleep(0.2) -- wait twice the interval local ok, err = checker:start() ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request GET /t --- response_body true -2 --- no_error_log [error] -=== TEST 2: start() cannot start a second time using active health checks ---- http_config eval: $::HttpConfig ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - healthy = { - interval = 0.1 - }, - unhealthy = { - interval = 0.1 - } - } - } - }) - local ok, err = checker:start() - ngx.say(err) - } - } ---- request -GET /t ---- response_body -cannot start, timers are still running ---- no_error_log -[error] === TEST 3: start() is a no-op if active intervals are 0 --- http_config eval: $::HttpConfig @@ -118,10 +81,6 @@ cannot start, timers are still running ngx.say(ok) local ok, err = checker:start() ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request @@ -130,7 +89,6 @@ GET /t true true true -0 --- no_error_log [error] @@ -158,18 +116,12 @@ true }) local ok, err = checker:stop() ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request GET /t --- response_body true -0 --- no_error_log [error] checking @@ -198,17 +150,9 @@ checking }) local ok, err = checker:stop() ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:start() ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) ngx.sleep(0.2) -- wait twice the interval } } @@ -216,8 +160,6 @@ checking GET /t --- response_body true -0 true -2 --- error_log checking diff --git a/t/02-add_target.t b/t/with_worker-events/02-add_target.t similarity index 60% rename from t/02-add_target.t rename to t/with_worker-events/02-add_target.t index 376a0bad..2f8fffd1 100644 --- a/t/02-add_target.t +++ 
b/t/with_worker-events/02-add_target.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 4) + 5; +plan tests => repeat_each() * (blocks() * 4) + 3; my $pwd = cwd(); @@ -39,10 +39,10 @@ __DATA__ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 11111, nil, false) ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(0.5) } } --- request @@ -51,7 +51,6 @@ GET /t true --- error_log checking healthy targets: nothing to do -checking unhealthy targets: nothing to do checking unhealthy targets: #1 --- no_error_log @@ -92,7 +91,7 @@ qq{ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) ngx.say(ok) ngx.sleep(0.2) -- wait twice the interval @@ -103,7 +102,6 @@ GET /t --- response_body true --- error_log -checking healthy targets: nothing to do checking unhealthy targets: nothing to do checking healthy targets: #1 @@ -148,7 +146,7 @@ qq{ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) ngx.say(ok) @@ -160,95 +158,8 @@ GET /t --- response_body true --- error_log -checking healthy targets: nothing to do checking unhealthy targets: nothing to do checking healthy targets: #1 --- no_error_log checking unhealthy targets: #1 - - - -=== TEST 4: calling add_target() repeatedly does not exhaust timers ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2113; - location = /status { - return 200; - } - } - lua_max_pending_timers 100; - - init_worker_by_lua_block { - --error("erreur") - local resty_lock = require ("resty.lock") - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1, - successes = 1, - }, - unhealthy = { - interval = 0.1, - tcp_failures = 1, - http_failures = 1, - } - } - } - }) - - -- lock the key, so adding targets will fallback on timers - local lock = assert(resty_lock:new(checker.shm_name, { - exptime = 10, -- timeout after which lock is released anyway - timeout = 5, -- max wait time to acquire lock - })) - assert(lock:lock(checker.TARGET_LIST_LOCK)) - - local addr = { - 127, 0, 0, 1 - } - -- add 10000 check, exhausting timers... 
- for i = 0, 150 do - addr[4] = addr[4] + 1 - if addr[4] > 255 then - addr[4] = 1 - addr[3] = addr[3] + 1 - if addr[3] > 255 then - addr[3] = 1 - addr[2] = addr[2] + 1 - if addr[2] > 255 then - addr[2] = 1 - addr[1] = addr[1] + 1 - end - end - end - local ok, err = assert(checker:add_target(table.concat(addr, "."), 2113, nil, true)) - end - } - -} - ---- config - location = /t { - content_by_lua_block { - ngx.say(true) - ngx.exit(200) - } - } - ---- request -GET /t ---- response_body -true ---- no_error_log -too many pending timers diff --git a/t/03-get_target_status.t b/t/with_worker-events/03-get_target_status.t similarity index 91% rename from t/03-get_target_status.t rename to t/with_worker-events/03-get_target_status.t index 695d2d3b..d7e0b4d5 100644 --- a/t/03-get_target_status.t +++ b/t/with_worker-events/03-get_target_status.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 4) + 2; +plan tests => repeat_each() * (blocks() * 4); my $pwd = cwd(); @@ -63,17 +63,11 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2115, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true - checker:report_tcp_failure("127.0.0.1", 2115) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- false - checker:report_success("127.0.0.1", 2115) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true } } @@ -83,9 +77,6 @@ GET /t true false true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log checking healthy targets: #1 checking unhealthy targets: #1 diff --git a/t/04-report_success.t b/t/with_worker-events/04-report_success.t similarity index 96% rename from t/04-report_success.t rename to t/with_worker-events/04-report_success.t index 0d71fe10..8fb5fb6e 100644 --- a/t/04-report_success.t +++ b/t/with_worker-events/04-report_success.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 32; +plan tests => repeat_each() * 28; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true } @@ -86,8 +84,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' @@ -147,14 +143,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 
2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true } @@ -165,8 +159,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' @@ -224,11 +216,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2116, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false } } @@ -290,11 +280,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, nil)) -- false } } diff --git a/t/05-report_failure.t b/t/with_worker-events/05-report_failure.t similarity index 92% rename from t/05-report_failure.t rename to t/with_worker-events/05-report_failure.t index e3227dda..47f8c7e9 100644 --- a/t/05-report_failure.t +++ b/t/with_worker-events/05-report_failure.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 11) - 1; +plan tests => repeat_each() * 26; my $pwd = cwd(); @@ -40,9 +40,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "http", checks = { active = { - type = "http", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -55,7 +55,6 @@ qq{ } }, passive = { - type = "http", healthy = { successes = 3, }, @@ -69,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false } @@ -87,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy HTTP increment (1/3) for '(127.0.0.1:2117)' unhealthy HTTP increment (2/3) for '(127.0.0.1:2117)' unhealthy HTTP increment (3/3) for '(127.0.0.1:2117)' @@ -120,9 +115,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "tcp", checks = { active = { - type = "tcp", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -135,7 +130,6 @@ qq{ } }, passive = { - type = "tcp", healthy = { successes = 3, }, @@ -149,14 +143,12 @@ qq{ ngx.sleep(0.1) -- wait for 
initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false } @@ -167,8 +159,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' event: target status '(127.0.0.1:2117)' from 'true' to 'false' @@ -198,9 +188,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "tcp", checks = { active = { - type = "tcp", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -213,7 +203,6 @@ qq{ } }, passive = { - type = "tcp", healthy = { successes = 3, }, @@ -227,14 +216,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- true } @@ -244,9 +231,6 @@ GET /t --- response_body true true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' diff --git a/t/06-report_http_status.t b/t/with_worker-events/06-report_http_status.t similarity index 95% rename from t/06-report_http_status.t rename to t/with_worker-events/06-report_http_status.t index 235e5c71..0d2c1cdd 100644 --- a/t/06-report_http_status.t +++ b/t/with_worker-events/06-report_http_status.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 53; +plan tests => repeat_each() * 41; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking 
healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy HTTP increment (1/3) for '(127.0.0.1:2119)' unhealthy HTTP increment (2/3) for '(127.0.0.1:2119)' unhealthy HTTP increment (3/3) for '(127.0.0.1:2119)' @@ -148,7 +144,6 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") @@ -157,7 +152,6 @@ qq{ checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- true } @@ -168,8 +162,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/4) for '(127.0.0.1:2119)' healthy SUCCESS increment (2/4) for '(127.0.0.1:2119)' healthy SUCCESS increment (3/4) for '(127.0.0.1:2119)' @@ -230,12 +222,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false } } @@ -243,9 +233,6 @@ qq{ GET /t --- response_body false ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log healthy SUCCESS increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' @@ -299,12 +286,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false } } @@ -312,9 +297,6 @@ qq{ GET /t --- response_body false ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log healthy SUCCESS increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' @@ -368,12 +350,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true } } @@ -381,9 +361,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- 
no_error_log unhealthy HTTP increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'true' to 'false' @@ -437,12 +414,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- true } } @@ -450,9 +425,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy HTTP increment event: target status '(127.0.0.1:2119)' from 'true' to 'false' @@ -489,7 +461,6 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") diff --git a/t/07-report_tcp_failure.t b/t/with_worker-events/07-report_tcp_failure.t similarity index 93% rename from t/07-report_tcp_failure.t rename to t/with_worker-events/07-report_tcp_failure.t index be82e72e..9e4e0e7c 100644 --- a/t/07-report_tcp_failure.t +++ b/t/with_worker-events/07-report_tcp_failure.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 24; +plan tests => repeat_each() * 18; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TCP increment (1/3) for '(127.0.0.1:2120)' unhealthy TCP increment (2/3) for '(127.0.0.1:2120)' unhealthy TCP increment (3/3) for '(127.0.0.1:2120)' @@ -130,7 +126,7 @@ qq{ unhealthy = { interval = 999, -- we don't want active checks tcp_failures = 0, - http_failures = 0, + http_failures = 5, } }, passive = { @@ -146,11 +142,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true } } @@ -158,9 +152,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2120)' from 'true' to 'false' @@ -208,18 
+199,16 @@ qq{ }, unhealthy = { tcp_failures = 0, - http_failures = 0, + http_failures = 5, } } } }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true } } @@ -227,9 +216,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2120)' from 'true' to 'false' diff --git a/t/08-report_timeout.t b/t/with_worker-events/08-report_timeout.t similarity index 94% rename from t/08-report_timeout.t rename to t/with_worker-events/08-report_timeout.t index 317e245f..fb61ea18 100644 --- a/t/08-report_timeout.t +++ b/t/with_worker-events/08-report_timeout.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 22; +plan tests => repeat_each() * 16; my $pwd = cwd(); @@ -70,12 +70,10 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2113, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2122)' unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2122)' event: target status '(127.0.0.1:2122)' from 'true' to 'false' @@ -146,11 +142,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2122, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true } } @@ -158,9 +152,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2122)' from 'true' to 'false' @@ -217,11 +208,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true } } @@ -229,9 +218,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2122)' from 'true' to 'false' diff --git a/t/09-active_probes.t b/t/with_worker-events/09-active_probes.t similarity index 73% rename from 
t/09-active_probes.t rename to t/with_worker-events/09-active_probes.t index 599f65d7..dd68faf4 100644 --- a/t/09-active_probes.t +++ b/t/with_worker-events/09-active_probes.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 56; +plan tests => repeat_each() * 59; my $pwd = cwd(); @@ -55,9 +55,9 @@ qq{ }, } }) + ngx.sleep(2) -- active healthchecks might take some time to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -67,10 +67,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -112,8 +112,8 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -123,10 +123,10 @@ GET /t true --- error_log checking healthy targets: nothing to do -healthy SUCCESS increment (1/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (2/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do === TEST 3: active probes, custom http status (regression test for pre-filled defaults) @@ -167,8 +167,7 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -180,10 +179,10 @@ true checking unhealthy targets: nothing to do --- no_error_log checking healthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' === TEST 4: active probes, custom http status, node failing @@ -223,9 +222,9 @@ qq{ }, } }) + ngx.sleep(2) -- active healthchecks might take some time to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the 
check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -235,10 +234,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -285,9 +284,9 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false) - we.poll() - ngx.sleep(0.3) -- wait for 3x the check interval + ngx.sleep(0.2) -- wait for 2x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true } } @@ -297,7 +296,7 @@ GET /t true --- error_log event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' -checking unhealthy targets: nothing to do +checking unhealthy targets: #1 === TEST 6: active probes, tcp node failing @@ -328,10 +327,10 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start -- Note: no http server configured, so port 2114 remains unanswered local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -341,10 +340,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy TCP increment (1/3) for '(127.0.0.1:2114)' -unhealthy TCP increment (2/3) for '(127.0.0.1:2114)' -unhealthy TCP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy TCP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -385,9 +384,9 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -397,10 +396,10 @@ GET /t true --- error_log checking healthy targets: nothing to do -healthy SUCCESS increment (1/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (2/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do @@ -447,8 +446,8 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false, "custom-host.test") - we.poll() ngx.sleep(0.3) -- wait 
for 3x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true } @@ -462,3 +461,61 @@ event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do + +=== TEST 9: active probes, interval is respected +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + access_by_lua_block { + ngx.sleep(0.3) + ngx.exit(200) + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 1, + successes = 1, + }, + unhealthy = { + interval = 1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(1) -- wait for the check interval + -- checker callback should not be called more than 5 times + if checker.checker_callback_count < 5 then + ngx.say("OK") + else + ngx.say("BAD") + end + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +[error] diff --git a/t/10-garbagecollect.t b/t/with_worker-events/10-garbagecollect.t similarity index 100% rename from t/10-garbagecollect.t rename to t/with_worker-events/10-garbagecollect.t diff --git a/t/11-clear.t b/t/with_worker-events/11-clear.t similarity index 53% rename from t/11-clear.t rename to t/with_worker-events/11-clear.t index 70f1407f..0ddb02d5 100644 --- a/t/11-clear.t +++ b/t/with_worker-events/11-clear.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 23; +plan tests => repeat_each() * 27; my $pwd = cwd(); @@ -100,7 +100,7 @@ initial target list (11 targets) checker1:add_target("127.0.0.1", 20000 + i, nil, false) end checker2:clear() - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) ngx.say(true) } } @@ -151,7 +151,7 @@ qq{ } local checker1 = healthcheck.new(config) checker1:add_target("127.0.0.1", 21120, nil, true) - ngx.sleep(0.3) -- wait 1.5x the interval + ngx.sleep(0.5) -- wait 2.5x the interval checker1:clear() checker1:add_target("127.0.0.1", 21120, nil, true) ngx.sleep(0.3) -- wait 1.5x the interval @@ -164,7 +164,119 @@ GET /t true --- error_log -unhealthy HTTP increment (1/3) for '(127.0.0.1:21120)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:21120)' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:21120)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:21120)' --- no_error_log unhealthy HTTP increment (3/3) for '(127.0.0.1:21120)' + + +=== TEST 4: delayed_clear() clears the list, after interval new checkers don't see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 
10001)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + ngx.say(checker2:get_target_status("127.0.0.1", 10001)) + ngx.sleep(2.6) -- wait while the targets are cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found + +=== TEST 5: delayed_clear() would clear tgt list, but adding again keeps the previous status +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 10001, nil, false) + checker1:add_target("127.0.0.1", 10002, nil, false) + checker1:add_target("127.0.0.1", 10003, nil, false) + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10002)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + checker2:add_target("127.0.0.1", 10002, nil, true) + ngx.say(checker2:get_target_status("127.0.0.1", 10002)) + ngx.sleep(2.6) -- wait while the targets would be cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10002) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10003) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found +false +target not found diff --git a/t/12-set_target_status.t b/t/with_worker-events/12-set_target_status.t similarity index 95% rename from t/12-set_target_status.t rename to t/with_worker-events/12-set_target_status.t index 83364fe1..ff69adc0 100644 --- a/t/12-set_target_status.t +++ b/t/with_worker-events/12-set_target_status.t @@ -33,17 +33,11 @@ qq{ shm_name = "test_shm", }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:set_target_status("127.0.0.1", 2112, nil, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:set_target_status("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } @@ -77,18 +71,12 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:set_target_status("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } @@ -125,19 +113,13 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) checker:set_target_status("127.0.0.1", 2112, nil, true) checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false } } @@ -174,21 +156,14 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() checker:set_target_status("127.0.0.1", 2112, nil, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:report_http_status("127.0.0.1", 2112, nil, 200) checker:set_target_status("127.0.0.1", 2112, nil, false) checker:report_http_status("127.0.0.1", 2112, nil, 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:report_http_status("127.0.0.1", 2112, nil, 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } diff --git a/t/13-integration.t b/t/with_worker-events/13-integration.t similarity index 98% rename from t/13-integration.t rename to t/with_worker-events/13-integration.t index d650bea4..bc3549f3 100644 --- a/t/13-integration.t +++ b/t/with_worker-events/13-integration.t @@ -67,8 +67,6 @@ qq{ local ok, err = checker:add_target(host, port, nil, true) - we.poll() - -- S = successes counter -- F = http_failures counter -- T = tcp_failures counter @@ -114,9 +112,6 @@ qq{ -- that implements the specified behavior. local function run_test_case(case) assert(checker:set_target_status(host, port, nil, true)) - - we.poll() - local i = 1 local s, f, t, o = 0, 0, 0, 0 local mode = true @@ -155,8 +150,6 @@ qq{ --ngx.say(case, ": ", c, " ", string.format("%08x", ctr), " ", state) --ngx.log(ngx.DEBUG, case, ": ", c, " ", string.format("%08x", ctr), " ", state) - we.poll() - if checker:get_target_status(host, port, nil) ~= mode then ngx.say("failed: ", case, " step ", i, " expected ", mode) return false diff --git a/t/14-tls_active_probes.t b/t/with_worker-events/14-tls_active_probes.t similarity index 100% rename from t/14-tls_active_probes.t rename to t/with_worker-events/14-tls_active_probes.t diff --git a/t/15-get_virtualhost_target_status.t b/t/with_worker-events/15-get_virtualhost_target_status.t similarity index 97% rename from t/15-get_virtualhost_target_status.t rename to t/with_worker-events/15-get_virtualhost_target_status.t index f3222453..9bfbc92c 100644 --- a/t/15-get_virtualhost_target_status.t +++ b/t/with_worker-events/15-get_virtualhost_target_status.t @@ -58,20 +58,15 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2115, "ahostname", true) local ok, err = checker:add_target("127.0.0.1", 2115, "otherhostname", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false checker:report_success("127.0.0.1", 2115, "otherhostname") - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_tcp_failure("127.0.0.1", 2115, "otherhostname") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true local _, err = checker:get_target_status("127.0.0.1", 2115) @@ -137,11 +132,9 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, "ahostname", true) local ok, err = checker:add_target("127.0.0.1", 2116, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true checker:report_http_status("127.0.0.1", 2116, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false } @@ -202,10 +195,10 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2117, "healthyserver", true) local ok, err = checker:add_target("127.0.0.1", 2117, "unhealthyserver", true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2117, "healthyserver")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2117, "unhealthyserver")) -- false local _, err = checker:get_target_status("127.0.0.1", 2117) @@ -268,19 +261,16 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2118, "127.0.0.1", true) local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true checker:report_http_status("127.0.0.1", 2118, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true checker:report_http_status("127.0.0.1", 2119, "127.0.0.1", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false diff --git a/t/16-set_all_target_statuses_for_hostname.t b/t/with_worker-events/16-set_all_target_statuses_for_hostname.t similarity index 96% rename from t/16-set_all_target_statuses_for_hostname.t rename to t/with_worker-events/16-set_all_target_statuses_for_hostname.t index 6600dced..ffa9256f 100644 --- a/t/16-set_all_target_statuses_for_hostname.t +++ b/t/with_worker-events/16-set_all_target_statuses_for_hostname.t @@ -35,15 +35,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:set_all_target_statuses_for_hostname("rush", 2112, false) - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:set_all_target_statuses_for_hostname("rush", 2112, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -83,15 +80,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false checker:set_all_target_statuses_for_hostname("rush", 2112, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -133,17 +127,14 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) checker:set_all_target_statuses_for_hostname("rush", 2112, true) checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -186,19 +177,15 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() checker:set_all_target_statuses_for_hostname("rush", 2112, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:report_http_status("127.0.0.1", 2112, "rush", 200) checker:set_all_target_statuses_for_hostname("rush", 2112, false) checker:report_http_status("127.0.0.1", 2112, "rush", 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:report_http_status("127.0.0.1", 2112, "rush", 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false } diff --git a/t/17-mtls.t b/t/with_worker-events/17-mtls.t similarity index 90% rename from t/17-mtls.t rename to t/with_worker-events/17-mtls.t index 21166d64..5883cc9d 100644 --- a/t/17-mtls.t +++ b/t/with_worker-events/17-mtls.t @@ -29,8 +29,8 @@ qq{ assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) local pl_file = require "pl.file" - local cert = pl_file.read("t/util/cert.pem", true) - local key = pl_file.read("t/util/key.pem", true) + local cert = pl_file.read("t/with_worker-events/util/cert.pem", true) + local key = 
pl_file.read("t/with_worker-events/util/key.pem", true) local healthcheck = require("resty.healthcheck") local checker = healthcheck.new({ @@ -85,8 +85,8 @@ qq{ local pl_file = require "pl.file" local ssl = require "ngx.ssl" - local cert = ssl.parse_pem_cert(pl_file.read("t/util/cert.pem", true)) - local key = ssl.parse_pem_priv_key(pl_file.read("t/util/key.pem", true)) + local cert = ssl.parse_pem_cert(pl_file.read("t/with_worker-events/util/cert.pem", true)) + local key = ssl.parse_pem_priv_key(pl_file.read("t/with_worker-events/util/key.pem", true)) local healthcheck = require("resty.healthcheck") local checker = healthcheck.new({ diff --git a/t/20-req-headers.t b/t/with_worker-events/18-req-headers.t similarity index 93% rename from t/20-req-headers.t rename to t/with_worker-events/18-req-headers.t index 13bee7e4..4ba1b02c 100644 --- a/t/20-req-headers.t +++ b/t/with_worker-events/18-req-headers.t @@ -42,7 +42,7 @@ qq{ healthy = { interval = 0.1 }, - headers = {"User-Agent: curl/7.29.0"} + req_headers = {"User-Agent: curl/7.29.0"} } } }) @@ -60,6 +60,7 @@ true checking healthy targets: nothing to do checking healthy targets: #1 GET /status HTTP/1.1 +Connection: close User-Agent: curl/7.29.0 Host: 127.0.0.1 @@ -92,7 +93,7 @@ qq{ healthy = { interval = 0.1 }, - headers = {"User-Agent: curl"} + req_headers = {"User-Agent: curl"} } } }) @@ -110,6 +111,7 @@ true checking healthy targets: nothing to do checking healthy targets: #1 GET /status HTTP/1.1 +Connection: close User-Agent: curl Host: 127.0.0.1 @@ -141,7 +143,7 @@ qq{ healthy = { interval = 0.1 }, - headers = { ["User-Agent"] = "curl" } + req_headers = { ["User-Agent"] = "curl" } } } }) @@ -159,6 +161,7 @@ true checking healthy targets: nothing to do checking healthy targets: #1 GET /status HTTP/1.1 +Connection: close User-Agent: curl Host: 127.0.0.1 @@ -191,7 +194,7 @@ qq{ healthy = { interval = 0.1 }, - headers = { ["User-Agent"] = {"curl"} } + req_headers = { ["User-Agent"] = {"curl"} } } } }) @@ -209,6 +212,7 @@ true checking healthy targets: nothing to do checking healthy targets: #1 GET /status HTTP/1.1 +Connection: close User-Agent: curl Host: 127.0.0.1 @@ -241,7 +245,7 @@ qq{ healthy = { interval = 0.1 }, - headers = { ["User-Agent"] = {"curl", "nginx"} } + req_headers = { ["User-Agent"] = {"curl", "nginx"} } } } }) @@ -259,6 +263,7 @@ true checking healthy targets: nothing to do checking healthy targets: #1 GET /status HTTP/1.1 +Connection: close User-Agent: curl User-Agent: nginx Host: 127.0.0.1 diff --git a/t/get_target_list.t b/t/with_worker-events/98-get_target_list.t similarity index 50% rename from t/get_target_list.t rename to t/with_worker-events/98-get_target_list.t index 67340476..5a92419e 100644 --- a/t/get_target_list.t +++ b/t/with_worker-events/98-get_target_list.t @@ -76,3 +76,68 @@ qq{ --- request GET /t --- timeout: 5 + + + +=== TEST 2: healthcheck - add_target with meta +--- http_config eval +qq{ + $::HttpConfig + + # ignore lua tcp socket read timed out + lua_socket_log_errors off; + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local name = "testing" + local shm_name = "test_shm" + local checker = healthcheck.new({ + name = name, + shm_name = shm_name, + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, -- we don't 
want active checks + successes = 1, + }, + unhealthy = { + interval = 0.1, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + checker:add_target("127.0.0.1", 2116, nil, false, nil, { raw = "host_1" }) + checker:add_target("127.0.0.2", 2116, nil, false, nil, { raw = "host_2" }) + ngx.sleep(3) + local nodes = healthcheck.get_target_list(name, shm_name) + assert(#nodes == 2, "invalid number of nodes") + for _, node in ipairs(nodes) do + assert(node.ip == "127.0.0.1" or node.ip == "127.0.0.2", "invalid ip") + assert(node.port == 2116, "invalid port") + assert(node.status == "healthy", "invalid status") + assert(node.counter.success == 1, "invalid success counter") + assert(node.counter.tcp_failure == 0, "invalid tcp failure counter") + assert(node.counter.http_failure == 0, "invalid http failure counter") + assert(node.counter.timeout_failure == 0, "invalid timeout failure counter") + assert(node.meta.raw == "host_1" or node.meta.raw == "host_2", "invalid node meta") + end + } + } +--- request +GET /t +--- timeout: 5 diff --git a/t/status-ver.t b/t/with_worker-events/99-status_ver.t similarity index 100% rename from t/status-ver.t rename to t/with_worker-events/99-status_ver.t diff --git a/t/with_worker-events/util/cert.pem b/t/with_worker-events/util/cert.pem new file mode 100644 index 00000000..2df6a75a --- /dev/null +++ b/t/with_worker-events/util/cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIUWWntedJ1yLAJE2baK/Mg06osmGAwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UECgwJS29uZyBJbmMuMB4XDTIwMDQyMzIwMjcwMFoXDTMwMDQy +MTIwMjcwMFowFDESMBAGA1UECgwJS29uZyBJbmMuMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAvVBrEH34MzwKlkBapiNyXr9huSShuojy+7i/01BSFng3 +1TiejXJ3pEjykZqt7ENkZ6+BTYUdb9klK221yXiSyX71x97O0WHHuhH/m4XwGiIH +YPBHdg+ExdMRflXgwtlW3of2hTWxkPkPQDPhoSQVMc5DkU7EOgrTxkv1rUWVAed4 +gSK4IT2AkhKwOSkewZANj2bnK5Evf71ACyJd7IQbJAIYoKBwRJAUXJMA7XAreIB+ +nEr9whNYTklhB4aEa2wtOQuiQubIMJzdOryEX5nufH+tL4p1QKhRPFAqqtJ2Czgw +YZY/v9IrThl19r0nL7FIvxFDNIMeOamJxDLQqsh9NwIDAQABo1MwUTAdBgNVHQ4E +FgQU9t6YAdQ5mOXeqvptN5l3yYZGibEwHwYDVR0jBBgwFoAU9t6YAdQ5mOXeqvpt +N5l3yYZGibEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhi83 +aXsfJGqr9Zb1guWxbI8uKoG6o88ptXjV2c6dJnxXag0A/Rj+bX2bcPkN2kvQksNl +MBUQlniOydZfsBUAoC0V7yyGUv9eO2RIeFnnNpRXNu+n+Kg2bvgvu8BKNNNOASZv ++Vmzvo9lbfhS9MNAxYk9eTiPNUZ3zn2RfFyT6YWWJbRjk//EAlchyud3XGug9/hw +c05dtzWEYT8GdzMd+Y1/2kR5r/CapSj7GEqL5T3+zDIfjbhTokV7WBrw6og2avoZ +vzrF8xWucry5/2mKQbRxMyCtKYUKTcoLzF4HrNQCETm0n9qUODrHER7Wit9fQFZX +1GEA3BkX2tsbIVVaig== +-----END CERTIFICATE----- diff --git a/t/with_worker-events/util/key.pem b/t/with_worker-events/util/key.pem new file mode 100644 index 00000000..ae945f44 --- /dev/null +++ b/t/with_worker-events/util/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9UGsQffgzPAqW +QFqmI3Jev2G5JKG6iPL7uL/TUFIWeDfVOJ6NcnekSPKRmq3sQ2Rnr4FNhR1v2SUr +bbXJeJLJfvXH3s7RYce6Ef+bhfAaIgdg8Ed2D4TF0xF+VeDC2Vbeh/aFNbGQ+Q9A +M+GhJBUxzkORTsQ6CtPGS/WtRZUB53iBIrghPYCSErA5KR7BkA2PZucrkS9/vUAL +Il3shBskAhigoHBEkBRckwDtcCt4gH6cSv3CE1hOSWEHhoRrbC05C6JC5sgwnN06 +vIRfme58f60vinVAqFE8UCqq0nYLODBhlj+/0itOGXX2vScvsUi/EUM0gx45qYnE +MtCqyH03AgMBAAECggEAA1hWa/Yt2onnDfyZHXJm5PGwwlq5WNhuorADA7LZoHgD +VIspkgpBvu9jCduX0yLltUdOm5YMjRtjIr9PhP3SaikKIrv3H5AAvXLv90mIko2j +X70fJiDkEbLHDlpqHEdG16vDWVs3hf5AnLvN8tD2ZujkHL8tjHEAiPJyptsh5OSw +XaltCD67U940XXJ89x0zFZ/3RoRk78wX3ELz7/dY0cMnslMavON+LYTq9hQZyVmm +nOhZICWerKjax4t5f9PZ/zM6IhEVrUhw2WrC31tgRo+ITCIA/nkKid8vNhkiLVdw 
+jTyAYDLgYW7K8/zVrzmV9TOr3CaZHLQxnF/LMpIEAQKBgQDjnA/G4g2mDD7lsqU1 +N3it87v2VBnZPFNW6L17Qig+2BDTXg1kadFBlp8qtEJI+H5axVSmzsrlmATJVhUK +iYOQwiEsQnt4tGmWZI268NAIUtv0TX0i9yscsezmvGABMcyBCF7ZwFhUfhy0pn1t +kzmbYN4AjYdcisCnSusoMD92NwKBgQDU7YVNuieMIZCIuSxG61N1+ZyX3Ul5l6KU +m1xw1PZvugqXnQlOLV/4Iaz86Vvlt2aDqTWO/iv4LU7ixNdhRtxFIU/b2a8DzDOw +ijhzMGRJqJOdi1NfciiIWHyrjRmGbhCgm784vqV7qbQomiIsjgnDvjoZkossZMiJ +63vs7huxAQKBgQDiQjT8w6JFuk6cD+Zi7G2unmfvCtNXO7ys3Fffu3g+YJL5SrmN +ZBN8W7qFvQNXfo48tYTc/Rx8941qh4QLIYAD2rcXRE9xQgbkVbj+aHykiZnVVWJb +69CTidux0vist1BPxH5lf+tOsr7eZdKxpnTRnI2Thx1URSoWI0d4f93WKQKBgBXn +kW0bl3HtCgdmtU1ebCmY0ik1VJezp8AN84aQAgIga3KJbymhtVu7ayZhg1iwc1Vc +FOxu7WsMji75/QY+2e4qrSJ61GxZl3+z2HbRJaAGPZlZeew5vD26jKjBTTztGbzM +CPH3euKr5KLAqH9Y5VxDt4pl7vdULuUxWoBXRnYBAoGAHIFMYiCdXETtrFHKVTzc +vm4P24PnsNHoDTGMXPeRYRKF2+3VEJrwp1Q3fue4Go4zFB8I6nhNVIbh4dIHxFab +hyxZvGWGUgRvTvD4VYn/YHVoSf2/xNZ0r/S2LKomp+jwoWKfukbCoDjAOWvnK5iD +o41Tn0yhzBdnrYguKznGR3g= +-----END PRIVATE KEY----- diff --git a/t/with_worker-events/util/reindex b/t/with_worker-events/util/reindex new file mode 100755 index 00000000..77ae5484 --- /dev/null +++ b/t/with_worker-events/util/reindex @@ -0,0 +1,27 @@ +#!/usr/bin/env lua + +if not arg[1] then + io.stderr:write("Usage: "..arg[0].." t/*.t\n") + os.exit(1) +end + +for _, name in ipairs(arg) do + local i = 1 + local fd = io.open(name, "r") + if fd then + local new = name.."~" + local out = io.open(new, "w") + for line in fd:lines() do + local test, n, desc = line:match("^(===%s*TEST%s*)(%d+)(.*)$") + if test then + out:write(test .. tostring(i) .. desc .. "\n") + i = i + 1 + else + out:write(line .. "\n") + end + end + out:close() + fd:close() + os.execute("mv " .. new .. " " .. name) + end +end
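Note on the `reindex` helper added above: it rewrites the `=== TEST n` headers of the given test files so they are numbered consecutively, per its own usage message. A typical invocation (assuming it is run from the repository root, where the script is installed executable) would be:

    t/with_worker-events/util/reindex t/with_worker-events/*.t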

    checker:clear ()                                 Clear all healthcheck data.
    checker:delayed_clear (delay)                    Clear all healthcheck data after a period of time.
    checker:get_target_status (ip, port, hostname)   Get the current status of the target.
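The three reference entries above cover the clear/status API exercised throughout the tests in this patch. The following is a minimal sketch of how they fit together, assuming the same `test_shm` and `my_worker_events` shared dicts configured by the test harness and the worker-events backend, and meant to run inside an OpenResty handler such as a `content_by_lua_block`:

        local we = require "resty.worker.events"
        assert(we.configure{ shm = "my_worker_events", interval = 0.1 })

        local healthcheck = require "resty.healthcheck"
        local checker = healthcheck.new({
            name = "testing",
            shm_name = "test_shm",
            checks = {
                active = {
                    healthy   = { interval = 0.1 },
                    unhealthy = { interval = 0.1 },
                },
            },
        })
        ngx.sleep(0.1)  -- let the initial timers run once

        -- register a target and read back its current status
        checker:add_target("127.0.0.1", 2112, nil, true)
        ngx.say(checker:get_target_status("127.0.0.1", 2112))  -- true

        -- schedule removal of all targets; as TEST 5 of 11-clear.t above shows,
        -- a target re-added before the delay expires keeps its previous status
        checker:delayed_clear(0.2)

        -- or drop all healthcheck data immediately
        checker:clear()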