diff --git a/.asf.yaml b/.asf.yaml index 1ec6a2f34d40..9038b5c11c43 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -17,16 +17,16 @@ github: description: The Cloud-Native API Gateway - homepage: https://apisix.apache.org/ + homepage: https://apisix.apache.org/blog/ labels: - api-gateway - cloud-native - nginx - - lua - luajit - apigateway - microservices - api + - apis - loadbalancing - reverse-proxy - api-management @@ -36,6 +36,9 @@ github: - devops - kubernetes - docker + - kubernetes-ingress + - kubernetes-ingress-controller + - service-mesh enabled_merge_buttons: squash: true @@ -50,6 +53,14 @@ github: dismiss_stale_reviews: true require_code_owner_reviews: true required_approving_review_count: 2 + release/2.99: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 + release/2.15: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 release/2.14: required_pull_request_reviews: require_code_owner_reviews: true diff --git a/.github/actions/action-semantic-pull-request b/.github/actions/action-semantic-pull-request new file mode 160000 index 000000000000..348e2e692213 --- /dev/null +++ b/.github/actions/action-semantic-pull-request @@ -0,0 +1 @@ +Subproject commit 348e2e6922130ee27d6d6a0a3b284890776d1f80 diff --git a/.github/semantic.yml b/.github/semantic.yml deleted file mode 100644 index 5fe591ed88af..000000000000 --- a/.github/semantic.yml +++ /dev/null @@ -1,15 +0,0 @@ -titleOnly: true -allowRevertCommits: true -types: - - feat - - fix - - docs - - style - - refactor - - perf - - test - - build - - ci - - chore - - revert - - change diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 21d185cbe455..9c251732868f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,10 +25,10 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - 
linux_openresty_1_19 test_dir: - t/plugin - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc @@ -106,6 +106,10 @@ jobs: - name: Start CI env (PLUGIN_TEST) if: steps.test_env.outputs.type == 'plugin' run: | + # download keycloak cas provider + sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar + + sh ci/pod/openfunction/build-function-image.sh make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml index b308c79fb95b..2da72dc6b190 100644 --- a/.github/workflows/centos7-ci.yml +++ b/.github/workflows/centos7-ci.yml @@ -101,6 +101,10 @@ jobs: - name: Start CI env (PLUGIN_TEST) if: steps.test_env.outputs.type == 'plugin' run: | + # download keycloak cas provider + sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar + + sh ci/pod/openfunction/build-function-image.sh make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml index 20b45f602c90..95f48132cb54 100644 --- a/.github/workflows/chaos.yml +++ b/.github/workflows/chaos.yml @@ -26,7 +26,7 @@ jobs: - name: Setup go uses: actions/setup-go@v3 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/cache@v3 with: diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml index 58a34d8b187a..fc6b76c26c4b 100644 --- a/.github/workflows/cli-master.yml +++ b/.github/workflows/cli-master.yml @@ -26,7 +26,7 @@ jobs: matrix: job_name: - linux_apisix_master_luarocks - runs-on: 
ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 15 env: OPENRESTY_VERSION: default diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 342da39b2df8..3582d76183c4 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -25,7 +25,7 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 job_name: - linux_apisix_current_luarocks - linux_apisix_current_luarocks_in_customed_nginx diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml index d6b64921b0da..e2a2c36204a7 100644 --- a/.github/workflows/doc-lint.yml +++ b/.github/workflows/doc-lint.yml @@ -1,6 +1,10 @@ name: Doc Lint on: + push: + paths: + - 'docs/**' + - '**/*.md' pull_request: branches: [master, 'release/**'] paths: @@ -18,7 +22,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: 🚀 Use Node.js - uses: actions/setup-node@v3.3.0 + uses: actions/setup-node@v3.4.1 with: node-version: '12.x' - run: npm install -g markdownlint-cli@0.25.0 diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml index 426ebcc3768c..60dc602c561a 100644 --- a/.github/workflows/fuzzing-ci.yaml +++ b/.github/workflows/fuzzing-ci.yaml @@ -63,21 +63,15 @@ jobs: - name: install boofuzz run: | + # Avoid "ERROR: flask has requirement click>=8.0, but you'll have click 7.0 which is incompatible" + sudo apt remove python3-click pip install -r $PWD/t/fuzzing/requirements.txt - - name: run simpleroute test + - name: run tests run: | python $PWD/t/fuzzing/simpleroute_test.py - - - name: run serverless route test - run: | python $PWD/t/fuzzing/serverless_route_test.py - - - name: run vars route test - run: | python $PWD/t/fuzzing/vars_route_test.py - - - name: run check leak test - run: | python $PWD/t/fuzzing/client_abort.py python $PWD/t/fuzzing/simple_http.py + python $PWD/t/fuzzing/http_upstream.py diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml index 66615cf80cc1..b6495d765377 100644 --- 
a/.github/workflows/kubernetes-ci.yml +++ b/.github/workflows/kubernetes-ci.yml @@ -25,10 +25,10 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 @@ -44,22 +44,17 @@ jobs: - name: Setup kubernetes cluster run: | - KIND_VERSION="v0.11.1" - KUBECTL_VERSION="v1.22.0" - curl -Lo ./kind "https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-$(uname)-amd64" - curl -Lo ./kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" - chmod +x ./kind - chmod +x ./kubectl + KUBERNETES_VERSION="v1.22.7" - ./kind create cluster --name apisix-test --config ./t/kubernetes/configs/kind.yaml + kind create cluster --name apisix-test --config ./t/kubernetes/configs/kind.yaml --image kindest/node:${KUBERNETES_VERSION} - ./kubectl wait --for=condition=Ready nodes --all --timeout=180s + kubectl wait --for=condition=Ready nodes --all --timeout=180s - ./kubectl apply -f ./t/kubernetes/configs/account.yaml + kubectl apply -f ./t/kubernetes/configs/account.yaml - ./kubectl apply -f ./t/kubernetes/configs/endpoint.yaml + kubectl apply -f ./t/kubernetes/configs/endpoint.yaml - KUBERNETES_CLIENT_TOKEN_CONTENT=$(./kubectl get secrets | grep apisix-test | awk '{system("./kubectl get secret -o jsonpath={.data.token} "$1" | base64 --decode")}') + KUBERNETES_CLIENT_TOKEN_CONTENT=$(kubectl get secrets | grep apisix-test | awk '{system("kubectl get secret -o jsonpath={.data.token} "$1" | base64 --decode")}') KUBERNETES_CLIENT_TOKEN_DIR="/tmp/var/run/secrets/kubernetes.io/serviceaccount" @@ -73,7 +68,7 @@ jobs: echo 'KUBERNETES_CLIENT_TOKEN='"${KUBERNETES_CLIENT_TOKEN_CONTENT}" echo 'KUBERNETES_CLIENT_TOKEN_FILE='${KUBERNETES_CLIENT_TOKEN_FILE} - ./kubectl proxy -p 6445 & + kubectl proxy -p 6445 & - name: Linux Install run: | diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml index 
697a956512cb..55abed61cbc5 100644 --- a/.github/workflows/license-checker.yml +++ b/.github/workflows/license-checker.yml @@ -32,6 +32,6 @@ jobs: steps: - uses: actions/checkout@v3 - name: Check License Header - uses: apache/skywalking-eyes@v0.3.0 + uses: apache/skywalking-eyes@v0.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2338100168a7..61e279a86d31 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,6 +1,6 @@ name: ❄️ Lint -on: [pull_request] +on: [push, pull_request] permissions: contents: read @@ -32,7 +32,7 @@ jobs: uses: actions/checkout@v3 - name: Setup Nodejs env - uses: actions/setup-node@v3.3.0 + uses: actions/setup-node@v3.4.1 with: node-version: '12' diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index 0f022df602b6..786c43362898 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -13,7 +13,7 @@ permissions: jobs: performance: if: github.event_name == 'pull_request' && github.event.label.name == 'performance' - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 45 steps: diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml new file mode 100644 index 000000000000..dc1a79010b5c --- /dev/null +++ b/.github/workflows/semantic.yml @@ -0,0 +1,35 @@ +name: "PR Lint" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + main: + name: Validate PR title + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v3 + with: + submodules: recursive + - uses: ./.github/actions/action-semantic-pull-request + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + types: | + feat + fix + docs + style + refactor + perf + test + build + ci + chore + revert + change diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 7a41b11c1753..0f54f6ec0b68 100644 --- 
a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Prune Stale - uses: actions/stale@v5 + uses: actions/stale@v6 with: days-before-issue-stale: 350 days-before-issue-close: 14 diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml index e85044671b89..79b69586255d 100644 --- a/.github/workflows/tars-ci.yml +++ b/.github/workflows/tars-ci.yml @@ -25,10 +25,10 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 diff --git a/.gitignore b/.gitignore index 33afe64aa1a2..25bc8265ab94 100644 --- a/.gitignore +++ b/.gitignore @@ -77,6 +77,10 @@ t/fuzzing/__pycache__/ boofuzz-results/ *.pyc *.wasm +t/grpc_server_example/grpc_server_example +t/plugin/grpc-web/grpc-web-server +t/plugin/grpc-web/node_modules/ + # release tar package *.tgz release/* diff --git a/.gitmodules b/.gitmodules index beb354b89aa3..3c8ed44e4c56 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "t/toolkit"] path = t/toolkit url = https://github.com/api7/test-toolkit.git +[submodule ".github/actions/action-semantic-pull-request"] + path = .github/actions/action-semantic-pull-request + url = https://github.com/amannn/action-semantic-pull-request.git diff --git a/.licenserc.yaml b/.licenserc.yaml index 85f1c69e4722..ea5863015302 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -19,7 +19,7 @@ header: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation - license-location-threshold: 350 + license-location-threshold: 360 paths-ignore: - '.gitignore' diff --git a/CHANGELOG.md b/CHANGELOG.md index 63e5737651b6..8da19cb395d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,8 +23,11 @@ title: Changelog ## Table of Contents +- [3.0.0-beta](#300-beta) +- [2.15.0](#2150) - [2.14.1](#2141) - [2.14.0](#2140) +- [2.13.3](#2133) - [2.13.2](#2132) - 
[2.13.1](#2131) - [2.13.0](#2130) @@ -59,6 +62,206 @@ title: Changelog - [0.7.0](#070) - [0.6.0](#060) +## 3.0.0-beta + +Here we use 2.99.0 as the version number in the source code instead of the code name +`3.0.0-beta` for two reasons: + +1. avoid unexpected errors when some programs try to compare the +version, as `3.0.0-beta` contains `3.0.0` and is longer than it. +2. some package system might not allow package which has a suffix +after the version number. + +### Change + +#### Moves the config_center, etcd and Admin API configuration to the deployment + +We've adjusted the configuration in the static configuration file, so you need to update the configuration in `config.yaml` as well: + +- The `config_center` function is now implemented by `config_provider` under `deployment`: [#7901](https://github.com/apache/apisix/pull/7901) +- The `etcd` field is moved to `deployment`: [#7860](https://github.com/apache/apisix/pull/7860) +- The following Admin API configuration is moved to the `admin` field under `deployment`: [#7823](https://github.com/apache/apisix/pull/7823) + - admin_key + - enable_admin_cors + - allow_admin + - admin_listen + - https_admin + - admin_api_mtls + - admin_api_version + +You can refer to the latest `config-default.yaml` for details. + +#### Removing multiple deprecated configurations + +With the new 3.0 release, we took the opportunity to clean out many configurations that were previously marked as deprecated. 
+ +In the static configuration, we removed several fields as follows: + +- Removed `enable_http2` and `listen_port` from `apisix.ssl`: [#7717](https://github.com/apache/apisix/pull/7717) +- Removed `apisix.port_admin`: [#7716](https://github.com/apache/apisix/pull/7716) +- Removed `etcd.health_check_retry`: [#7676](https://github.com/apache/apisix/pull/7676) +- Removed `nginx_config.http.lua_shared_dicts`: [#7677](https://github.com/apache/apisix/pull/7677) +- Removed `nginx_config.http.real_ip_header`: [#7696](https://github.com/apache/apisix/pull/7696) + +In the dynamic configuration, we made the following adjustments: + +- Moved `disable` of the plugin configuration under `_meta`: [#7707](https://github.com/apache/apisix/pull/7707) +- Removed `service_protocol` from the Route: [#7701](https://github.com/apache/apisix/pull/7701) + +There are also specific plugin level changes: + +- Removed `audience` field from authz-keycloak: [#7683](https://github.com/apache/apisix/pull/7683) +- Removed `upstream` field from mqtt-proxy: [#7694](https://github.com/apache/apisix/pull/7694) +- tcp-related configuration placed under the `tcp` field in error-log-logger: [#7700](https://github.com/apache/apisix/pull/7700) +- Removed `max_retry_times` and `retry_interval` fields from syslog: [#7699](https://github.com/apache/apisix/pull/7699) +- The `scheme` field has been removed from proxy-rewrite: [#7695](https://github.com/apache/apisix/pull/7695) + +#### New Admin API response format + +We have adjusted the response format of the Admin API in several PRs as follows: + +- [#7630](https://github.com/apache/apisix/pull/7630) +- [#7622](https://github.com/apache/apisix/pull/7622) + +The new response format is shown below: + +Returns a single configuration: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... 
+ }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +Returns multiple configurations: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +#### Other + +- Port of Admin API changed to 9180: [#7806](https://github.com/apache/apisix/pull/7806) +- We only support OpenResty 1.19.3.2 and above: [#7625](https://github.com/apache/apisix/pull/7625) +- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github. com/apache/apisix/pull/7614) + +### Core + +- Integrating grpc-client-nginx-module to APISIX: [#7917](https://github.com/apache/apisix/pull/7917) +- k8s service discovery support for configuring multiple clusters: [#7895](https://github.com/apache/apisix/pull/7895) + +### Plugin + +- Support for injecting header with specified prefix in opentelemetry plugin: [#7822](https://github.com/apache/apisix/pull/7822) +- Added openfunction plugin: [#7634](https://github.com/apache/apisix/pull/7634) +- Added elasticsearch-logger plugin: [#7643](https://github.com/apache/apisix/pull/7643) +- response-rewrite plugin supports adding response bodies: [#7794](https://github.com/apache/apisix/pull/7794) +- log-rorate supports specifying the maximum size to cut logs: [#7749](https://github.com/apache/apisix/pull/7749) +- Added workflow plug-in. 
+ - [#7760](https://github.com/apache/apisix/pull/7760) + - [#7771](https://github.com/apache/apisix/pull/7771) +- Added Tencent Cloud Log Service plugin: [#7593](https://github.com/apache/apisix/pull/7593) +- jwt-auth supports ES256 algorithm: [#7627](https://github.com/apache/apisix/pull/7627) +- ldap-auth internal implementation, switching from lualdap to lua-resty-ldap: [#7590](https://github.com/apache/apisix/pull/7590) +- http request metrics within the prometheus plugin supports setting additional labels via variables: [#7549](https://github.com/apache/apisix/pull/7549) +- The clickhouse-logger plugin supports specifying multiple clickhouse endpoints: [#7517](https://github.com/apache/apisix/pull/7517) + +### Bugfix + +- gRPC proxy sets :authority request header to configured upstream Host: [#7939](https://github.com/apache/apisix/pull/7939) +- response-rewrite writing to an empty body may cause APISIX to fail to respond to the request: [#7836](https://github.com/apache/apisix/pull/7836) +- Fix the problem that when using Plugin Config and Consumer at the same time, there is a certain probability that the plugin configuration is not updated: [#7965](https://github.com/apache/apisix/pull/7965) +- Only reopen log files once when log cutting: [#7869](https://github.com/apache/apisix/pull/7869) +- Passive health checks should not be enabled by default: [#7850](https://github.com/apache/apisix/pull/7850) +- The zipkin plugin should pass trace IDs upstream even if it does not sample: [#7833](https://github.com/apache/apisix/pull/7833) +- Correction of opentelemetry span kind to server: [#7830](https://github.com/apache/apisix/pull/7830) +- in limit-count plugin, different routes with the same configuration should not share the same counter: [#7750](https://github.com/apache/apisix/pull/7750) +- Fix occasional exceptions thrown when removing clean_handler: [#7648](https://github.com/apache/apisix/pull/7648) +- Allow direct use of IPv6 literals when configuring 
upstream nodes: [#7594](https://github.com/apache/apisix/pull/7594) +- The wolf-rbac plugin adjusts the way it responds to errors: + - [#7561](https://github.com/apache/apisix/pull/7561) + - [#7497](https://github.com/apache/apisix/pull/7497) +- the phases after proxy didn't run when 500 error happens before proxy: [#7703](https://github.com/apache/apisix/pull/7703) +- avoid error when multiple plugins associated with consumer and have rewrite phase: [#7531](https://github.com/apache/apisix/pull/7531) +- upgrade lua-resty-etcd to 1.8.3 which fixes various issues: [#7565](https://github.com/apache/apisix/pull/7565) + +## 2.15.0 + +### Change + +- We now map the grpc error code OUT_OF_RANGE to http code 400 in grpc-transcode plugin: [#7419](https://github.com/apache/apisix/pull/7419) +- Rename health_check_retry configuration in etcd section of `config-default.yaml` to startup_retry: [#7304](https://github.com/apache/apisix/pull/7304) +- Remove `upstream.enable_websocket` which is deprecated since 2020: [#7222](https://github.com/apache/apisix/pull/7222) + +### Core + +- Support running plugins conditionally: [#7453](https://github.com/apache/apisix/pull/7453) +- Allow users to specify plugin execution priority: [#7273](https://github.com/apache/apisix/pull/7273) +- Support getting upstream certificate from ssl object: [#7221](https://github.com/apache/apisix/pull/7221) +- Allow customizing error response in the plugin: [#7128](https://github.com/apache/apisix/pull/7128) +- Add metrics to xRPC Redis proxy: [#7183](https://github.com/apache/apisix/pull/7183) +- Introduce deployment role to simplify the deployment of APISIX: + - [#7405](https://github.com/apache/apisix/pull/7405) + - [#7417](https://github.com/apache/apisix/pull/7417) + - [#7392](https://github.com/apache/apisix/pull/7392) + - [#7365](https://github.com/apache/apisix/pull/7365) + - [#7249](https://github.com/apache/apisix/pull/7249) + +### Plugin + +- Add ngx.shared.dict statistic in prometheus plugin: 
[#7412](https://github.com/apache/apisix/pull/7412) +- Allow using unescaped raw URL in proxy-rewrite plugin: [#7401](https://github.com/apache/apisix/pull/7401) +- Add PKCE support to the openid-connect plugin: [#7370](https://github.com/apache/apisix/pull/7370) +- Support custom log format in sls-logger plugin: [#7328](https://github.com/apache/apisix/pull/7328) +- Export some params for kafka-client in kafka-logger plugin: [#7266](https://github.com/apache/apisix/pull/7266) +- Add support for capturing OIDC refresh tokens in openid-connect plugin: [#7220](https://github.com/apache/apisix/pull/7220) +- Add prometheus plugin in stream subsystem: [#7174](https://github.com/apache/apisix/pull/7174) + +### Bugfix + +- clear remain state from the latest try before retrying in Kubernetes discovery: [#7506](https://github.com/apache/apisix/pull/7506) +- the query string was repeated twice when enabling both http_to_https and append_query_string in the redirect plugin: [#7433](https://github.com/apache/apisix/pull/7433) +- don't send empty Authorization header by default in http-logger: [#7444](https://github.com/apache/apisix/pull/7444) +- ensure both `group` and `disable` configurations can be used in limit-count: [#7384](https://github.com/apache/apisix/pull/7384) +- adjust the execution priority of request-id so the tracing plugins can use the request id: [#7281](https://github.com/apache/apisix/pull/7281) +- correct the transcode of repeated Message in grpc-transcode: [#7231](https://github.com/apache/apisix/pull/7231) +- var missing in proxy-cache cache key should be ignored: [#7168](https://github.com/apache/apisix/pull/7168) +- reduce memory usage when abnormal weights are given in chash: [#7103](https://github.com/apache/apisix/pull/7103) +- cache should be bypassed when the method mismatch in proxy-cache: [#7111](https://github.com/apache/apisix/pull/7111) +- Upstream keepalive should consider TLS param: +    - 
[#7054](https://github.com/apache/apisix/pull/7054) +    - [#7466](https://github.com/apache/apisix/pull/7466) +- The redirect plugin sets a correct port during redirecting HTTP to HTTPS: +    - [#7065](https://github.com/apache/apisix/pull/7065) + ## 2.14.1 ### Bugfix @@ -121,6 +324,12 @@ title: Changelog - [#6686](https://github.com/apache/apisix/pull/6686) - Admin API rejects unknown stream plugin: [#6813](https://github.com/apache/apisix/pull/6813) +## 2.13.3 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + +[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133) + ## 2.13.2 **This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** diff --git a/LICENSE b/LICENSE index 0474b542fb65..5cadce448d62 100644 --- a/LICENSE +++ b/LICENSE @@ -216,3 +216,4 @@ The following components are provided under the Apache License. See project link The text of each license is the standard Apache 2.0 license. ewma.lua file from kubernetes/ingress-nginx: https://github.com/kubernetes/ingress-nginx Apache 2.0 + hello.go file from OpenFunction/samples: https://github.com/OpenFunction/samples Apache 2.0 diff --git a/MAINTAIN.md b/MAINTAIN.md index cc91824d0045..795aa8c665ef 100644 --- a/MAINTAIN.md +++ b/MAINTAIN.md @@ -26,8 +26,7 @@ 2. Create a [pull request](https://github.com/apache/apisix/commit/21d7673c6e8ff995677456cdebc8ded5afbb3d0a) (contains the backport commits, and the change in step 1) to minor branch > This should include those PRs that contain the `need backport` tag since the last patch release. Also, the title of these PRs need to be added to the changelog of the minor branch. 3. Merge it into minor branch -4. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created -via `VERSION=x.y.z make release-src` +4. Package a vote artifact to Apache's dev-apisix repo. 
The artifact can be created via `VERSION=x.y.z make release-src` 5. Send the [vote email](https://lists.apache.org/thread/vq4qtwqro5zowpdqhx51oznbjy87w9d0) to dev@apisix.apache.org > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` 6. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/k2frnvj4zj9oynsbr7h7nd6n6m3q5p89) to dev@apisix.apache.org @@ -38,15 +37,15 @@ via `VERSION=x.y.z make release-src` 11. Update APISIX rpm package > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the package to yum repo -12. First, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2` +12. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. + - If released an LTS version and the version number less than the current largest(e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`, after PR reviewed, don't need to merged PR, just close the PR and push the branch to APISIX docker repository. 13. 
Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) if the version number is the largest 14. Send the [ANNOUNCE email](https://lists.apache.org/thread.html/ree7b06e6eac854fd42ba4f302079661a172f514a92aca2ef2f1aa7bb%40%3Cdev.apisix.apache.org%3E) to dev@apisix.apache.org & announce@apache.org ### Release minor version 1. Create a minor branch, and create [pull request](https://github.com/apache/apisix/commit/bc6ddf51f15e41fffea6c5bd7d01da9838142b66) to master branch from it -2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created -via `VERSION=x.y.z make release-src` +2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` 3. Send the [vote email](https://lists.apache.org/thread/q8zq276o20r5r9qjkg074nfzb77xwry9) to dev@apisix.apache.org > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` 4. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/p1m9s116rojlhb91g38cj8646393qkz7) to dev@apisix.apache.org @@ -57,6 +56,7 @@ via `VERSION=x.y.z make release-src` 9. Update [APISIX's website](https://github.com/apache/apisix-website/commit/7bf0ab5a1bbd795e6571c4bb89a6e646115e7ca3) 10. Update APISIX rpm package. > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the rpm package to yum repo -11. First, Update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2` +11. 
- If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. + - If released an LTS version and the version number less than the current largest(e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`, after PR reviewed, don't need to merged PR, just close the PR and push the branch to APISIX docker repository. 12. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) 13. Send the [ANNOUNCE email](https://lists.apache.org/thread/4s4msqwl1tq13p9dnv3hx7skbgpkozw1) to dev@apisix.apache.org & announce@apache.org diff --git a/Makefile b/Makefile index 6c82a6a94341..49468dc57e41 100644 --- a/Makefile +++ b/Makefile @@ -243,7 +243,7 @@ clean: .PHONY: reload reload: runtime @$(call func_echo_status, "$@ -> [ Start ]") - $(ENV_NGINX) -s reload + $(ENV_APISIX) reload @$(call func_echo_success_status, "$@ -> [ Done ]") @@ -259,19 +259,19 @@ install: runtime $(ENV_INSTALL) conf/debug.yaml /usr/local/apisix/conf/debug.yaml $(ENV_INSTALL) conf/cert/* /usr/local/apisix/conf/cert/ - # Lua directories listed in alphabetical order + # directories listed in alphabetical order $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix $(ENV_INSTALL) apisix/*.lua $(ENV_INST_LUADIR)/apisix/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model - $(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/admin $(ENV_INSTALL) 
apisix/admin/*.lua $(ENV_INST_LUADIR)/apisix/admin/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/balancer $(ENV_INSTALL) apisix/balancer/*.lua $(ENV_INST_LUADIR)/apisix/balancer/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli + $(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/control $(ENV_INSTALL) apisix/control/*.lua $(ENV_INST_LUADIR)/apisix/control/ @@ -281,34 +281,34 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core/dns $(ENV_INSTALL) apisix/core/dns/*.lua $(ENV_INST_LUADIR)/apisix/core/dns - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli - $(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery $(ENV_INSTALL) apisix/discovery/*.lua $(ENV_INST_LUADIR)/apisix/discovery/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery/{consul_kv,dns,eureka,nacos,kubernetes,tars} $(ENV_INSTALL) apisix/discovery/consul_kv/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul_kv $(ENV_INSTALL) apisix/discovery/dns/*.lua $(ENV_INST_LUADIR)/apisix/discovery/dns $(ENV_INSTALL) apisix/discovery/eureka/*.lua $(ENV_INST_LUADIR)/apisix/discovery/eureka - $(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos $(ENV_INSTALL) apisix/discovery/kubernetes/*.lua $(ENV_INST_LUADIR)/apisix/discovery/kubernetes + $(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos $(ENV_INSTALL) apisix/discovery/tars/*.lua $(ENV_INST_LUADIR)/apisix/discovery/tars - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub - $(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http $(ENV_INSTALL) apisix/http/*.lua $(ENV_INST_LUADIR)/apisix/http/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http/router $(ENV_INSTALL) apisix/http/router/*.lua $(ENV_INST_LUADIR)/apisix/http/router/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model + 
$(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins $(ENV_INSTALL) apisix/plugins/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin $(ENV_INSTALL) apisix/plugins/ext-plugin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging + $(ENV_INSTALL) apisix/plugins/google-cloud-logging/*.lua $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode $(ENV_INSTALL) apisix/plugins/grpc-transcode/*.lua $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode/ @@ -321,9 +321,6 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-count $(ENV_INSTALL) apisix/plugins/limit-count/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-count/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging - $(ENV_INSTALL) apisix/plugins/google-cloud-logging/*.lua $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/opa $(ENV_INSTALL) apisix/plugins/opa/*.lua $(ENV_INST_LUADIR)/apisix/plugins/opa/ @@ -342,6 +339,12 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog $(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls + $(ENV_INSTALL) apisix/plugins/tencent-cloud-cls/*.lua $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub + $(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/zipkin $(ENV_INSTALL) apisix/plugins/zipkin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/zipkin/ diff --git a/README.md b/README.md index e28bef917c13..ee401d93b7c3 100644 --- a/README.md +++ 
b/README.md @@ -26,6 +26,7 @@ [![Commit activity](https://img.shields.io/github/commit-activity/m/apache/apisix)](https://github.com/apache/apisix/graphs/commit-activity) [![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Average time to resolve an issue") [![Percentage of issues still open](http://isitmaintained.com/badge/open/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Percentage of issues still open") +[![Slack](https://badgen.net/badge/Slack/Join%20Apache%20APISIX?icon=slack)](https://apisix.apache.org/slack) **Apache APISIX** is a dynamic, real-time, high-performance API Gateway. @@ -43,7 +44,7 @@ The technical architecture of Apache APISIX: - Mailing List: Mail to dev-subscribe@apisix.apache.org, follow the reply to subscribe to the mailing list. - QQ Group - 552030619, 781365357 -- Slack Workspace - [invitation link](https://join.slack.com/t/the-asf/shared_invite/zt-vlfbf7ch-HkbNHiU_uDlcH_RvaHv9gQ) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix"). +- Slack Workspace - [invitation link](https://apisix.apache.org/slack) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix"). 
- ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - follow and interact with us using hashtag `#ApacheAPISIX` - [Documentation](https://apisix.apache.org/docs/) - [Discussions](https://github.com/apache/apisix/discussions) @@ -75,7 +76,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - **Full Dynamic** - [Hot Updates And Hot Plugins](docs/en/latest/terminology/plugin.md): Continuously updates its configurations and plugins without restarts! - - [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Support rewrite the `host`, `uri`, `schema`, `enable_websocket`, `headers` of the request before send to upstream. + - [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Support rewrite the `host`, `uri`, `schema`, `method`, `headers` of the request before send to upstream. - [Response Rewrite](docs/en/latest/plugins/response-rewrite.md): Set customized response status code, body and header to the client. - Dynamic Load Balancing: Round-robin load balancing with weight. - Hash-based Load Balancing: Load balance with consistent hashing sessions. @@ -134,7 +135,9 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - High performance: The single-core QPS reaches 18k with an average delay of fewer than 0.2 milliseconds. - [Fault Injection](docs/en/latest/plugins/fault-injection.md) - [REST Admin API](docs/en/latest/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also, note that the Admin API uses key auth to verify the identity of the caller. **The `admin_key` field in `conf/config.yaml` needs to be modified before deployment to ensure security**. - - External Loggers: Export access logs to external log management tools. 
([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md)) + - External Loggers: Export access logs to external log management tools. ([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md), [TencentCloud CLS](docs/en/latest/plugins/tencent-cloud-cls.md)). + - [ClickHouse](docs/en/latest/plugins/clickhouse-logger.md): push logs to ClickHouse. + - [Elasticsearch](docs/en/latest/plugins/elasticsearch-logger.md): push logs to Elasticsearch. - [Datadog](docs/en/latest/plugins/datadog.md): push custom metrics to the DogStatsD server, comes bundled with [Datadog agent](https://docs.datadoghq.com/agent/), over the UDP protocol. 
DogStatsD basically is an implementation of StatsD protocol which collects the custom metrics for Apache APISIX agent, aggregates it into a single data point and sends it to the configured Datadog server. - [Helm charts](https://github.com/apache/apisix-helm-chart) - [HashiCorp Vault](https://www.vaultproject.io/): Support secret management solution for accessing secrets from Vault secure storage backed in a low trust environment. Currently, RS256 keys (public-private key pairs) or secret keys can be linked from vault in [jwt-auth](docs/en/latest/plugins/jwt-auth.md#enable-jwt-auth-with-vault-compatibility) authentication plugin. @@ -162,11 +165,11 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against 1. Installation - Please refer to [install documentation](docs/en/latest/installation-guide.md). + Please refer to [install documentation](https://apisix.apache.org/docs/apisix/installation-guide/). 2. Getting started - The getting started guide is a great way to learn the basics of APISIX. Just follow the steps in [Getting Started](docs/en/latest/getting-started.md). + The getting started guide is a great way to learn the basics of APISIX. Just follow the steps in [Getting Started](https://apisix.apache.org/docs/apisix/getting-started/). Further, you can follow the documentation to try more [plugins](docs/en/latest/plugins). @@ -187,7 +190,7 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on [Benchmark script](benchmark/run.sh) has been open sourced, welcome to try and contribute. 
-[The APISIX APISIX Gateway also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3) +[APISIX also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3) ## Contributor Over Time @@ -199,7 +202,7 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on - [European eFactory Platform: API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform) - [Copernicus Reference System Software](https://github.com/COPRS/infrastructure/wiki/Networking-trade-off) -- [More Stories](https://apisix.apache.org/blog/tags/user-case) +- [More Stories](https://apisix.apache.org/blog/tags/case-studies/) ## Who Uses APISIX API Gateway? diff --git a/Vision-and-Milestones.md b/Vision-and-Milestones.md new file mode 100644 index 000000000000..333d991f9399 --- /dev/null +++ b/Vision-and-Milestones.md @@ -0,0 +1,40 @@ + + +### Vision + +Apache APISIX is an open source API gateway designed to help developers connect any APIs securely and efficiently in any environment. + +Managing thousands or tens of thousands of APIs and microservices in a multi-cloud and hybrid cloud environment is not an easy task. +There will be many challenges as authentication, observability, security, etc. + +Apache APISIX, a community-driven project, hopes to help everyone better manage and use APIs through the power of developers. +Every developer's contribution will used by thousands of companies and served by billions of users. 
+ +### Milestones + +Apache APISIX has relatively complete features for north-south traffic, +and will be iterated around the following directions in the next 6 months (if you have any ideas, feel free to create issue to discuss): + +- More complete support for Gateway API on APISIX ingress controller +- Add support for service mesh +- User-friendly documentation +- More plugins for public cloud and SaaS services +- Java/Go plugins and Wasm production-ready +- Add dynamic debugging tools for Apache APISIX diff --git a/apisix/admin/consumers.lua b/apisix/admin/consumers.lua index 46b23de09bdb..77416dbb07df 100644 --- a/apisix/admin/consumers.lua +++ b/apisix/admin/consumers.lua @@ -22,6 +22,7 @@ local pairs = pairs local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/global_rules.lua b/apisix/admin/global_rules.lua index c4dd4ca93380..88d3d2af6009 100644 --- a/apisix/admin/global_rules.lua +++ b/apisix/admin/global_rules.lua @@ -23,6 +23,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua index 318348ecd4ab..bdf19da38bee 100644 --- a/apisix/admin/init.lua +++ b/apisix/admin/init.lua @@ -18,6 +18,7 @@ local require = require local core = require("apisix.core") local route = require("apisix.utils.router") local plugin = require("apisix.plugin") +local v3_adapter = require("apisix.admin.v3_adapter") local ngx = ngx local get_method = ngx.req.get_method local ngx_time = ngx.time @@ -46,9 +47,9 @@ local resources = { upstreams = require("apisix.admin.upstreams"), consumers = require("apisix.admin.consumers"), schema = require("apisix.admin.schema"), - ssl = require("apisix.admin.ssl"), + ssls = require("apisix.admin.ssl"), plugins = require("apisix.admin.plugins"), - proto = require("apisix.admin.proto"), + protos = require("apisix.admin.proto"), global_rules = require("apisix.admin.global_rules"), stream_routes = require("apisix.admin.stream_routes"), 
plugin_metadata = require("apisix.admin.plugin_metadata"), @@ -62,8 +63,8 @@ local router local function check_token(ctx) local local_conf = core.config.local_conf() - if not local_conf or not local_conf.apisix - or not local_conf.apisix.admin_key then + local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key") + if not admin_key then return true end @@ -74,7 +75,7 @@ local function check_token(ctx) end local admin - for i, row in ipairs(local_conf.apisix.admin_key) do + for i, row in ipairs(admin_key) do if req_token == row.key then admin = row break @@ -186,7 +187,17 @@ local function run() local code, data = resource[method](seg_id, req_body, seg_sub_path, uri_args) if code then + if v3_adapter.enable_v3() then + core.response.set_header("X-API-VERSION", "v3") + else + core.response.set_header("X-API-VERSION", "v2") + end + if resource.need_v3_filter then + data = v3_adapter.filter(data) + end + data = strip_etcd_resp(data) + core.response.exit(code, data) end end diff --git a/apisix/admin/plugin_config.lua b/apisix/admin/plugin_config.lua index bcf199fcd27c..708de0164636 100644 --- a/apisix/admin/plugin_config.lua +++ b/apisix/admin/plugin_config.lua @@ -24,6 +24,7 @@ local ipairs = ipairs local _M = { + need_v3_filter = true, } diff --git a/apisix/admin/plugin_metadata.lua b/apisix/admin/plugin_metadata.lua index bde9af05abfb..23859c775e24 100644 --- a/apisix/admin/plugin_metadata.lua +++ b/apisix/admin/plugin_metadata.lua @@ -21,6 +21,7 @@ local utils = require("apisix.admin.utils") local injected_mark = "injected metadata_schema" local _M = { + need_v3_filter = true, } diff --git a/apisix/admin/proto.lua b/apisix/admin/proto.lua index 132db68a1406..d00c216535e0 100644 --- a/apisix/admin/proto.lua +++ b/apisix/admin/proto.lua @@ -26,6 +26,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } @@ -69,7 +70,7 @@ function _M.put(id, conf) return 400, err end - local key = "/proto/" .. 
id + local key = "/protos/" .. id local ok, err = utils.inject_conf_with_prev_conf("proto", key, conf) if not ok then @@ -87,7 +88,7 @@ end function _M.get(id) - local key = "/proto" + local key = "/protos" if id then key = key .. "/" .. id end @@ -109,7 +110,7 @@ function _M.post(id, conf) return 400, err end - local key = "/proto" + local key = "/protos" utils.inject_timestamp(conf) local res, err = core.etcd.push(key, conf) if not res then @@ -181,7 +182,7 @@ function _M.delete(id) end core.log.info("proto delete service ref check pass: ", id) - local key = "/proto/" .. id + local key = "/protos/" .. id -- core.log.info("key: ", key) local res, err = core.etcd.delete(key) if not res then diff --git a/apisix/admin/routes.lua b/apisix/admin/routes.lua index 877f6cf5e2c1..4cd36b385146 100644 --- a/apisix/admin/routes.lua +++ b/apisix/admin/routes.lua @@ -26,6 +26,7 @@ local loadstring = loadstring local _M = { version = 0.2, + need_v3_filter = true, } diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua index 59c53eec3c6f..505ab2ccd045 100644 --- a/apisix/admin/services.lua +++ b/apisix/admin/services.lua @@ -27,6 +27,7 @@ local loadstring = loadstring local _M = { version = 0.3, + need_v3_filter = true, } diff --git a/apisix/admin/ssl.lua b/apisix/admin/ssl.lua index 9a73107c9f10..35f80a7ffe87 100644 --- a/apisix/admin/ssl.lua +++ b/apisix/admin/ssl.lua @@ -22,6 +22,7 @@ local type = type local _M = { version = 0.1, + need_v3_filter = true, } @@ -72,7 +73,7 @@ function _M.put(id, conf) end end - local key = "/ssl/" .. id + local key = "/ssls/" .. id local ok, err = utils.inject_conf_with_prev_conf("ssl", key, conf) if not ok then @@ -90,7 +91,7 @@ end function _M.get(id) - local key = "/ssl" + local key = "/ssls" if id then key = key .. "/" .. 
id end @@ -126,7 +127,7 @@ function _M.post(id, conf) end end - local key = "/ssl" + local key = "/ssls" utils.inject_timestamp(conf) local res, err = core.etcd.push(key, conf) if not res then @@ -143,7 +144,7 @@ function _M.delete(id) return 400, {error_msg = "missing ssl id"} end - local key = "/ssl/" .. id + local key = "/ssls/" .. id -- core.log.info("key: ", key) local res, err = core.etcd.delete(key) if not res then @@ -168,7 +169,7 @@ function _M.patch(id, conf, sub_path) return 400, {error_msg = "invalid configuration"} end - local key = "/ssl" + local key = "/ssls" if id then key = key .. "/" .. id end diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua index 6770830acf1f..51b944ebaea3 100644 --- a/apisix/admin/stream_routes.lua +++ b/apisix/admin/stream_routes.lua @@ -22,6 +22,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua index 5aec652691f3..45a7199f4373 100644 --- a/apisix/admin/upstreams.lua +++ b/apisix/admin/upstreams.lua @@ -26,6 +26,7 @@ local type = type local _M = { version = 0.2, + need_v3_filter = true, } diff --git a/apisix/admin/utils.lua b/apisix/admin/utils.lua index 3ff695a473b6..db73dda6751f 100644 --- a/apisix/admin/utils.lua +++ b/apisix/admin/utils.lua @@ -24,8 +24,8 @@ local _M = {} local function inject_timestamp(conf, prev_conf, patch_conf) if not conf.create_time then - if prev_conf and prev_conf.node.value.create_time then - conf.create_time = prev_conf.node.value.create_time + if prev_conf and (prev_conf.node or prev_conf.list).value.create_time then + conf.create_time = (prev_conf.node or prev_conf.list).value.create_time else -- As we don't know existent data's create_time, we have to pretend -- they are created now. 
diff --git a/apisix/admin/v3_adapter.lua b/apisix/admin/v3_adapter.lua new file mode 100644 index 000000000000..154efe463820 --- /dev/null +++ b/apisix/admin/v3_adapter.lua @@ -0,0 +1,214 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local fetch_local_conf = require("apisix.core.config_local").local_conf +local try_read_attr = require("apisix.core.table").try_read_attr +local deepcopy = require("apisix.core.table").deepcopy +local log = require("apisix.core.log") +local request = require("apisix.core.request") +local response = require("apisix.core.response") +local table = require("apisix.core.table") +local tonumber = tonumber +local re_find = ngx.re.find +local pairs = pairs + +local _M = {} + + +local admin_api_version +local function enable_v3() + if admin_api_version then + if admin_api_version == "v3" then + return true + end + + if admin_api_version == "default" then + return false + end + end + + local local_conf, err = fetch_local_conf() + if not local_conf then + admin_api_version = "default" + log.error("failed to fetch local conf: ", err) + return false + end + + local api_ver = try_read_attr(local_conf, "deployment", "admin", "admin_api_version") + if api_ver ~= "v3" then + admin_api_version = 
"default" + return false + end + + admin_api_version = api_ver + return true +end +_M.enable_v3 = enable_v3 + + +function _M.to_v3(body, action) + if not enable_v3() then + body.action = action + end +end + + +function _M.to_v3_list(body) + if not enable_v3() then + return + end + + if body.node.dir then + body.list = body.node.nodes + body.node = nil + end +end + + +local function sort(l, r) + return l.createdIndex < r.createdIndex +end + + +local function pagination(body, args) + args.page = tonumber(args.page) + args.page_size = tonumber(args.page_size) + if not args.page or not args.page_size then + return + end + + if args.page_size < 10 or args.page_size > 500 then + return response.exit(400, "page_size must be between 10 and 500") + end + + if not args.page or args.page < 1 then + -- default page is 1 + args.page = 1 + end + + local list = body.list + + -- sort nodes by there createdIndex + table.sort(list, sort) + + local to = args.page * args.page_size + local from = to - args.page_size + 1 + + local res = table.new(20, 0) + + for i = from, to do + if list[i] then + res[i - from + 1] = list[i] + end + end + + body.list = res +end + + +local function filter(body, args) + if not args.name and not args.label and not args.uri then + return + end + + for i = #body.list, 1, -1 do + local name_matched = true + local label_matched = true + local uri_matched = true + if args.name then + name_matched = false + local matched = re_find(body.list[i].value.name, args.name, "jo") + if matched then + name_matched = true + end + end + + if args.label then + label_matched = false + if body.list[i].value.labels then + for k, _ in pairs(body.list[i].value.labels) do + if k == args.label then + label_matched = true + break + end + end + end + end + + if args.uri then + uri_matched = false + if body.list[i].value.uri then + local matched = re_find(body.list[i].value.uri, args.uri, "jo") + if matched then + uri_matched = true + end + end + + if body.list[i].value.uris then + for 
_, uri in pairs(body.list[i].value.uris) do + if re_find(uri, args.uri, "jo") then + uri_matched = true + break + end + end + end + end + + if not name_matched or not label_matched or not uri_matched then + table.remove(body.list, i) + end + end +end + + +function _M.filter(body) + if not enable_v3() then + return + end + + local args = request.get_uri_args() + local processed_body = deepcopy(body) + + if processed_body.deleted then + processed_body.node = nil + end + + -- strip node wrapping for single query, create, and update scenarios. + if processed_body.node then + processed_body = processed_body.node + end + + -- filter and paging logic for list query only + if processed_body.list then + filter(processed_body, args) + + -- calculate the total amount of filtered data + processed_body.total = processed_body.list and #processed_body.list or 0 + + pagination(processed_body, args) + + -- remove the count field returned by etcd + -- we don't need a field that reflects the length of the currently returned data, + -- it doesn't make sense + processed_body.count = nil + end + + return processed_body +end + + +return _M diff --git a/apisix/balancer.lua b/apisix/balancer.lua index 4dd387400533..462d04f07ad2 100644 --- a/apisix/balancer.lua +++ b/apisix/balancer.lua @@ -26,6 +26,7 @@ local set_more_tries = balancer.set_more_tries local get_last_failure = balancer.get_last_failure local set_timeouts = balancer.set_timeouts local ngx_now = ngx.now +local str_byte = string.byte local module_name = "balancer" @@ -195,6 +196,12 @@ local function pick_server(route, ctx) core.log.info("ctx: ", core.json.delay_encode(ctx, true)) local up_conf = ctx.upstream_conf + for _, node in ipairs(up_conf.nodes) do + if core.utils.parse_ipv6(node.host) and str_byte(node.host, 1) ~= str_byte("[") then + node.host = '[' .. node.host .. 
']' + end + end + local nodes_count = #up_conf.nodes if nodes_count == 1 then local node = up_conf.nodes[1] @@ -302,6 +309,7 @@ do local size = keepalive_pool.size local requests = keepalive_pool.requests + core.table.clear(pool_opt) pool_opt.pool_size = size local scheme = up_conf.scheme @@ -358,7 +366,7 @@ function _M.run(route, ctx, plugin_funcs) local header_changed local pass_host = ctx.pass_host - if pass_host == "node" and balancer.recreate_request then + if pass_host == "node" then local host = server.upstream_host if host ~= ctx.var.upstream_host then -- retried node has a different host @@ -369,7 +377,7 @@ function _M.run(route, ctx, plugin_funcs) local _, run = plugin_funcs("before_proxy") -- always recreate request as the request may be changed by plugins - if (run or header_changed) and balancer.recreate_request then + if run or header_changed then balancer.recreate_request() end end diff --git a/apisix/cli/apisix.lua b/apisix/cli/apisix.lua index d284e20848dd..079691f51a04 100755 --- a/apisix/cli/apisix.lua +++ b/apisix/cli/apisix.lua @@ -18,14 +18,20 @@ local pkg_cpath_org = package.cpath local pkg_path_org = package.path +local _, find_pos_end = string.find(pkg_path_org, ";", -1, true) +if not find_pos_end then + pkg_path_org = pkg_path_org .. ";" +end + local apisix_home = "/usr/local/apisix" local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;" .. apisix_home .. "/deps/lib/lua/5.1/?.so;" -local pkg_path = apisix_home .. "/deps/share/lua/5.1/?.lua;" +local pkg_path_deps = apisix_home .. "/deps/share/lua/5.1/?.lua;" +local pkg_path_env = apisix_home .. "/?.lua;" -- modify the load path to load our dependencies package.cpath = pkg_cpath .. pkg_cpath_org -package.path = pkg_path .. pkg_path_org +package.path = pkg_path_deps .. pkg_path_org .. 
pkg_path_env -- pass path to construct the final result local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org) diff --git a/apisix/cli/env.lua b/apisix/cli/env.lua index 3c78ab3c11d2..f0e1a36e7e88 100644 --- a/apisix/cli/env.lua +++ b/apisix/cli/env.lua @@ -82,7 +82,7 @@ return function (apisix_home, pkg_cpath_org, pkg_path_org) -- pre-transform openresty path res, err = util.execute_cmd("command -v openresty") if not res then - error("failed to exec ulimit cmd \'command -v openresty\', err: " .. err) + error("failed to exec cmd \'command -v openresty\', err: " .. err) end local openresty_path_abs = util.trim(res) diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua index 43aa4f84ae26..14c6a4467e3d 100644 --- a/apisix/cli/etcd.lua +++ b/apisix/cli/etcd.lua @@ -157,7 +157,7 @@ function _M.init(env, args) util.die("failed to read `apisix` field from yaml file when init etcd") end - if yaml_conf.apisix.config_center ~= "etcd" then + if yaml_conf.deployment.config_provider ~= "etcd" then return true end @@ -198,8 +198,7 @@ function _M.init(env, args) local retry_time = 0 local etcd = yaml_conf.etcd - -- TODO: remove deprecated health_check_retry option in APISIX v3 - local max_retry = tonumber(etcd.startup_retry or etcd.health_check_retry) or 2 + local max_retry = tonumber(etcd.startup_retry) or 2 while retry_time < max_retry do res, err = request(version_url, yaml_conf) -- In case of failure, request returns nil followed by an error message. 
diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua index 9c528005e1fd..13af90f4abb2 100644 --- a/apisix/cli/file.lua +++ b/apisix/cli/file.lua @@ -133,7 +133,7 @@ local function path_is_multi_type(path, type_val) return true end - if path == "apisix->ssl->listen_port" and type_val == "number" then + if path == "apisix->ssl->key_encrypt_salt" then return true end @@ -237,7 +237,39 @@ function _M.read_yaml_conf(apisix_home) end end - if default_conf.apisix.config_center == "yaml" then + if default_conf.deployment then + default_conf.deployment.config_provider = "etcd" + if default_conf.deployment.role == "traditional" then + default_conf.etcd = default_conf.deployment.etcd + + elseif default_conf.deployment.role == "control_plane" then + default_conf.etcd = default_conf.deployment.etcd + default_conf.apisix.enable_admin = true + + elseif default_conf.deployment.role == "data_plane" then + if default_conf.deployment.role_data_plane.config_provider == "yaml" then + default_conf.deployment.config_provider = "yaml" + elseif default_conf.deployment.role_data_plane.config_provider == "xds" then + default_conf.deployment.config_provider = "xds" + else + default_conf.etcd = default_conf.deployment.role_data_plane.control_plane + end + default_conf.apisix.enable_admin = false + end + + if default_conf.etcd and default_conf.deployment.certs then + -- copy certs configuration to keep backward compatible + local certs = default_conf.deployment.certs + local etcd = default_conf.etcd + if not etcd.tls then + etcd.tls = {} + end + etcd.tls.cert = certs.cert + etcd.tls.key = certs.cert_key + end + end + + if default_conf.deployment.config_provider == "yaml" then local apisix_conf_path = profile:yaml_path("apisix") local apisix_conf_yaml, _ = util.read_file(apisix_conf_path) if apisix_conf_yaml then @@ -251,13 +283,6 @@ function _M.read_yaml_conf(apisix_home) end end - if default_conf.deployment - and default_conf.deployment.role == "traditional" - and 
default_conf.deployment.etcd - then - default_conf.etcd = default_conf.deployment.etcd - end - return default_conf end diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua index f22280766982..a655efcfc934 100644 --- a/apisix/cli/ngx_tpl.lua +++ b/apisix/cli/ngx_tpl.lua @@ -58,6 +58,8 @@ env {*name*}; {% end %} {% if use_apisix_openresty then %} +thread_pool grpc-client-nginx-module threads=1; + lua { {% if enabled_stream_plugins["prometheus"] then %} lua_shared_dict prometheus-metrics {* meta.lua_shared_dict["prometheus-metrics"] *}; @@ -66,6 +68,12 @@ lua { {% if enabled_stream_plugins["prometheus"] and not enable_http then %} http { + lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] + .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; + lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=] + .. [=[$prefix/deps/lib/lua/5.1/?.so;;]=] + .. [=[{*lua_cpath*};"; + {% if enabled_stream_plugins["prometheus"] then %} init_worker_by_lua_block { require("apisix.plugins.prometheus.exporter").http_init(true) @@ -232,8 +240,11 @@ http { lua_shared_dict balancer-ewma-last-touched-at {* http.lua_shared_dict["balancer-ewma-last-touched-at"] *}; lua_shared_dict etcd-cluster-health-check {* http.lua_shared_dict["etcd-cluster-health-check"] *}; # etcd health check - {% if enabled_discoveries["kubernetes"] then %} - lua_shared_dict kubernetes {* http.lua_shared_dict["kubernetes"] *}; + # for discovery shared dict + {% if discovery_shared_dicts then %} + {% for key, size in pairs(discovery_shared_dicts) do %} + lua_shared_dict {*key*} {*size*}; + {% end %} {% end %} {% if enabled_discoveries["tars"] then %} @@ -276,6 +287,10 @@ http { lua_shared_dict introspection {* http.lua_shared_dict["introspection"] *}; # cache for JWT verification results {% end %} + {% if enabled_plugins["cas-auth"] then %} + lua_shared_dict cas_sessions {* http.lua_shared_dict["cas-auth"] *}; 
+ {% end %} + {% if enabled_plugins["authz-keycloak"] then %} # for authz-keycloak lua_shared_dict access-tokens {* http.lua_shared_dict["access-tokens"] *}; # cache for service account access tokens @@ -296,33 +311,6 @@ http { lua_shared_dict {*cache_key*} {*cache_size*}; {% end %} {% end %} - {% if http.lua_shared_dicts then %} - {% for cache_key, cache_size in pairs(http.lua_shared_dicts) do %} - lua_shared_dict {*cache_key*} {*cache_size*}; - {% end %} - {% end %} - - {% if enabled_plugins["proxy-cache"] then %} - # for proxy cache - {% for _, cache in ipairs(proxy_cache.zones) do %} - {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} - proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off; - {% else %} - lua_shared_dict {* cache.name *} {* cache.memory_size *}; - {% end %} - {% end %} - {% end %} - - {% if enabled_plugins["proxy-cache"] then %} - # for proxy cache - map $upstream_cache_zone $upstream_cache_zone_info { - {% for _, cache in ipairs(proxy_cache.zones) do %} - {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} - {* cache.name *} {* cache.disk_path *},{* cache.cache_levels *}; - {% end %} - {% end %} - } - {% end %} {% if enabled_plugins["error-log-logger"] then %} lua_capture_error_log 10m; @@ -369,10 +357,7 @@ http { # error_page error_page 500 @50x.html; - {% if real_ip_header then %} - real_ip_header {* real_ip_header *}; - {% print("\nDeprecated: apisix.real_ip_header has been moved to nginx_config.http.real_ip_header. apisix.real_ip_header will be removed in the future version. 
Please use nginx_config.http.real_ip_header first.\n\n") %} - {% elseif http.real_ip_header then %} + {% if http.real_ip_header then %} real_ip_header {* http.real_ip_header *}; {% end %} @@ -380,12 +365,7 @@ http { real_ip_recursive {* http.real_ip_recursive *}; {% end %} - {% if real_ip_from then %} - {% print("\nDeprecated: apisix.real_ip_from has been moved to nginx_config.http.real_ip_from. apisix.real_ip_from will be removed in the future version. Please use nginx_config.http.real_ip_from first.\n\n") %} - {% for _, real_ip in ipairs(real_ip_from) do %} - set_real_ip_from {*real_ip*}; - {% end %} - {% elseif http.real_ip_from then %} + {% if http.real_ip_from then %} {% for _, real_ip in ipairs(http.real_ip_from) do %} set_real_ip_from {*real_ip*}; {% end %} @@ -461,17 +441,20 @@ http { dns_resolver = dns_resolver, } apisix.http_init(args) + + -- set apisix_lua_home into constans module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "{*apisix_lua_home*}" } init_worker_by_lua_block { apisix.http_init_worker() } - {% if not use_openresty_1_17 then %} exit_worker_by_lua_block { apisix.http_exit_worker() } - {% end %} {% if enable_control then %} server { @@ -515,7 +498,7 @@ http { } {% end %} - {% if enable_admin and admin_server_addr then %} + {% if enable_admin then %} server { {%if https_admin then%} listen {* admin_server_addr *} ssl; @@ -580,6 +563,27 @@ http { {* conf_server *} {% end %} + {% if deployment_role ~= "control_plane" then %} + + {% if enabled_plugins["proxy-cache"] then %} + # for proxy cache + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} + proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off; + {% else %} + lua_shared_dict {* cache.name 
*} {* cache.memory_size *}; + {% end %} + {% end %} + + map $upstream_cache_zone $upstream_cache_zone_info { + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} + {* cache.name *} {* cache.disk_path *},{* cache.cache_levels *}; + {% end %} + {% end %} + } + {% end %} + server { {% for _, item in ipairs(node_listen) do %} listen {* item.ip *}:{* item.port *} default_server {% if item.enable_http2 then %} http2 {% end %} {% if enable_reuseport then %} reuseport {% end %}; @@ -593,7 +597,7 @@ http { listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol; {% end %} {% if proxy_protocol and proxy_protocol.listen_https_port then %} - listen {* proxy_protocol.listen_https_port *} ssl default_server {% if ssl.enable_http2 then %} http2 {% end %} proxy_protocol; + listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol; {% end %} server_name _; @@ -631,27 +635,6 @@ http { stub_status; } - {% if enable_admin and not admin_server_addr then %} - location /apisix/admin { - set $upstream_scheme 'http'; - set $upstream_host $http_host; - set $upstream_uri ''; - - {%if allow_admin then%} - {% for _, allow_ip in ipairs(allow_admin) do %} - allow {*allow_ip*}; - {% end %} - deny all; - {%else%} - allow all; - {%end%} - - content_by_lua_block { - apisix.http_admin() - } - } - {% end %} - {% if ssl.enable then %} ssl_certificate_by_lua_block { apisix.http_ssl_phase() @@ -769,6 +752,14 @@ http { apisix.grpc_access_phase() } + {% if use_apisix_openresty then %} + # For servers which obey the standard, when `:authority` is missing, + # `host` will be used instead. 
When used with apisix-base, we can do + # better by setting `:authority` directly + grpc_set_header ":authority" $upstream_host; + {% else %} + grpc_set_header "Host" $upstream_host; + {% end %} grpc_set_header Content-Type application/grpc; grpc_socket_keepalive on; grpc_pass $upstream_scheme://apisix_backend; @@ -852,6 +843,8 @@ http { } } } + {% end %} + # http end configuration snippet starts {% if http_end_configuration_snippet then %} {* http_end_configuration_snippet *} diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua index d2275bed5813..c8cf6ab44e79 100644 --- a/apisix/cli/ops.lua +++ b/apisix/cli/ops.lua @@ -183,8 +183,10 @@ local function init(env) -- check the Admin API token local checked_admin_key = false - if yaml_conf.apisix.enable_admin and yaml_conf.apisix.allow_admin then - for _, allow_ip in ipairs(yaml_conf.apisix.allow_admin) do + local allow_admin = yaml_conf.deployment.admin and + yaml_conf.deployment.admin.allow_admin + if yaml_conf.apisix.enable_admin and allow_admin then + for _, allow_ip in ipairs(allow_admin) do if allow_ip == "127.0.0.0/24" then checked_admin_key = true end @@ -198,13 +200,17 @@ local function init(env) Please modify "admin_key" in conf/config.yaml . ]] - if type(yaml_conf.apisix.admin_key) ~= "table" or - #yaml_conf.apisix.admin_key == 0 + local admin_key = yaml_conf.deployment.admin + if admin_key then + admin_key = admin_key.admin_key + end + + if type(admin_key) ~= "table" or #admin_key == 0 then util.die(help:format("ERROR: missing valid Admin API token.")) end - for _, admin in ipairs(yaml_conf.apisix.admin_key) do + for _, admin in ipairs(admin_key) do if type(admin.key) == "table" then admin.key = "" else @@ -224,10 +230,23 @@ Please modify "admin_key" in conf/config.yaml . 
end end + if yaml_conf.deployment.admin then + local admin_api_mtls = yaml_conf.deployment.admin.admin_api_mtls + local https_admin = yaml_conf.deployment.admin.https_admin + if https_admin and not (admin_api_mtls and + admin_api_mtls.admin_ssl_cert and + admin_api_mtls.admin_ssl_cert ~= "" and + admin_api_mtls.admin_ssl_cert_key and + admin_api_mtls.admin_ssl_cert_key ~= "") + then + util.die("missing ssl cert for https admin") + end + end + if yaml_conf.apisix.enable_admin and - yaml_conf.apisix.config_center == "yaml" + yaml_conf.deployment.config_provider == "yaml" then - util.die("ERROR: Admin API can only be used with etcd config_center.\n") + util.die("ERROR: Admin API can only be used with etcd config_provider.\n") end local or_ver = get_openresty_version() @@ -235,16 +254,11 @@ Please modify "admin_key" in conf/config.yaml . util.die("can not find openresty\n") end - local need_ver = "1.17.8" + local need_ver = "1.19.3" if not version_greater_equal(or_ver, need_ver) then util.die("openresty version must >=", need_ver, " current ", or_ver, "\n") end - local use_openresty_1_17 = false - if not version_greater_equal(or_ver, "1.19.3") then - use_openresty_1_17 = true - end - local or_info = util.execute_cmd("openresty -V 2>&1") if or_info and not or_info:find("http_stub_status_module", 1, true) then util.die("'http_stub_status_module' module is missing in ", @@ -320,14 +334,10 @@ Please modify "admin_key" in conf/config.yaml . 
-- listen in admin use a separate port, support specific IP, compatible with the original style local admin_server_addr if yaml_conf.apisix.enable_admin then - if yaml_conf.apisix.admin_listen then - admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", - yaml_conf.apisix.admin_listen.ip, - 9180, yaml_conf.apisix.admin_listen.port) - elseif yaml_conf.apisix.port_admin then - admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", nil, - 9180, yaml_conf.apisix.port_admin) - end + local ip = yaml_conf.deployment.admin.admin_listen.ip + local port = yaml_conf.deployment.admin.admin_listen.port + admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", ip, + 9180, port) end local control_server_addr @@ -433,46 +443,28 @@ Please modify "admin_key" in conf/config.yaml . local ssl_listen = {} -- listen in https, support multiple ports, support specific IP for _, value in ipairs(yaml_conf.apisix.ssl.listen) do - if type(value) == "number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", value, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - elseif type(value) == "table" then - local ip = value.ip - local port = value.port - local enable_ipv6 = false - local enable_http2 = (value.enable_http2 or yaml_conf.apisix.ssl.enable_http2) - - if ip == nil then - ip = "0.0.0.0" - if yaml_conf.apisix.enable_ipv6 then - enable_ipv6 = true - end - end - - if port == nil then - port = 9443 - end - - if enable_http2 == nil then - enable_http2 = false + local ip = value.ip + local port = value.port + local enable_ipv6 = false + local enable_http2 = value.enable_http2 + + if ip == nil then + ip = "0.0.0.0" + if yaml_conf.apisix.enable_ipv6 then + enable_ipv6 = true end + end - listen_table_insert(ssl_listen, "https", ip, port, - enable_http2, enable_ipv6) + if port == nil then + port = 9443 end - end - -- listen in https, compatible with the original style - if type(yaml_conf.apisix.ssl.listen_port) == 
"number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", yaml_conf.apisix.ssl.listen_port, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - elseif type(yaml_conf.apisix.ssl.listen_port) == "table" then - for _, value in ipairs(yaml_conf.apisix.ssl.listen_port) do - if type(value) == "number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", value, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - end + if enable_http2 == nil then + enable_http2 = false end + + listen_table_insert(ssl_listen, "https", ip, port, + enable_http2, enable_ipv6) end yaml_conf.apisix.ssl.listen = ssl_listen @@ -491,17 +483,6 @@ Please modify "admin_key" in conf/config.yaml . yaml_conf.apisix.ssl.ssl_trusted_certificate = cert_path end - local admin_api_mtls = yaml_conf.apisix.admin_api_mtls - if yaml_conf.apisix.https_admin and - not (admin_api_mtls and - admin_api_mtls.admin_ssl_cert and - admin_api_mtls.admin_ssl_cert ~= "" and - admin_api_mtls.admin_ssl_cert_key and - admin_api_mtls.admin_ssl_cert_key ~= "") - then - util.die("missing ssl cert for https admin") - end - -- enable ssl with place holder crt&key yaml_conf.apisix.ssl.ssl_cert = "cert/ssl_PLACE_HOLDER.crt" yaml_conf.apisix.ssl.ssl_cert_key = "cert/ssl_PLACE_HOLDER.key" @@ -546,16 +527,22 @@ Please modify "admin_key" in conf/config.yaml . 
end if yaml_conf.deployment and yaml_conf.deployment.role then - env.deployment_role = yaml_conf.deployment.role + local role = yaml_conf.deployment.role + env.deployment_role = role + + if role == "control_plane" and not admin_server_addr then + local listen = node_listen[1] + admin_server_addr = str_format("%s:%s", listen.ip, listen.port) + end end -- Using template.render local sys_conf = { - use_openresty_1_17 = use_openresty_1_17, lua_path = env.pkg_path_org, lua_cpath = env.pkg_cpath_org, os_name = util.trim(util.execute_cmd("uname")), apisix_lua_home = env.apisix_home, + deployment_role = env.deployment_role, use_apisix_openresty = use_apisix_openresty, error_log = {level = "warn"}, enable_http = enable_http, @@ -591,6 +578,11 @@ Please modify "admin_key" in conf/config.yaml . for k,v in pairs(yaml_conf.nginx_config) do sys_conf[k] = v end + if yaml_conf.deployment.admin then + for k,v in pairs(yaml_conf.deployment.admin) do + sys_conf[k] = v + end + end sys_conf["wasm"] = yaml_conf.wasm @@ -609,10 +601,6 @@ Please modify "admin_key" in conf/config.yaml . sys_conf["worker_processes"] = "auto" end - if sys_conf.allow_admin and #sys_conf.allow_admin == 0 then - sys_conf.allow_admin = nil - end - local dns_resolver = sys_conf["dns_resolver"] if not dns_resolver or #dns_resolver == 0 then local dns_addrs, err = local_dns_resolver("/etc/resolv.conf") @@ -650,11 +638,6 @@ Please modify "admin_key" in conf/config.yaml . sys_conf["worker_processes"] = floor(tonumber(env_worker_processes)) end - if sys_conf["http"]["lua_shared_dicts"] then - stderr:write("lua_shared_dicts is deprecated, " .. - "use custom_lua_shared_dict instead\n") - end - local exported_vars = file.get_exported_vars() if exported_vars then if not sys_conf["envs"] then @@ -679,36 +662,52 @@ Please modify "admin_key" in conf/config.yaml . 
end end - -- inject kubernetes discovery environment variable + -- inject kubernetes discovery shared dict and environment variable if enabled_discoveries["kubernetes"] then - local kubernetes_conf = yaml_conf.discovery["kubernetes"] + if not sys_conf["discovery_shared_dicts"] then + sys_conf["discovery_shared_dicts"] = {} + end - local keys = { - kubernetes_conf.service.host, - kubernetes_conf.service.port, - } + local kubernetes_conf = yaml_conf.discovery["kubernetes"] - if kubernetes_conf.client.token then - table_insert(keys, kubernetes_conf.client.token) - end + local inject_environment = function(conf, envs) + local keys = { + conf.service.host, + conf.service.port, + } - if kubernetes_conf.client.token_file then - table_insert(keys, kubernetes_conf.client.token_file) - end + if conf.client.token then + table_insert(keys, conf.client.token) + end - local envs = {} + if conf.client.token_file then + table_insert(keys, conf.client.token_file) + end - for _, key in ipairs(keys) do - if #key > 3 then - local first, second = str_byte(key, 1, 2) - if first == str_byte('$') and second == str_byte('{') then - local last = str_byte(key, #key) - if last == str_byte('}') then - envs[str_sub(key, 3, #key - 1)] = "" + for _, key in ipairs(keys) do + if #key > 3 then + local first, second = str_byte(key, 1, 2) + if first == str_byte('$') and second == str_byte('{') then + local last = str_byte(key, #key) + if last == str_byte('}') then + envs[str_sub(key, 3, #key - 1)] = "" + end end end end + + end + + local envs = {} + if #kubernetes_conf == 0 then + sys_conf["discovery_shared_dicts"]["kubernetes"] = kubernetes_conf.shared_size + inject_environment(kubernetes_conf, envs) + else + for _, item in ipairs(kubernetes_conf) do + sys_conf["discovery_shared_dicts"]["kubernetes-" .. item.id] = item.shared_size + inject_environment(item, envs) + end end if not sys_conf["envs"] then @@ -718,6 +717,7 @@ Please modify "admin_key" in conf/config.yaml . 
for item in pairs(envs) do table_insert(sys_conf["envs"], item) end + end -- fix up lua path diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua index db4f47477de5..e1c1a0920812 100644 --- a/apisix/cli/schema.lua +++ b/apisix/cli/schema.lua @@ -47,7 +47,6 @@ local etcd_schema = { }, prefix = { type = "string", - pattern = [[^/[^/]+$]] }, host = { type = "array", @@ -56,18 +55,22 @@ local etcd_schema = { pattern = [[^https?://]] }, minItems = 1, + }, + timeout = { + type = "integer", + default = 30, + minimum = 1, + description = "etcd connection timeout in seconds", } }, required = {"prefix", "host"} } + local config_schema = { type = "object", properties = { apisix = { properties = { - config_center = { - enum = {"etcd", "yaml", "xds"}, - }, lua_module_hook = { pattern = "^[a-zA-Z._-]+$", }, @@ -128,12 +131,6 @@ local config_schema = { } } }, - port_admin = { - type = "integer", - }, - https_admin = { - type = "boolean", - }, stream_proxy = { type = "object", properties = { @@ -203,7 +200,44 @@ local config_schema = { properties = { ssl_trusted_certificate = { type = "string", - } + }, + listen = { + type = "array", + items = { + type = "object", + properties = { + ip = { + type = "string", + }, + port = { + type = "integer", + minimum = 1, + maximum = 65535 + }, + enable_http2 = { + type = "boolean", + } + } + } + }, + key_encrypt_salt = { + anyOf = { + { + type = "array", + minItems = 1, + items = { + type = "string", + minLength = 16, + maxLength = 16 + } + }, + { + type = "string", + minLength = 16, + maxLength = 16 + } + } + }, } }, } @@ -229,6 +263,22 @@ local config_schema = { } }, etcd = etcd_schema, + plugins = { + type = "array", + default = {}, + minItems = 0, + items = { + type = "string" + } + }, + stream_plugins = { + type = "array", + default = {}, + minItems = 0, + items = { + type = "string" + } + }, wasm = { type = "object", properties = { @@ -261,20 +311,127 @@ local config_schema = { type = "object", properties = { role = { - enum = 
{"traditional", "control_plane", "data_plane", "standalone"} + enum = {"traditional", "control_plane", "data_plane", "standalone"}, + default = "traditional" } }, - required = {"role"}, + }, + }, + required = {"apisix", "deployment"}, +} + +local admin_schema = { + type = "object", + properties = { + admin_key = { + type = "array", + properties = { + items = { + properties = { + name = {type = "string"}, + key = {type = "string"}, + role = {type = "string"}, + } + } + } + }, + admin_listen = { + properties = { + listen = { type = "string" }, + port = { type = "integer" }, + }, + default = { + listen = "0.0.0.0", + port = 9180, + } + }, + https_admin = { + type = "boolean", }, } } + local deployment_schema = { traditional = { properties = { etcd = etcd_schema, + admin = admin_schema, + role_traditional = { + properties = { + config_provider = { + enum = {"etcd"} + }, + }, + required = {"config_provider"} + } }, required = {"etcd"} }, + control_plane = { + properties = { + etcd = etcd_schema, + role_control_plane = { + properties = { + config_provider = { + enum = {"etcd"} + }, + conf_server = { + properties = { + listen = { + type = "string", + default = "0.0.0.0:9280", + }, + cert = { type = "string" }, + cert_key = { type = "string" }, + client_ca_cert = { type = "string" }, + }, + required = {"cert", "cert_key"} + }, + }, + required = {"config_provider", "conf_server"} + }, + certs = { + properties = { + cert = { type = "string" }, + cert_key = { type = "string" }, + trusted_ca_cert = { type = "string" }, + }, + dependencies = { + cert = { + required = {"cert_key"}, + }, + }, + default = {}, + }, + }, + required = {"etcd", "role_control_plane"} + }, + data_plane = { + properties = { + role_data_plane = { + properties = { + config_provider = { + enum = {"control_plane", "yaml", "xds"} + }, + }, + required = {"config_provider"} + }, + certs = { + properties = { + cert = { type = "string" }, + cert_key = { type = "string" }, + trusted_ca_cert = { type = "string" }, 
+ }, + dependencies = { + cert = { + required = {"cert_key"}, + }, + }, + default = {}, + }, + }, + required = {"role_data_plane"} + } } @@ -298,13 +455,11 @@ function _M.validate(yaml_conf) end end - if yaml_conf.deployment then - local role = yaml_conf.deployment.role - local validator = jsonschema.generate_validator(deployment_schema[role]) - local ok, err = validator(yaml_conf.deployment) - if not ok then - return false, "invalid deployment " .. role .. " configuration: " .. err - end + local role = yaml_conf.deployment.role + local validator = jsonschema.generate_validator(deployment_schema[role]) + local ok, err = validator(yaml_conf.deployment) + if not ok then + return false, "invalid deployment " .. role .. " configuration: " .. err end return true diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua index bfaf973a026c..3b5eb3232394 100644 --- a/apisix/cli/snippet.lua +++ b/apisix/cli/snippet.lua @@ -24,7 +24,10 @@ local _M = {} function _M.generate_conf_server(env, conf) - if not (conf.deployment and conf.deployment.role == "traditional") then + if not (conf.deployment and ( + conf.deployment.role == "traditional" or + conf.deployment.role == "control_plane")) + then return nil, nil end @@ -36,8 +39,7 @@ function _M.generate_conf_server(env, conf) if servers[1]:find(prefix, 1, true) then enable_https = true end - -- there is not a compatible way to verify upstream TLS like the one we do in cosocket - -- so here we just ignore it as the verification is already done in the init phase + for i, s in ipairs(servers) do if (s:find(prefix, 1, true) ~= nil) ~= enable_https then return nil, "all nodes in the etcd cluster should enable/disable TLS together" @@ -49,6 +51,24 @@ function _M.generate_conf_server(env, conf) end end + local control_plane + if conf.deployment.role == "control_plane" then + control_plane = conf.deployment.role_control_plane.conf_server + control_plane.cert = pl_path.abspath(control_plane.cert) + control_plane.cert_key = 
pl_path.abspath(control_plane.cert_key) + + if control_plane.client_ca_cert then + control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert) + end + end + + local trusted_ca_cert + if conf.deployment.certs then + if conf.deployment.certs.trusted_ca_cert then + trusted_ca_cert = pl_path.abspath(conf.deployment.certs.trusted_ca_cert) + end + end + local conf_render = template.compile([[ upstream apisix_conf_backend { server 0.0.0.0:80; @@ -57,8 +77,26 @@ function _M.generate_conf_server(env, conf) conf_server.balancer() } } + + {% if trusted_ca_cert then %} + lua_ssl_trusted_certificate {* trusted_ca_cert *}; + {% end %} + server { + {% if control_plane then %} + listen {* control_plane.listen *} ssl; + ssl_certificate {* control_plane.cert *}; + ssl_certificate_key {* control_plane.cert_key *}; + + {% if control_plane.client_ca_cert then %} + ssl_verify_client on; + ssl_client_certificate {* control_plane.client_ca_cert *}; + {% end %} + + {% else %} listen unix:{* home *}/conf/config_listen.sock; + {% end %} + access_log off; set $upstream_host ''; @@ -71,17 +109,25 @@ function _M.generate_conf_server(env, conf) location / { {% if enable_https then %} proxy_pass https://apisix_conf_backend; + proxy_ssl_protocols TLSv1.2 TLSv1.3; proxy_ssl_server_name on; + + {% if etcd_tls_verify then %} + proxy_ssl_verify on; + proxy_ssl_trusted_certificate {* ssl_trusted_certificate *}; + {% end %} + {% if sni then %} proxy_ssl_name {* sni *}; {% else %} proxy_ssl_name $upstream_host; {% end %} - proxy_ssl_protocols TLSv1.2 TLSv1.3; + {% if client_cert then %} proxy_ssl_certificate {* client_cert *}; proxy_ssl_certificate_key {* client_cert_key *}; {% end %} + {% else %} proxy_pass http://apisix_conf_backend; {% end %} @@ -89,6 +135,7 @@ function _M.generate_conf_server(env, conf) proxy_http_version 1.1; proxy_set_header Connection ""; proxy_set_header Host $upstream_host; + proxy_next_upstream error timeout non_idempotent http_500 http_502 http_503 http_504; } 
log_by_lua_block { @@ -101,17 +148,33 @@ function _M.generate_conf_server(env, conf) local tls = etcd.tls local client_cert local client_cert_key - if tls and tls.cert then - client_cert = pl_path.abspath(tls.cert) - client_cert_key = pl_path.abspath(tls.key) + local ssl_trusted_certificate + local etcd_tls_verify + if tls then + if tls.cert then + client_cert = pl_path.abspath(tls.cert) + client_cert_key = pl_path.abspath(tls.key) + end + + etcd_tls_verify = tls.verify + if enable_https and etcd_tls_verify then + if not conf.apisix.ssl.ssl_trusted_certificate then + return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled" + end + ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate) + end end return conf_render({ - sni = etcd.tls and etcd.tls.sni, - enable_https = enable_https, + sni = tls and tls.sni, home = env.apisix_home or ".", + control_plane = control_plane, + enable_https = enable_https, client_cert = client_cert, client_cert_key = client_cert_key, + trusted_ca_cert = trusted_ca_cert, + etcd_tls_verify = etcd_tls_verify, + ssl_trusted_certificate = ssl_trusted_certificate, }) end diff --git a/apisix/conf_server.lua b/apisix/conf_server.lua index 40cf2895158b..e0ea91e77013 100644 --- a/apisix/conf_server.lua +++ b/apisix/conf_server.lua @@ -21,7 +21,9 @@ local balancer = require("ngx.balancer") local error = error local ipairs = ipairs local ngx = ngx +local ngx_shared = ngx.shared local ngx_var = ngx.var +local tonumber = tonumber local _M = {} @@ -30,6 +32,16 @@ local resolved_results = {} local server_picker local has_domain = false +local is_http = ngx.config.subsystem == "http" +local health_check_shm_name = "etcd-cluster-health-check" +if not is_http then + health_check_shm_name = health_check_shm_name .. 
"-stream" +end +-- an endpoint is unhealthy if it is failed for HEALTH_CHECK_MAX_FAILURE times in +-- HEALTH_CHECK_DURATION_SECOND +local HEALTH_CHECK_MAX_FAILURE = 3 +local HEALTH_CHECK_DURATION_SECOND = 10 + local function create_resolved_result(server) local host, port = core.utils.parse_addr(server) @@ -48,6 +60,10 @@ function _M.init() end local etcd = conf.deployment.etcd + if etcd.health_check_timeout then + HEALTH_CHECK_DURATION_SECOND = etcd.health_check_timeout + end + for i, s in ipairs(etcd.host) do local _, to = core.string.find(s, "://") if not to then @@ -80,7 +96,13 @@ end local function response_err(err) - ngx.log(ngx.ERR, "failure in conf server: ", err) + core.log.error("failure in conf server: ", err) + + if ngx.get_phase() == "balancer" then + return + end + + ngx.status = 503 ngx.say(core.json.encode({error = err})) ngx.exit(0) end @@ -127,25 +149,87 @@ local function resolve_servers(ctx) end +local function gen_unhealthy_key(addr) + return "conf_server:" .. addr +end + + +local function is_node_health(addr) + local key = gen_unhealthy_key(addr) + local count, err = ngx_shared[health_check_shm_name]:get(key) + if err then + core.log.warn("failed to get health check count, key: ", key, " err: ", err) + return true + end + + if not count then + return true + end + + return tonumber(count) < HEALTH_CHECK_MAX_FAILURE +end + + +local function report_failure(addr) + local key = gen_unhealthy_key(addr) + local count, err = + ngx_shared[health_check_shm_name]:incr(key, 1, 0, HEALTH_CHECK_DURATION_SECOND) + if not count then + core.log.error("failed to report failure, key: ", key, " err: ", err) + else + -- count might be larger than HEALTH_CHECK_MAX_FAILURE + core.log.warn("report failure, endpoint: ", addr, " count: ", count) + end +end + + +local function pick_node_by_server_picker(ctx) + local server, err = ctx.server_picker.get(ctx) + if not server then + err = err or "no valid upstream node" + return nil, "failed to find valid upstream server: " 
.. err + end + + ctx.balancer_server = server + + for _, r in ipairs(resolved_results) do + if r.server == server then + return r + end + end + + return nil, "unknown server: " .. server +end + + local function pick_node(ctx) local res if server_picker then - local server, err = server_picker.get(ctx) - if not server then - err = err or "no valid upstream node" - return nil, "failed to find valid upstream server, " .. err + if not ctx.server_picker then + ctx.server_picker = server_picker end - ctx.server_picker = server_picker - ctx.balancer_server = server + local err + res, err = pick_node_by_server_picker(ctx) + if not res then + return nil, err + end + + while not is_node_health(res.server) do + core.log.warn("endpoint ", res.server, " is unhealthy, skipped") + + if server_picker.after_balance then + server_picker.after_balance(ctx, true) + end - for _, r in ipairs(resolved_results) do - if r.server == server then - res = r - break + res, err = pick_node_by_server_picker(ctx) + if not res then + return nil, err end end + else + -- we don't do health check if there is only one candidate res = resolved_results[1] end @@ -153,7 +237,7 @@ local function pick_node(ctx) ctx.balancer_port = res.port ngx_var.upstream_host = res.domain or res.host - if balancer.recreate_request and ngx.get_phase() == "balancer" then + if ngx.get_phase() == "balancer" then balancer.recreate_request() end @@ -185,6 +269,12 @@ function _M.balancer() core.log.warn("could not set upstream retries: ", err) end else + if ctx.server_picker and ctx.server_picker.after_balance then + ctx.server_picker.after_balance(ctx, true) + end + + report_failure(ctx.balancer_server) + local ok, err = pick_node(ctx) if not ok then return response_err(err) diff --git a/apisix/constants.lua b/apisix/constants.lua index cf04e890cc8c..1c82ec3d49cd 100644 --- a/apisix/constants.lua +++ b/apisix/constants.lua @@ -23,20 +23,20 @@ return { HTTP_ETCD_DIRECTORY = { ["/upstreams"] = true, ["/plugins"] = true, - 
["/ssl"] = true, + ["/ssls"] = true, ["/stream_routes"] = true, ["/plugin_metadata"] = true, ["/routes"] = true, ["/services"] = true, ["/consumers"] = true, ["/global_rules"] = true, - ["/proto"] = true, + ["/protos"] = true, ["/plugin_configs"] = true, }, STREAM_ETCD_DIRECTORY = { ["/upstreams"] = true, ["/plugins"] = true, - ["/ssl"] = true, + ["/ssls"] = true, ["/stream_routes"] = true, ["/plugin_metadata"] = true, }, diff --git a/apisix/consumer.lua b/apisix/consumer.lua index 9a4dc3c428dd..5e25b75215a3 100644 --- a/apisix/consumer.lua +++ b/apisix/consumer.lua @@ -56,6 +56,7 @@ local function plugin_consumer() -- is 'username' field in admin new_consumer.consumer_name = new_consumer.id new_consumer.auth_conf = config + new_consumer.modifiedIndex = consumer.modifiedIndex core.log.info("consumer:", core.json.delay_encode(new_consumer)) core.table.insert(plugins[name].nodes, new_consumer) end diff --git a/apisix/control/v1.lua b/apisix/control/v1.lua index bbe457cd607f..c6d1e065041f 100644 --- a/apisix/control/v1.lua +++ b/apisix/control/v1.lua @@ -276,6 +276,28 @@ function _M.dump_service_info() return 200, info end +function _M.dump_all_plugin_metadata() + local names = core.config.local_conf().plugins + local metadatas = core.table.new(0, #names) + for _, name in ipairs(names) do + local metadata = plugin.plugin_metadata(name) + if metadata then + core.table.insert(metadatas, metadata.value) + end + end + return 200, metadatas +end + +function _M.dump_plugin_metadata() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local name = uri_segs[4] + local metadata = plugin.plugin_metadata(name) + if not metadata then + return 404, {error_msg = str_format("plugin metadata[%s] not found", name)} + end + return 200, metadata.value +end + return { -- /v1/schema @@ -337,5 +359,17 @@ return { methods = {"GET"}, uris = {"/upstream/*"}, handler = _M.dump_upstream_info, + }, + -- /v1/plugin_metadatas + { + methods = {"GET"}, + uris = {"/plugin_metadatas"}, + handler = 
_M.dump_all_plugin_metadata, + }, + -- /v1/plugin_metadata/* + { + methods = {"GET"}, + uris = {"/plugin_metadata/*"}, + handler = _M.dump_plugin_metadata, } } diff --git a/apisix/core.lua b/apisix/core.lua index 0e0919bb83db..f421716e6bdc 100644 --- a/apisix/core.lua +++ b/apisix/core.lua @@ -21,11 +21,11 @@ if not local_conf then error("failed to parse yaml config: " .. err) end -local config_center = local_conf.apisix and local_conf.apisix.config_center +local config_provider = local_conf.deployment and local_conf.deployment.config_provider or "etcd" -log.info("use config_center: ", config_center) -local config = require("apisix.core.config_" .. config_center) -config.type = config_center +log.info("use config_provider: ", config_provider) +local config = require("apisix.core.config_" .. config_provider) +config.type = config_provider return { diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index 183c52aac338..e432b05d950d 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -21,6 +21,7 @@ local table = require("apisix.core.table") local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local log = require("apisix.core.log") local json = require("apisix.core.json") local etcd_apisix = require("apisix.core.etcd") @@ -212,14 +213,17 @@ local function load_full_data(self, dir_res, headers) self:upgrade_version(item.modifiedIndex) else - if not dir_res.nodes then - dir_res.nodes = {} + -- here dir_res maybe res.body.node or res.body.list + -- we need make values equals to res.body.node.nodes or res.body.list + local values = (dir_res and dir_res.nodes) or dir_res + if not values then + values = {} end - self.values = new_tab(#dir_res.nodes, 0) - self.values_hash = new_tab(0, #dir_res.nodes) + self.values = new_tab(#values, 0) + self.values_hash = new_tab(0, #values) - for _, item in ipairs(dir_res.nodes) do + for _, item in ipairs(values) do local key = 
short_key(self, item.key) local data_valid = true if type(item.value) ~= "table" then @@ -302,7 +306,7 @@ local function sync_data(self) return false, err end - local dir_res, headers = res.body.node or {}, res.headers + local dir_res, headers = res.body.list or {}, res.headers log.debug("readdir key: ", self.key, " res: ", json.delay_encode(dir_res)) if not dir_res then @@ -311,12 +315,7 @@ local function sync_data(self) if self.values then for i, val in ipairs(self.values) do - if val and val.clean_handlers then - for _, clean_handler in ipairs(val.clean_handlers) do - clean_handler(val) - end - val.clean_handlers = nil - end + config_util.fire_all_clean_handlers(val) end self.values = nil @@ -403,11 +402,8 @@ local function sync_data(self) local pre_index = self.values_hash[key] if pre_index then local pre_val = self.values[pre_index] - if pre_val and pre_val.clean_handlers then - for _, clean_handler in ipairs(pre_val.clean_handlers) do - clean_handler(pre_val) - end - pre_val.clean_handlers = nil + if pre_val then + config_util.fire_all_clean_handlers(pre_val) end if res.value then @@ -511,7 +507,7 @@ do end local err - etcd_cli, err = etcd_apisix.new() + etcd_cli, err = etcd_apisix.switch_proxy() return etcd_cli, err end end @@ -812,18 +808,13 @@ function _M.init() return true end - local etcd_cli, err = get_etcd() + -- don't go through proxy during start because the proxy is not available + local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() if not etcd_cli then return nil, "failed to start a etcd instance: " .. 
err end - -- don't go through proxy during start because the proxy is not available - local proxy = etcd_cli.unix_socket_proxy - etcd_cli.unix_socket_proxy = nil - local etcd_conf = local_conf.etcd - local prefix = etcd_conf.prefix local res, err = readdir(etcd_cli, prefix, create_formatter(prefix)) - etcd_cli.unix_socket_proxy = proxy if not res then return nil, err end diff --git a/apisix/core/config_local.lua b/apisix/core/config_local.lua index cf44feed193a..1c17086dc6e2 100644 --- a/apisix/core/config_local.lua +++ b/apisix/core/config_local.lua @@ -19,8 +19,9 @@ -- -- @module core.config_local -local file = require("apisix.cli.file") +local file = require("apisix.cli.file") local schema = require("apisix.cli.schema") +local error = error local _M = {} @@ -65,7 +66,10 @@ function _M.local_conf(force) end -- fill the default value by the schema - schema.validate(default_conf) + local ok, err = schema.validate(default_conf) + if not ok then + error(err) + end config_data = default_conf return config_data diff --git a/apisix/core/config_util.lua b/apisix/core/config_util.lua index 8a2ce7b57b7b..b3fb13b7cce5 100644 --- a/apisix/core/config_util.lua +++ b/apisix/core/config_util.lua @@ -20,8 +20,10 @@ -- @module core.config_util local core_tab = require("apisix.core.table") +local log = require("apisix.core.log") local str_byte = string.byte local str_char = string.char +local ipairs = ipairs local setmetatable = setmetatable local tostring = tostring local type = type @@ -56,23 +58,56 @@ end -- or cancelled. Note that Nginx worker exit doesn't trigger the clean handler. -- Return an index so that we can cancel it later. 
function _M.add_clean_handler(item, func) - local idx = #item.clean_handlers + 1 - item.clean_handlers[idx] = func - return idx + if not item.clean_handlers._id then + item.clean_handlers._id = 1 + end + + local id = item.clean_handlers._id + item.clean_handlers._id = item.clean_handlers._id + 1 + core_tab.insert(item.clean_handlers, {f = func, id = id}) + return id end -- cancel a clean handler added by add_clean_handler. -- If `fire` is true, call the clean handler. function _M.cancel_clean_handler(item, idx, fire) - local f = item.clean_handlers[idx] - core_tab.remove(item.clean_handlers, idx) + local pos, f + -- the number of pending clean handler is small so we can cancel them in O(n) + for i, clean_handler in ipairs(item.clean_handlers) do + if clean_handler.id == idx then + pos = i + f = clean_handler.f + break + end + end + + if not pos then + log.error("failed to find clean_handler with idx ", idx) + return + end + + core_tab.remove(item.clean_handlers, pos) if fire then f(item) end end +-- fire all clean handlers added by add_clean_handler. +function _M.fire_all_clean_handlers(item) + if not item.clean_handlers then + return + end + + for _, clean_handler in ipairs(item.clean_handlers) do + clean_handler.f(item) + end + + item.clean_handlers = nil +end + + --- -- Convert different time units to seconds as time units. 
-- Time intervals can be specified in milliseconds, seconds, minutes, hours, days and so on, diff --git a/apisix/core/config_xds.lua b/apisix/core/config_xds.lua index 793592b6fb4c..bdb45206a917 100644 --- a/apisix/core/config_xds.lua +++ b/apisix/core/config_xds.lua @@ -20,6 +20,7 @@ -- @module core.config_xds local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local string = require("apisix.core.string") local log = require("apisix.core.log") local json = require("apisix.core.json") @@ -151,12 +152,7 @@ local function sync_data(self) if self.values then for _, val in ipairs(self.values) do - if val and val.clean_handlers then - for _, clean_handler in ipairs(val.clean_handlers) do - clean_handler(val) - end - val.clean_handlers = nil - end + config_util.fire_all_clean_handlers(val) end self.values = nil self.values_hash = nil diff --git a/apisix/core/config_yaml.lua b/apisix/core/config_yaml.lua index 24a5ff57aa6f..0c6564caf6bb 100644 --- a/apisix/core/config_yaml.lua +++ b/apisix/core/config_yaml.lua @@ -20,6 +20,7 @@ -- @module core.config_yaml local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local yaml = require("tinyyaml") local log = require("apisix.core.log") local json = require("apisix.core.json") @@ -142,12 +143,7 @@ local function sync_data(self) if self.values then for _, item in ipairs(self.values) do - if item.clean_handlers then - for _, clean_handler in ipairs(item.clean_handlers) do - clean_handler(item) - end - item.clean_handlers = nil - end + config_util.fire_all_clean_handlers(item) end self.values = nil end diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua index a57a5d0c86bf..d75a4191650f 100644 --- a/apisix/core/etcd.lua +++ b/apisix/core/etcd.lua @@ -21,33 +21,74 @@ local fetch_local_conf = require("apisix.core.config_local").local_conf local array_mt = require("apisix.core.json").array_mt +local v3_adapter = 
require("apisix.admin.v3_adapter") local etcd = require("resty.etcd") local clone_tab = require("table.clone") local health_check = require("resty.etcd.health_check") +local pl_path = require("pl.path") local ipairs = ipairs local setmetatable = setmetatable local string = string local tonumber = tonumber local ngx_config_prefix = ngx.config.prefix() +local ngx_socket_tcp = ngx.socket.tcp +local ngx_get_phase = ngx.get_phase local is_http = ngx.config.subsystem == "http" local _M = {} --- this function create the etcd client instance used in the Admin API +local function has_mtls_support() + local s = ngx_socket_tcp() + return s.tlshandshake ~= nil +end + + +local function _new(etcd_conf) + local prefix = etcd_conf.prefix + etcd_conf.http_host = etcd_conf.host + etcd_conf.host = nil + etcd_conf.prefix = nil + etcd_conf.protocol = "v3" + etcd_conf.api_prefix = "/v3" + + -- default to verify etcd cluster certificate + etcd_conf.ssl_verify = true + if etcd_conf.tls then + if etcd_conf.tls.verify == false then + etcd_conf.ssl_verify = false + end + + if etcd_conf.tls.cert then + etcd_conf.ssl_cert_path = etcd_conf.tls.cert + etcd_conf.ssl_key_path = etcd_conf.tls.key + end + + if etcd_conf.tls.sni then + etcd_conf.sni = etcd_conf.tls.sni + end + end + + local etcd_cli, err = etcd.new(etcd_conf) + if not etcd_cli then + return nil, nil, err + end + + return etcd_cli, prefix +end + + local function new() local local_conf, err = fetch_local_conf() if not local_conf then return nil, nil, err end - local etcd_conf + local etcd_conf = clone_tab(local_conf.etcd) local proxy_by_conf_server = false if local_conf.deployment then - etcd_conf = clone_tab(local_conf.deployment.etcd) - if local_conf.deployment.role == "traditional" -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in -- daily development. However, a stream proxy can't be the CP. 
@@ -62,34 +103,33 @@ local function new() proxy_by_conf_server = true elseif local_conf.deployment.role == "control_plane" then - -- TODO: add the proxy conf in control_plane - proxy_by_conf_server = true - end - else - etcd_conf = clone_tab(local_conf.etcd) - end + local addr = local_conf.deployment.role_control_plane.conf_server.listen + etcd_conf.host = {"https://" .. addr} + etcd_conf.tls = { + verify = false, + } - local prefix = etcd_conf.prefix - etcd_conf.http_host = etcd_conf.host - etcd_conf.host = nil - etcd_conf.prefix = nil - etcd_conf.protocol = "v3" - etcd_conf.api_prefix = "/v3" + if has_mtls_support() and local_conf.deployment.certs.cert then + local cert = local_conf.deployment.certs.cert + local cert_key = local_conf.deployment.certs.cert_key + etcd_conf.tls.cert = cert + etcd_conf.tls.key = cert_key + end - -- default to verify etcd cluster certificate - etcd_conf.ssl_verify = true - if etcd_conf.tls then - if etcd_conf.tls.verify == false then - etcd_conf.ssl_verify = false - end + proxy_by_conf_server = true - if etcd_conf.tls.cert then - etcd_conf.ssl_cert_path = etcd_conf.tls.cert - etcd_conf.ssl_key_path = etcd_conf.tls.key - end + elseif local_conf.deployment.role == "data_plane" then + if has_mtls_support() and local_conf.deployment.certs.cert then + local cert = local_conf.deployment.certs.cert + local cert_key = local_conf.deployment.certs.cert_key - if etcd_conf.tls.sni then - etcd_conf.sni = etcd_conf.tls.sni + if not etcd_conf.tls then + etcd_conf.tls = {} + end + + etcd_conf.tls.cert = cert + etcd_conf.tls.key = cert_key + end end end @@ -106,16 +146,53 @@ local function new() }) end - local etcd_cli - etcd_cli, err = etcd.new(etcd_conf) - if not etcd_cli then + return _new(etcd_conf) +end +_M.new = new + + +--- +-- Create an etcd client which will connect to etcd without being proxyed by conf server. +-- This method is used in init_worker phase when the conf server is not ready. 
+-- +-- @function core.etcd.new_without_proxy +-- @treturn table|nil the etcd client, or nil if failed. +-- @treturn string|nil the configured prefix of etcd keys, or nil if failed. +-- @treturn nil|string the error message. +local function new_without_proxy() + local local_conf, err = fetch_local_conf() + if not local_conf then return nil, nil, err end - return etcd_cli, prefix + local etcd_conf = clone_tab(local_conf.etcd) + return _new(etcd_conf) end -_M.new = new +_M.new_without_proxy = new_without_proxy + +local function switch_proxy() + if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then + return new_without_proxy() + end + + local etcd_cli, prefix, err = new() + if not etcd_cli or err then + return etcd_cli, prefix, err + end + + if not etcd_cli.unix_socket_proxy then + return etcd_cli, prefix, err + end + local sock_path = etcd_cli.unix_socket_proxy:sub(#"unix:" + 1) + local ok = pl_path.exists(sock_path) + if not ok then + return new_without_proxy() + end + + return etcd_cli, prefix, err +end +_M.switch_proxy = switch_proxy -- convert ETCD v3 entry to v2 one local function kvs_to_node(kvs) @@ -168,7 +245,7 @@ function _M.get_format(res, real_key, is_dir, formatter) return not_found(res) end - res.body.action = "get" + v3_adapter.to_v3(res.body, "get") if formatter then return formatter(res) @@ -196,6 +273,7 @@ function _M.get_format(res, real_key, is_dir, formatter) end res.body.kvs = nil + v3_adapter.to_v3_list(res.body) return res end @@ -229,7 +307,7 @@ end function _M.get(key, is_dir) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -248,7 +326,7 @@ end local function set(key, value, ttl) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -269,10 +347,14 @@ local function set(key, value, ttl) return nil, err end + if res.body.error then + return nil, res.body.error + end + 
res.headers["X-Etcd-Index"] = res.body.header.revision -- etcd v3 set would not return kv info - res.body.action = "set" + v3_adapter.to_v3(res.body, "set") res.body.node = {} res.body.node.key = prefix .. key res.body.node.value = value @@ -288,7 +370,7 @@ _M.set = set function _M.atomic_set(key, value, ttl, mod_revision) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -335,7 +417,7 @@ function _M.atomic_set(key, value, ttl, mod_revision) res.headers["X-Etcd-Index"] = res.body.header.revision -- etcd v3 set would not return kv info - res.body.action = "compareAndSwap" + v3_adapter.to_v3(res.body, "compareAndSwap") res.body.node = { key = key, value = value, @@ -347,7 +429,7 @@ end function _M.push(key, value, ttl) - local etcd_cli, _, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end @@ -373,13 +455,13 @@ function _M.push(key, value, ttl) return nil, err end - res.body.action = "create" + v3_adapter.to_v3(res.body, "create") return res, nil end function _M.delete(key) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -397,7 +479,7 @@ function _M.delete(key) end -- etcd v3 set would not return kv info - res.body.action = "delete" + v3_adapter.to_v3(res.body, "delete") res.body.node = {} res.body.key = prefix .. 
key @@ -417,7 +499,7 @@ end -- -- etcdserver = "3.5.0" -- -- } function _M.server_version() - local etcd_cli, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end @@ -427,7 +509,7 @@ end function _M.keepalive(id) - local etcd_cli, _, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end diff --git a/apisix/core/grpc.lua b/apisix/core/grpc.lua new file mode 100644 index 000000000000..9cf7d747a667 --- /dev/null +++ b/apisix/core/grpc.lua @@ -0,0 +1,27 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local pcall = pcall + +local ok, mod = pcall(require, "resty.grpc") +if not ok then + -- vanilla OpenResty doesn't have grpc-client-nginx-module + return nil +end + +-- Reimport the `resty.grpc` as `core.grpc`. 
For the doc of the gRPC API, +-- see https://github.com/api7/grpc-client-nginx-module +return mod diff --git a/apisix/core/profile.lua b/apisix/core/profile.lua index b4c4cfaa908f..389a9d42ccec 100644 --- a/apisix/core/profile.lua +++ b/apisix/core/profile.lua @@ -21,7 +21,7 @@ local _M = { version = 0.1, - profile = os.getenv("APISIX_PROFILE"), + profile = os.getenv("APISIX_PROFILE") or "", apisix_home = (ngx and ngx.config.prefix()) or "" } @@ -40,7 +40,7 @@ local _M = { -- local local_conf_path = profile:yaml_path("config") function _M.yaml_path(self, file_name) local file_path = self.apisix_home .. "conf/" .. file_name - if self.profile and file_name ~= "config-default" then + if self.profile ~= "" and file_name ~= "config-default" then file_path = file_path .. "-" .. self.profile end diff --git a/apisix/core/pubsub.lua b/apisix/core/pubsub.lua index 798153a5060e..d6bcafad11f4 100644 --- a/apisix/core/pubsub.lua +++ b/apisix/core/pubsub.lua @@ -34,7 +34,7 @@ local mt = { __index = _M } local pb_state local function init_pb_state() -- clear current pb state - pb.state(nil) + local old_pb_state = pb.state(nil) -- set int64 rule for pubsub module pb.option("int64_as_string") @@ -42,19 +42,15 @@ local function init_pb_state() -- initialize protoc compiler protoc.reload() local pubsub_protoc = protoc.new() - - -- compile the protobuf file on initial load module - -- ensure that each worker is loaded once - if not pubsub_protoc.loaded["pubsub.proto"] then - pubsub_protoc:addpath("apisix/include/apisix/model") - local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto") - if not ok then - pubsub_protoc:reset() - return "failed to load pubsub protocol: " .. err - end + pubsub_protoc:addpath("apisix/include/apisix/model") + local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto") + if not ok then + pubsub_protoc:reset() + pb.state(old_pb_state) + return "failed to load pubsub protocol: " .. 
err end - pb_state = pb.state(nil) + pb_state = pb.state(old_pb_state) end diff --git a/apisix/core/table.lua b/apisix/core/table.lua index 2a6bb47ced61..4346863079cf 100644 --- a/apisix/core/table.lua +++ b/apisix/core/table.lua @@ -41,6 +41,7 @@ local _M = { sort = table.sort, clone = require("table.clone"), isarray = require("table.isarray"), + isempty = require("table.isempty"), } @@ -91,6 +92,10 @@ end -- local arr = {"a", "b", "c"} -- local idx = core.table.array_find(arr, "b") -- idx = 2 function _M.array_find(array, val) + if type(array) ~= "table" then + return nil + end + for i, v in ipairs(array) do if v == val then return i diff --git a/apisix/core/version.lua b/apisix/core/version.lua index 3b0e34726fe4..3a6865a3b56b 100644 --- a/apisix/core/version.lua +++ b/apisix/core/version.lua @@ -20,5 +20,5 @@ -- @module core.version return { - VERSION = "2.14.1" + VERSION = "2.99.0" } diff --git a/apisix/discovery/dns/init.lua b/apisix/discovery/dns/init.lua index 335993e4364c..a0408a9c2301 100644 --- a/apisix/discovery/dns/init.lua +++ b/apisix/discovery/dns/init.lua @@ -59,11 +59,15 @@ function _M.init_worker() local local_conf = config_local.local_conf() local servers = local_conf.discovery.dns.servers + local default_order = {"last", "SRV", "A", "AAAA", "CNAME"} + local order = core.table.try_read_attr(local_conf, "discovery", "dns", "order") + order = order or default_order + local opts = { hosts = {}, resolvConf = {}, nameservers = servers, - order = {"last", "SRV", "A", "AAAA", "CNAME"}, + order = order, } local client, err = core.dns_client.new(opts) diff --git a/apisix/discovery/dns/schema.lua b/apisix/discovery/dns/schema.lua index 94fc9c3cbff6..989938ab1fa3 100644 --- a/apisix/discovery/dns/schema.lua +++ b/apisix/discovery/dns/schema.lua @@ -24,6 +24,15 @@ return { type = "string", }, }, + order = { + type = "array", + minItems = 1, + maxItems = 5, + uniqueItems = true, + items = { + enum = {"last", "SRV", "A", "AAAA", "CNAME"} + }, + }, }, required 
= {"servers"} } diff --git a/apisix/discovery/kubernetes/informer_factory.lua b/apisix/discovery/kubernetes/informer_factory.lua index a03f27a5ac68..3dca064039fb 100644 --- a/apisix/discovery/kubernetes/informer_factory.lua +++ b/apisix/discovery/kubernetes/informer_factory.lua @@ -263,6 +263,9 @@ local function list_watch(informer, apiserver) local reason, message local httpc = http.new() + informer.continue = "" + informer.version = "" + informer.fetch_state = "connecting" core.log.info("begin to connect ", apiserver.host, ":", apiserver.port) diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index a0491be45c59..d7258a55642b 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -24,15 +24,15 @@ local tostring = tostring local os = os local error = error local pcall = pcall +local setmetatable = setmetatable local process = require("ngx.process") local core = require("apisix.core") local util = require("apisix.cli.util") local local_conf = require("apisix.core.config_local").local_conf() local informer_factory = require("apisix.discovery.kubernetes.informer_factory") -local endpoint_dict -local default_weight +local ctx local endpoint_lrucache = core.lrucache.new({ ttl = 300, @@ -50,9 +50,9 @@ local function sort_nodes_cmp(left, right) end -local function on_endpoint_modified(informer, endpoint) - if informer.namespace_selector and - not informer:namespace_selector(endpoint.metadata.namespace) then +local function on_endpoint_modified(handle, endpoint) + if handle.namespace_selector and + not handle:namespace_selector(endpoint.metadata.namespace) then return end @@ -83,7 +83,7 @@ local function on_endpoint_modified(informer, endpoint) core.table.insert(nodes, { host = address.ip, port = port.port, - weight = default_weight + weight = handle.default_weight }) end end @@ -101,39 +101,39 @@ local function on_endpoint_modified(informer, endpoint) local endpoint_version = 
ngx.crc32_long(endpoint_content) local _, err - _, err = endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version) + _, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version) if err then core.log.error("set endpoint version into discovery DICT failed, ", err) return end - _, err = endpoint_dict:safe_set(endpoint_key, endpoint_content) + _, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content) if err then core.log.error("set endpoint into discovery DICT failed, ", err) - endpoint_dict:delete(endpoint_key .. "#version") + handle.endpoint_dict:delete(endpoint_key .. "#version") end end -local function on_endpoint_deleted(informer, endpoint) - if informer.namespace_selector and - not informer:namespace_selector(endpoint.metadata.namespace) then +local function on_endpoint_deleted(handle, endpoint) + if handle.namespace_selector and + not handle:namespace_selector(endpoint.metadata.namespace) then return end core.log.debug(core.json.delay_encode(endpoint)) local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name - endpoint_dict:delete(endpoint_key .. "#version") - endpoint_dict:delete(endpoint_key) + handle.endpoint_dict:delete(endpoint_key .. 
"#version") + handle.endpoint_dict:delete(endpoint_key) end -local function pre_list(informer) - endpoint_dict:flush_all() +local function pre_list(handle) + handle.endpoint_dict:flush_all() end -local function post_list(informer) - endpoint_dict:flush_expired() +local function post_list(handle) + handle.endpoint_dict:flush_expired() end @@ -184,7 +184,7 @@ local function setup_namespace_selector(conf, informer) local not_match = conf.namespace_selector.not_match local m, err for _, v in ipairs(not_match) do - m, err = ngx.re.match(namespace, v, "j") + m, err = ngx.re.match(namespace, v, "jo") if m and m[0] == namespace then return false end @@ -196,24 +196,26 @@ local function setup_namespace_selector(conf, informer) end return end + + return end local function read_env(key) if #key > 3 then - local a, b = string.byte(key, 1, 2) - local c = string.byte(key, #key, #key) - -- '$', '{', '}' == 36,123,125 - if a == 36 and b == 123 and c == 125 then - local env = string.sub(key, 3, #key - 1) - local value = os.getenv(env) - if not value then - return nil, "not found environment variable " .. env + local first, second = string.byte(key, 1, 2) + if first == string.byte('$') and second == string.byte('{') then + local last = string.byte(key, #key) + if last == string.byte('}') then + local env = string.sub(key, 3, #key - 1) + local value = os.getenv(env) + if not value then + return nil, "not found environment variable " .. 
env + end + return value end - return value, nil end end - return key end @@ -272,6 +274,9 @@ local function get_apiserver(conf) return nil, "one of [client.token,client.token_file] should be set but none" end + -- remove possible extra whitespace + apiserver.token = apiserver.token:gsub("%s+", "") + if apiserver.schema == "https" and apiserver.token == "" then return nil, "apiserver.token should set to non-empty string when service.schema is https" end @@ -279,8 +284,7 @@ local function get_apiserver(conf) return apiserver end - -local function create_endpoint_lrucache(endpoint_key, endpoint_port) +local function create_endpoint_lrucache(endpoint_dict, endpoint_key, endpoint_port) local endpoint_content = endpoint_dict:get_stale(endpoint_key) if not endpoint_content then core.log.error("get empty endpoint content from discovery DIC, this should not happen ", @@ -298,60 +302,64 @@ local function create_endpoint_lrucache(endpoint_key, endpoint_port) return endpoint[endpoint_port] end + local _M = { version = "0.0.1" } -function _M.nodes(service_name) - local pattern = "^(.*):(.*)$" -- namespace/name:port_name - local match = ngx.re.match(service_name, pattern, "jo") - if not match then - core.log.info("get unexpected upstream service_name: ", service_name) - return nil - end - local endpoint_key = match[1] - local endpoint_port = match[2] - local endpoint_version = endpoint_dict:get_stale(endpoint_key .. 
"#version") - if not endpoint_version then - core.log.info("get empty endpoint version from discovery DICT ", endpoint_key) - return nil - end +local function start_fetch(handle) + local timer_runner + timer_runner = function(premature) + if premature then + return + end - return endpoint_lrucache(service_name, endpoint_version, - create_endpoint_lrucache, endpoint_key, endpoint_port) + local ok, status = pcall(handle.list_watch, handle, handle.apiserver) + + local retry_interval = 0 + if not ok then + core.log.error("list_watch failed, kind: ", handle.kind, + ", reason: ", "RuntimeException", ", message : ", status) + retry_interval = 40 + elseif not status then + retry_interval = 40 + end + + ngx.timer.at(retry_interval, timer_runner) + end + ngx.timer.at(0, timer_runner) end -function _M.init_worker() - endpoint_dict = ngx.shared.kubernetes +local function single_mode_init(conf) + local endpoint_dict = ngx.shared.kubernetes if not endpoint_dict then - error("failed to get lua_shared_dict: kubernetes, please check your APISIX version") + error("failed to get lua_shared_dict: ngx.shared.kubernetes, " .. 
+ "please check your APISIX version") end if process.type() ~= "privileged agent" then + ctx = endpoint_dict return end - local discovery_conf = local_conf.discovery.kubernetes - - default_weight = discovery_conf.default_weight - - local apiserver, err = get_apiserver(discovery_conf) + local apiserver, err = get_apiserver(conf) if err then error(err) return end - local endpoints_informer, err = informer_factory.new("", "v1", - "Endpoints", "endpoints", "") + local default_weight = conf.default_weight + + local endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "") if err then error(err) return end - setup_namespace_selector(discovery_conf, endpoints_informer) - setup_label_selector(discovery_conf, endpoints_informer) + setup_namespace_selector(conf, endpoints_informer) + setup_label_selector(conf, endpoints_informer) endpoints_informer.on_added = on_endpoint_modified endpoints_informer.on_modified = on_endpoint_modified @@ -359,27 +367,152 @@ function _M.init_worker() endpoints_informer.pre_list = pre_list endpoints_informer.post_list = post_list - local timer_runner - timer_runner = function(premature) - if premature then + ctx = setmetatable({ + endpoint_dict = endpoint_dict, + apiserver = apiserver, + default_weight = default_weight + }, { __index = endpoints_informer }) + + start_fetch(ctx) +end + + +local function single_mode_nodes(service_name) + local pattern = "^(.*):(.*)$" -- namespace/name:port_name + local match = ngx.re.match(service_name, pattern, "jo") + if not match then + core.log.error("get unexpected upstream service_name: ", service_name) + return nil + end + + local endpoint_dict = ctx + local endpoint_key = match[1] + local endpoint_port = match[2] + local endpoint_version = endpoint_dict:get_stale(endpoint_key .. 
"#version") + if not endpoint_version then + core.log.info("get empty endpoint version from discovery DICT ", endpoint_key) + return nil + end + + return endpoint_lrucache(service_name, endpoint_version, + create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port) +end + + +local function multiple_mode_worker_init(confs) + for _, conf in ipairs(confs) do + + local id = conf.id + if ctx[id] then + error("duplicate id value") + end + + local endpoint_dict = ngx.shared["kubernetes-" .. id] + if not endpoint_dict then + error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. + "please check your APISIX version") + end + + ctx[id] = endpoint_dict + end +end + + +local function multiple_mode_init(confs) + ctx = core.table.new(#confs, 0) + + if process.type() ~= "privileged agent" then + multiple_mode_worker_init(confs) + return + end + + for _, conf in ipairs(confs) do + local id = conf.id + + if ctx[id] then + error("duplicate id value") + end + + local endpoint_dict = ngx.shared["kubernetes-" .. id] + if not endpoint_dict then + error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. 
+ "please check your APISIX version") + end + + local apiserver, err = get_apiserver(conf) + if err then + error(err) return end - local ok, status = pcall(endpoints_informer.list_watch, endpoints_informer, apiserver) + local default_weight = conf.default_weight - local retry_interval = 0 - if not ok then - core.log.error("list_watch failed, kind: ", endpoints_informer.kind, - ", reason: ", "RuntimeException", ", message : ", status) - retry_interval = 40 - elseif not status then - retry_interval = 40 + local endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "") + if err then + error(err) + return end - ngx.timer.at(retry_interval, timer_runner) + setup_namespace_selector(conf, endpoints_informer) + setup_label_selector(conf, endpoints_informer) + + endpoints_informer.on_added = on_endpoint_modified + endpoints_informer.on_modified = on_endpoint_modified + endpoints_informer.on_deleted = on_endpoint_deleted + endpoints_informer.pre_list = pre_list + endpoints_informer.post_list = post_list + + ctx[id] = setmetatable({ + endpoint_dict = endpoint_dict, + apiserver = apiserver, + default_weight = default_weight + }, { __index = endpoints_informer }) end - ngx.timer.at(0, timer_runner) + for _, item in pairs(ctx) do + start_fetch(item) + end +end + + +local function multiple_mode_nodes(service_name) + local pattern = "^(.*)/(.*/.*):(.*)$" -- id/namespace/name:port_name + local match = ngx.re.match(service_name, pattern, "jo") + if not match then + core.log.error("get unexpected upstream service_name: ", service_name) + return nil + end + + local id = match[1] + local endpoint_dict = ctx[id] + if not endpoint_dict then + core.log.error("id not exist") + return nil + end + + local endpoint_key = match[2] + local endpoint_port = match[3] + local endpoint_version = endpoint_dict:get_stale(endpoint_key .. 
"#version") + if not endpoint_version then + core.log.info("get empty endpoint version from discovery DICT ", endpoint_key) + return nil + end + + return endpoint_lrucache(service_name, endpoint_version, + create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port) +end + + +function _M.init_worker() + local discovery_conf = local_conf.discovery.kubernetes + core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf)) + if #discovery_conf == 0 then + _M.nodes = single_mode_nodes + single_mode_init(discovery_conf) + else + _M.nodes = multiple_mode_nodes + multiple_mode_init(discovery_conf) + end end return _M diff --git a/apisix/discovery/kubernetes/schema.lua b/apisix/discovery/kubernetes/schema.lua index 4888de63c484..170608f553b9 100644 --- a/apisix/discovery/kubernetes/schema.lua +++ b/apisix/discovery/kubernetes/schema.lua @@ -25,116 +25,186 @@ local port_patterns = { { pattern = [[^(([1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5]))$]] }, } +local schema_schema = { + type = "string", + enum = { "http", "https" }, + default = "https", +} + +local token_patterns = { + { pattern = [[\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] }, + { pattern = [[^[A-Za-z0-9+\/._=-]{0,4096}$]] }, +} + +local token_schema = { + type = "string", + oneOf = token_patterns, +} + +local token_file_schema = { + type = "string", + pattern = [[^[^\:*?"<>|]*$]], + minLength = 1, + maxLength = 500, +} + local namespace_pattern = [[^[a-z0-9]([-a-z0-9_.]*[a-z0-9])?$]] + local namespace_regex_pattern = [[^[\x21-\x7e]*$]] -return { +local namespace_selector_schema = { type = "object", properties = { - service = { - type = "object", - properties = { - schema = { - type = "string", - enum = { "http", "https" }, - default = "https", - }, - host = { - type = "string", - default = "${KUBERNETES_SERVICE_HOST}", - oneOf = host_patterns, - }, - port = { - type = "string", - default = "${KUBERNETES_SERVICE_PORT}", - oneOf = port_patterns, - }, - }, - 
default = { - schema = "https", - host = "${KUBERNETES_SERVICE_HOST}", - port = "${KUBERNETES_SERVICE_PORT}", - } + equal = { + type = "string", + pattern = namespace_pattern, }, - client = { - type = "object", - properties = { - token = { - type = "string", - oneOf = { - { pattern = [[\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] }, - { pattern = [[^[A-Za-z0-9+\/._=-]{0,4096}$]] }, - }, - }, - token_file = { - type = "string", - pattern = [[^[^\:*?"<>|]*$]], - minLength = 1, - maxLength = 500, - } - }, - oneOf = { - { required = { "token" } }, - { required = { "token_file" } }, + not_equal = { + type = "string", + pattern = namespace_pattern, + }, + match = { + type = "array", + items = { + type = "string", + pattern = namespace_regex_pattern }, - default = { - token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" - } + minItems = 1 }, - default_weight = { - type = "integer", - default = 50, - minimum = 0, + not_match = { + type = "array", + items = { + type = "string", + pattern = namespace_regex_pattern + }, + minItems = 1 }, - namespace_selector = { + }, + oneOf = { + { required = {} }, + { required = { "equal" } }, + { required = { "not_equal" } }, + { required = { "match" } }, + { required = { "not_match" } } + }, +} + +local label_selector_schema = { + type = "string", +} + +local default_weight_schema = { + type = "integer", + default = 50, + minimum = 0, +} + +local shared_size_schema = { + type = "string", + pattern = [[^[1-9][0-9]?m$]], + default = "1m", +} + +return { + anyOf = { + { type = "object", properties = { - equal = { - type = "string", - pattern = namespace_pattern, - }, - not_equal = { - type = "string", - pattern = namespace_pattern, + service = { + type = "object", + properties = { + schema = schema_schema, + host = { + type = "string", + oneOf = host_patterns, + default = "${KUBERNETES_SERVICE_HOST}", + }, + port = { + type = "string", + oneOf = port_patterns, + default = "${KUBERNETES_SERVICE_PORT}", + }, + }, + default = { + 
schema = "https", + host = "${KUBERNETES_SERVICE_HOST}", + port = "${KUBERNETES_SERVICE_PORT}", + } }, - match = { - type = "array", - items = { - type = "string", - pattern = namespace_regex_pattern + client = { + type = "object", + properties = { + token = token_schema, + token_file = token_file_schema, }, - minItems = 1 + default = { + token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + }, + ["if"] = { + ["not"] = { + anyOf = { + { required = { "token" } }, + { required = { "token_file" } }, + } + } + }, + ["then"] = { + properties = { + token_file = { + default = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + } + } }, - not_match = { - type = "array", - items = { + namespace_selector = namespace_selector_schema, + label_selector = label_selector_schema, + default_weight = default_weight_schema, + shared_size = shared_size_schema, + }, + }, + { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + id = { type = "string", - pattern = namespace_regex_pattern + pattern = [[^[a-z0-9]{1,8}$]] + }, + service = { + type = "object", + properties = { + schema = schema_schema, + host = { + type = "string", + oneOf = host_patterns, + }, + port = { + type = "string", + oneOf = port_patterns, + }, + }, + required = { "host", "port" } + }, + client = { + type = "object", + properties = { + token = token_schema, + token_file = token_file_schema, + }, + oneOf = { + { required = { "token" } }, + { required = { "token_file" } }, + }, }, - minItems = 1 + namespace_selector = namespace_selector_schema, + label_selector = label_selector_schema, + default_weight = default_weight_schema, + shared_size = shared_size_schema, }, + required = { "id", "service", "client" } }, - oneOf = { - { required = { } }, - { required = { "equal" } }, - { required = { "not_equal" } }, - { required = { "match" } }, - { required = { "not_match" } } - }, - }, - label_selector = { - type = "string", } - }, - default = { - service = { - schema = 
"https", - host = "${KUBERNETES_SERVICE_HOST}", - port = "${KUBERNETES_SERVICE_PORT}", - }, - client = { - token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" - }, - default_weight = 50 } } diff --git a/apisix/init.lua b/apisix/init.lua index 25d9d5aa2bfb..b2e0b706b5f2 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -41,7 +41,6 @@ local apisix_ssl = require("apisix.ssl") local upstream_util = require("apisix.utils.upstream") local xrpc = require("apisix.stream.xrpc") local ctxdump = require("resty.ctxdump") -local ngx_balancer = require("ngx.balancer") local debug = require("apisix.debug") local pubsub_kafka = require("apisix.pubsub.kafka") local ngx = ngx @@ -110,6 +109,10 @@ function _M.http_init_worker() -- for testing only core.log.info("random test in [1, 10000]: ", math.random(1, 10000)) + -- Because go's scheduler doesn't work after fork, we have to load the gRPC module + -- in each worker. + core.grpc = require("apisix.core.grpc") + local we = require("resty.worker.events") local ok, err = we.configure({shm = "worker-events", interval = 0.1}) if not ok then @@ -153,6 +156,9 @@ end function _M.http_exit_worker() + -- TODO: we can support stream plugin later - currently there is not `destory` method + -- in stream plugins + plugin.exit_worker() require("apisix.plugins.ext-plugin.init").exit_worker() end @@ -219,10 +225,7 @@ local function set_upstream_host(api_ctx, picked_server) return end - local nodes_count = up_conf.nodes and #up_conf.nodes or 0 - if nodes_count == 1 or ngx_balancer.recreate_request then - api_ctx.var.upstream_host = picked_server.upstream_host - end + api_ctx.var.upstream_host = picked_server.upstream_host end @@ -421,6 +424,10 @@ function _M.http_access_phase() api_ctx.route_id = route.value.id api_ctx.route_name = route.value.name + local ref = ctxdump.stash_ngx_ctx() + core.log.info("stash ngx ctx: ", ref) + ngx_var.ctx_ref = ref + -- run global rule plugin.run_global_rules(api_ctx, router.global_rules, nil) @@ 
-524,10 +531,6 @@ function _M.http_access_phase() core.log.info("enabled websocket for route: ", route.value.id) end - if route.value.service_protocol == "grpc" then - api_ctx.upstream_scheme = "grpc" - end - -- load balancer is not required by kafka upstream, so the upstream -- node selection process is intercepted and left to kafka to -- handle on its own @@ -554,10 +557,6 @@ function _M.http_access_phase() -- run the before_proxy method in access phase first to avoid always reinit request common_phase("before_proxy") - local ref = ctxdump.stash_ngx_ctx() - core.log.info("stash ngx ctx: ", ref) - ngx_var.ctx_ref = ref - local up_scheme = api_ctx.upstream_scheme if up_scheme == "grpcs" or up_scheme == "grpc" then return ngx.exec("@grpc_pass") @@ -767,7 +766,7 @@ end local function cors_admin() local_conf = core.config.local_conf() - if local_conf.apisix and not local_conf.apisix.enable_admin_cors then + if not core.table.try_read_attr(local_conf, "deployment", "admin", "enable_admin_cors") then return end diff --git a/apisix/plugin.lua b/apisix/plugin.lua index d8f4d538c83d..7c26ac4d7c50 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -19,6 +19,7 @@ local core = require("apisix.core") local config_util = require("apisix.core.config_util") local enable_debug = require("apisix.debug").enable_debug local wasm = require("apisix.wasm") +local expr = require("resty.expr.v1") local ngx = ngx local crc32 = ngx.crc32_short local ngx_exit = ngx.exit @@ -40,6 +41,9 @@ local stream_local_plugins_hash = core.table.new(0, 32) local merged_route = core.lrucache.new({ ttl = 300, count = 512 }) +local expr_lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) local local_conf local check_plugin_metadata @@ -72,6 +76,21 @@ local function custom_sort_plugin(l, r) return l._meta.priority > r._meta.priority end +local function check_disable(plugin_conf) + if not plugin_conf then + return nil + end + + if not plugin_conf._meta then + return nil + end + + if 
type(plugin_conf._meta) ~= "table" then + return nil + end + + return plugin_conf._meta.disable +end local PLUGIN_TYPE_HTTP = 1 local PLUGIN_TYPE_STREAM = 2 @@ -139,14 +158,6 @@ local function load_plugin(name, plugins_list, plugin_type) local plugin_injected_schema = core.schema.plugin_injected_schema if plugin.schema['$comment'] ~= plugin_injected_schema['$comment'] then - if properties.disable then - core.log.error("invalid plugin [", name, - "]: found forbidden 'disable' field in the schema") - return - end - - properties.disable = plugin_injected_schema.disable - if properties._meta then core.log.error("invalid plugin [", name, "]: found forbidden '_meta' field in the schema") @@ -157,7 +168,6 @@ local function load_plugin(name, plugins_list, plugin_type) -- new injected fields should be added under `_meta` -- 1. so we won't break user's code when adding any new injected fields -- 2. the semantics is clear, especially in the doc and in the caller side - -- TODO: move the `disable` to `_meta` too plugin.schema['$comment'] = plugin_injected_schema['$comment'] end @@ -272,7 +282,7 @@ local function load_stream(plugin_names) end -function _M.load(config) +local function get_plugin_names(config) local http_plugin_names local stream_plugin_names @@ -294,7 +304,7 @@ function _M.load(config) local plugins_conf = config.value -- plugins_conf can be nil when another instance writes into etcd key "/apisix/plugins/" if not plugins_conf then - return local_plugins + return true end for _, conf in ipairs(plugins_conf) do @@ -306,6 +316,16 @@ function _M.load(config) end end + return false, http_plugin_names, stream_plugin_names +end + + +function _M.load(config) + local ignored, http_plugin_names, stream_plugin_names = get_plugin_names(config) + if ignored then + return local_plugins + end + if ngx.config.subsystem == "http" then if not http_plugin_names then core.log.error("failed to read plugin list from local file") @@ -336,6 +356,24 @@ function _M.load(config) end 
+function _M.exit_worker() + for name, plugin in pairs(local_plugins_hash) do + local ty = PLUGIN_TYPE_HTTP + if plugin.type == "wasm" then + ty = PLUGIN_TYPE_HTTP_WASM + end + unload_plugin(name, ty) + end + + -- we need to load stream plugin so that we can check their schemas in + -- Admin API. Maybe we can avoid calling `load` in this case? So that + -- we don't need to call `destroy` too + for name in pairs(stream_local_plugins_hash) do + unload_plugin(name, PLUGIN_TYPE_STREAM) + end +end + + local function trace_plugins_info_for_debug(ctx, plugins) if not enable_debug() then return @@ -371,6 +409,32 @@ local function trace_plugins_info_for_debug(ctx, plugins) end end +local function meta_filter(ctx, plugin_name, plugin_conf) + local filter = plugin_conf._meta and plugin_conf._meta.filter + if not filter then + return true + end + + local ex, ok, err + if ctx then + ex, err = expr_lrucache(plugin_name .. ctx.conf_type .. ctx.conf_id, + ctx.conf_version, expr.new, filter) + else + ex, err = expr.new(filter) + end + if not ex then + core.log.warn("failed to get the 'vars' expression: ", err , + " plugin_name: ", plugin_name) + return true + end + ok, err = ex:eval() + if err then + core.log.warn("failed to run the 'vars' expression: ", err, + " plugin_name: ", plugin_name) + return true + end + return ok +end function _M.filter(ctx, conf, plugins, route_conf, phase) local user_plugin_conf = conf.value.plugins @@ -389,10 +453,17 @@ function _M.filter(ctx, conf, plugins, route_conf, phase) local name = plugin_obj.name local plugin_conf = user_plugin_conf[name] - if type(plugin_conf) == "table" and not plugin_conf.disable then + if type(plugin_conf) ~= "table" then + goto continue + end + + local matched = meta_filter(ctx, name, plugin_conf) + local disable = check_disable(plugin_conf) + if not disable and matched then if plugin_obj.run_policy == "prefer_route" and route_plugin_conf ~= nil then local plugin_conf_in_route = route_plugin_conf[name] - if 
plugin_conf_in_route and not plugin_conf_in_route.disable then + local disable_in_route = check_disable(plugin_conf_in_route) + if plugin_conf_in_route and not disable_in_route then goto continue end end @@ -402,9 +473,9 @@ function _M.filter(ctx, conf, plugins, route_conf, phase) end core.table.insert(plugins, plugin_obj) core.table.insert(plugins, plugin_conf) - - ::continue:: end + + ::continue:: end trace_plugins_info_for_debug(ctx, plugins) @@ -470,7 +541,8 @@ function _M.stream_filter(user_route, plugins) local name = plugin_obj.name local plugin_conf = user_plugin_conf[name] - if type(plugin_conf) == "table" and not plugin_conf.disable then + local disable = check_disable(plugin_conf) + if type(plugin_conf) == "table" and not disable then core.table.insert(plugins, plugin_obj) core.table.insert(plugins, plugin_conf) end @@ -583,7 +655,8 @@ function _M.merge_consumer_route(route_conf, consumer_conf, api_ctx) core.log.info("route conf: ", core.json.delay_encode(route_conf)) core.log.info("consumer conf: ", core.json.delay_encode(consumer_conf)) - local flag = tostring(route_conf) .. tostring(consumer_conf) + local flag = route_conf.value.id .. "#" .. route_conf.modifiedIndex + .. "#" .. consumer_conf.id .. "#" .. 
consumer_conf.modifiedIndex local new_conf = merged_route(flag, nil, merge_consumer_route, route_conf, consumer_conf) @@ -620,16 +693,21 @@ end function _M.init_worker() - _M.load() + local _, http_plugin_names, stream_plugin_names = get_plugin_names() -- some plugins need to be initialized in init* phases - if is_http and local_plugins_hash["prometheus"] then - local prometheus_enabled_in_stream = stream_local_plugins_hash["prometheus"] + if is_http and core.table.array_find(http_plugin_names, "prometheus") then + local prometheus_enabled_in_stream = + core.table.array_find(stream_plugin_names, "prometheus") require("apisix.plugins.prometheus.exporter").http_init(prometheus_enabled_in_stream) - elseif not is_http and stream_local_plugins_hash["prometheus"] then + elseif not is_http and core.table.array_find(stream_plugin_names, "prometheus") then require("apisix.plugins.prometheus.exporter").stream_init() end + -- someone's plugin needs to be initialized after prometheus + -- see https://github.com/apache/apisix/issues/3286 + _M.load() + if local_conf and not local_conf.apisix.enable_admin then init_plugins_syncer() end @@ -711,16 +789,18 @@ local function check_single_plugin_schema(name, plugin_conf, schema_type, skip_d end if plugin_obj.check_schema then - local disable = plugin_conf.disable - plugin_conf.disable = nil - local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) if not ok then return false, "failed to check the configuration of plugin " .. name .. " err: " .. err end - plugin_conf.disable = disable + if plugin_conf._meta and plugin_conf._meta.filter then + ok, err = expr.new(plugin_conf._meta.filter) + if not ok then + return nil, "failed to validate the 'vars' expression: " .. 
err + end + end end return true @@ -768,16 +848,11 @@ local function stream_check_schema(plugins_conf, schema_type, skip_disabled_plug end if plugin_obj.check_schema then - local disable = plugin_conf.disable - plugin_conf.disable = nil - local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) if not ok then return false, "failed to check the configuration of " .. "stream plugin [" .. name .. "]: " .. err end - - plugin_conf.disable = disable end ::CONTINUE:: @@ -824,13 +899,17 @@ function _M.run_plugin(phase, plugins, api_ctx) and phase ~= "delayed_body_filter" then for i = 1, #plugins, 2 do - if phase == "rewrite_in_consumer" and plugins[i + 1]._from_consumer - and plugins[i].type ~= "auth"then - phase = "rewrite" + local phase_func + if phase == "rewrite_in_consumer" then + if plugins[i].type == "auth" then + plugins[i + 1]._skip_rewrite_in_consumer = true + end + phase_func = plugins[i]["rewrite"] + else + phase_func = plugins[i][phase] end - local phase_func = plugins[i][phase] - if phase == "rewrite" and plugins[i + 1]._skip_rewrite_in_consumer then + if phase == "rewrite_in_consumer" and plugins[i + 1]._skip_rewrite_in_consumer then goto CONTINUE end diff --git a/apisix/plugin_config.lua b/apisix/plugin_config.lua index 903ea6ec1913..cc5a6ff38456 100644 --- a/apisix/plugin_config.lua +++ b/apisix/plugin_config.lua @@ -65,7 +65,9 @@ function _M.merge(route_conf, plugin_config) route_conf.value.plugins = core.table.clone(route_conf.value.plugins) for name, value in pairs(plugin_config.value.plugins) do - route_conf.value.plugins[name] = value + if not route_conf.value.plugins[name] then + route_conf.value.plugins[name] = value + end end route_conf.update_count = route_conf.update_count + 1 diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua index 50f718d6b933..336fb69b17ea 100644 --- a/apisix/plugins/authz-keycloak.lua +++ b/apisix/plugins/authz-keycloak.lua @@ -31,8 +31,6 @@ local schema = { token_endpoint = {type = 
"string", minLength = 1, maxLength = 4096}, resource_registration_endpoint = {type = "string", minLength = 1, maxLength = 4096}, client_id = {type = "string", minLength = 1, maxLength = 100}, - audience = {type = "string", minLength = 1, maxLength = 100, - description = "Deprecated, use `client_id` instead."}, client_secret = {type = "string", minLength = 1, maxLength = 100}, grant_type = { type = "string", @@ -73,6 +71,7 @@ local schema = { maxLength = 4096 }, }, + required = {"client_id"}, allOf = { -- Require discovery or token endpoint. { @@ -81,13 +80,6 @@ local schema = { {required = {"token_endpoint"}} } }, - -- Require client_id or audience. - { - anyOf = { - {required = {"client_id"}}, - {required = {"audience"}} - } - }, -- If lazy_load_paths is true, require discovery or resource registration endpoint. { anyOf = { @@ -120,28 +112,10 @@ local _M = { function _M.check_schema(conf) - -- Check for deprecated audience attribute and emit warnings if used. - if conf.audience then - log.warn("Plugin attribute `audience` is deprecated, use `client_id` instead.") - if conf.client_id then - log.warn("Ignoring `audience` attribute in favor of `client_id`.") - end - end return core.schema.check(schema, conf) end --- Return the configured client ID parameter. -local function authz_keycloak_get_client_id(conf) - if conf.client_id then - -- Prefer client_id, if given. - return conf.client_id - end - - return conf.audience -end - - -- Some auxiliary functions below heavily inspired by the excellent -- lua-resty-openidc module; see https://github.com/zmartzone/lua-resty-openidc @@ -339,7 +313,7 @@ end -- Ensure a valid service account access token is available for the configured client. 
local function authz_keycloak_ensure_sa_access_token(conf) - local client_id = authz_keycloak_get_client_id(conf) + local client_id = conf.client_id local ttl = conf.cache_ttl_seconds local token_endpoint = authz_keycloak_get_token_endpoint(conf) @@ -648,7 +622,7 @@ local function evaluate_permissions(conf, ctx, token) method = "POST", body = ngx.encode_args({ grant_type = conf.grant_type, - audience = authz_keycloak_get_client_id(conf), + audience = conf.client_id, response_mode = "decision", permission = permission }), @@ -732,7 +706,7 @@ local function generate_token_using_password_grant(conf,ctx) return 422, {message = err} end - local client_id = authz_keycloak_get_client_id(conf) + local client_id = conf.client_id local token_endpoint = authz_keycloak_get_token_endpoint(conf) diff --git a/apisix/plugins/cas-auth.lua b/apisix/plugins/cas-auth.lua new file mode 100644 index 000000000000..2a3e5049c446 --- /dev/null +++ b/apisix/plugins/cas-auth.lua @@ -0,0 +1,199 @@ +-- +---- Licensed to the Apache Software Foundation (ASF) under one or more +---- contributor license agreements. See the NOTICE file distributed with +---- this work for additional information regarding copyright ownership. +---- The ASF licenses this file to You under the Apache License, Version 2.0 +---- (the "License"); you may not use this file except in compliance with +---- the License. You may obtain a copy of the License at +---- +---- http://www.apache.org/licenses/LICENSE-2.0 +---- +---- Unless required by applicable law or agreed to in writing, software +---- distributed under the License is distributed on an "AS IS" BASIS, +---- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +---- See the License for the specific language governing permissions and +---- limitations under the License. 
+---- +local core = require("apisix.core") +local http = require("resty.http") +local ngx = ngx +local ngx_re_match = ngx.re.match + +local CAS_REQUEST_URI = "CAS_REQUEST_URI" +local COOKIE_NAME = "CAS_SESSION" +local COOKIE_PARAMS = "; Path=/; HttpOnly" +local SESSION_LIFETIME = 3600 +local STORE_NAME = "cas_sessions" + +local store = ngx.shared[STORE_NAME] + + +local plugin_name = "cas-auth" +local schema = { + type = "object", + properties = { + idp_uri = {type = "string"}, + cas_callback_uri = {type = "string"}, + logout_uri = {type = "string"}, + }, + required = { + "idp_uri", "cas_callback_uri", "logout_uri" + } +} + +local _M = { + version = 0.1, + priority = 2597, + name = plugin_name, + schema = schema +} + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + +local function uri_without_ticket(conf, ctx) + return ctx.var.scheme .. "://" .. ctx.var.host .. ":" .. + ctx.var.server_port .. conf.cas_callback_uri +end + +local function get_session_id(ctx) + return ctx.var["cookie_" .. COOKIE_NAME] +end + +local function set_our_cookie(name, val) + core.response.add_header("Set-Cookie", name .. "=" .. val .. COOKIE_PARAMS) +end + +local function first_access(conf, ctx) + local login_uri = conf.idp_uri .. "/login?" .. + ngx.encode_args({ service = uri_without_ticket(conf, ctx) }) + core.log.info("first access: ", login_uri, + ", cookie: ", ctx.var.http_cookie, ", request_uri: ", ctx.var.request_uri) + set_our_cookie(CAS_REQUEST_URI, ctx.var.request_uri) + core.response.set_header("Location", login_uri) + return ngx.HTTP_MOVED_TEMPORARILY +end + +local function with_session_id(conf, ctx, session_id) + -- does the cookie exist in our store? 
+ local user = store:get(session_id); + core.log.info("ticket=", session_id, ", user=", user) + if user == nil then + set_our_cookie(COOKIE_NAME, "deleted; Max-Age=0") + return first_access(conf, ctx) + else + -- refresh the TTL + store:set(session_id, user, SESSION_LIFETIME) + end +end + +local function set_store_and_cookie(session_id, user) + -- place cookie into cookie store + local success, err, forcible = store:add(session_id, user, SESSION_LIFETIME) + if success then + if forcible then + core.log.info("CAS cookie store is out of memory") + end + set_our_cookie(COOKIE_NAME, session_id) + else + if err == "no memory" then + core.log.emerg("CAS cookie store is out of memory") + elseif err == "exists" then + core.log.error("Same CAS ticket validated twice, this should never happen!") + else + core.log.error("CAS cookie store: ", err) + end + end + return success +end + +local function validate(conf, ctx, ticket) + -- send a request to CAS to validate the ticket + local httpc = http.new() + local res, err = httpc:request_uri(conf.idp_uri .. + "/serviceValidate", + { query = { ticket = ticket, service = uri_without_ticket(conf, ctx) } }) + + if res and res.status == ngx.HTTP_OK and res.body ~= nil then + if core.string.find(res.body, "") then + local m = ngx_re_match(res.body, "(.*?)", "jo"); + if m then + return m[1] + end + else + core.log.info("CAS serviceValidate failed: ", res.body) + end + else + core.log.error("validate ticket failed: status=", (res and res.status), + ", has_body=", (res and res.body ~= nil or false), ", err=", err) + end + return nil +end + +local function validate_with_cas(conf, ctx, ticket) + local user = validate(conf, ctx, ticket) + if user and set_store_and_cookie(ticket, user) then + local request_uri = ctx.var["cookie_" .. 
CAS_REQUEST_URI] + set_our_cookie(CAS_REQUEST_URI, "deleted; Max-Age=0") + core.log.info("ticket: ", ticket, + ", cookie: ", ctx.var.http_cookie, ", request_uri: ", request_uri, ", user=", user) + core.response.set_header("Location", request_uri) + return ngx.HTTP_MOVED_TEMPORARILY + else + return ngx.HTTP_UNAUTHORIZED, {message = "invalid ticket"} + end +end + +local function logout(conf, ctx) + local session_id = get_session_id(ctx) + if session_id == nil then + return ngx.HTTP_UNAUTHORIZED + end + + core.log.info("logout: ticket=", session_id, ", cookie=", ctx.var.http_cookie) + store:delete(session_id) + set_our_cookie(COOKIE_NAME, "deleted; Max-Age=0") + + core.response.set_header("Location", conf.idp_uri .. "/logout") + return ngx.HTTP_MOVED_TEMPORARILY +end + +function _M.access(conf, ctx) + local method = core.request.get_method() + local uri = ctx.var.uri + + if method == "GET" and uri == conf.logout_uri then + return logout(conf, ctx) + end + + if method == "POST" and uri == conf.cas_callback_uri then + local data = core.request.get_body() + local ticket = data:match("(.*)") + if ticket == nil then + return ngx.HTTP_BAD_REQUEST, + {message = "invalid logout request from IdP, no ticket"} + end + core.log.info("Back-channel logout (SLO) from IdP: LogoutRequest: ", data) + local session_id = ticket + local user = store:get(session_id); + if user then + store:delete(session_id) + core.log.info("SLO: user=", user, ", tocket=", ticket) + end + else + local session_id = get_session_id(ctx) + if session_id ~= nil then + return with_session_id(conf, ctx, session_id) + end + + local ticket = ctx.var.arg_ticket + if ticket ~= nil and uri == conf.cas_callback_uri then + return validate_with_cas(conf, ctx, ticket) + else + return first_access(conf, ctx) + end + end +end + +return _M diff --git a/apisix/plugins/clickhouse-logger.lua b/apisix/plugins/clickhouse-logger.lua index f7b734645334..026f0cfa93da 100644 --- a/apisix/plugins/clickhouse-logger.lua +++ 
b/apisix/plugins/clickhouse-logger.lua @@ -21,6 +21,7 @@ local core = require("apisix.core") local http = require("resty.http") local url = require("net.url") local plugin = require("apisix.plugin") +local math_random = math.random local ngx = ngx local tostring = tostring @@ -31,7 +32,9 @@ local batch_processor_manager = bp_manager_mod.new(plugin_name) local schema = { type = "object", properties = { + -- deprecated, use "endpoint_addrs" instead endpoint_addr = core.schema.uri_def, + endpoint_addrs = {items = core.schema.uri_def, type = "array", minItems = 1}, user = {type = "string", default = ""}, password = {type = "string", default = ""}, database = {type = "string", default = ""}, @@ -40,7 +43,10 @@ local schema = { name = {type = "string", default = "clickhouse logger"}, ssl_verify = {type = "boolean", default = true}, }, - required = {"endpoint_addr", "user", "password", "database", "logtable"} + oneOf = { + {required = {"endpoint_addr", "user", "password", "database", "logtable"}}, + {required = {"endpoint_addrs", "user", "password", "database", "logtable"}} + }, } @@ -72,11 +78,17 @@ end local function send_http_data(conf, log_message) local err_msg local res = true - local url_decoded = url.parse(conf.endpoint_addr) + local selected_endpoint_addr + if conf.endpoint_addr then + selected_endpoint_addr = conf.endpoint_addr + else + selected_endpoint_addr = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] + end + local url_decoded = url.parse(selected_endpoint_addr) local host = url_decoded.host local port = url_decoded.port - core.log.info("sending a batch logs to ", conf.endpoint_addr) + core.log.info("sending a batch logs to ", selected_endpoint_addr) if not port then if url_decoded.scheme == "https" then diff --git a/apisix/plugins/elasticsearch-logger.lua b/apisix/plugins/elasticsearch-logger.lua new file mode 100644 index 000000000000..105cbe4d98bb --- /dev/null +++ b/apisix/plugins/elasticsearch-logger.lua @@ -0,0 +1,176 @@ +-- +-- Licensed to 
the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local http = require("resty.http") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local plugin = require("apisix.plugin") + +local ngx = ngx +local str_format = core.string.format + +local plugin_name = "elasticsearch-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) + + +local schema = { + type = "object", + properties = { + endpoint_addr = { + type = "string", + pattern = "[^/]$", + }, + field = { + type = "object", + properties = { + index = { type = "string"}, + type = { type = "string"} + }, + required = {"index"} + }, + auth = { + type = "object", + properties = { + username = { + type = "string", + minLength = 1 + }, + password = { + type = "string", + minLength = 1 + }, + }, + required = {"username", "password"}, + }, + timeout = { + type = "integer", + minimum = 1, + default = 10 + }, + ssl_verify = { + type = "boolean", + default = true + } + }, + required = { "endpoint_addr", "field" }, +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = log_util.metadata_schema_log_format, + }, +} + + +local _M = { + version 
= 0.1, + priority = 413, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + + +local function get_logger_entry(conf, ctx) + local entry + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + if metadata and metadata.value.log_format + and core.table.nkeys(metadata.value.log_format) > 0 + then + entry = log_util.get_custom_format_log(ctx, metadata.value.log_format) + core.log.info("custom log format entry: ", core.json.delay_encode(entry)) + else + entry = log_util.get_full_log(ngx, conf) + core.log.info("full log entry: ", core.json.delay_encode(entry)) + end + + return core.json.encode({ + create = { + _index = conf.field.index, + _type = conf.field.type + } + }) .. "\n" .. + core.json.encode(entry) .. "\n" +end + + +local function send_to_elasticsearch(conf, entries) + local httpc, err = http.new() + if not httpc then + return false, str_format("create http error: %s", err) + end + + local uri = conf.endpoint_addr .. "/_bulk" + local body = core.table.concat(entries, "") + local headers = {["Content-Type"] = "application/x-ndjson"} + if conf.auth then + local authorization = "Basic " .. ngx.encode_base64( + conf.auth.username .. ":" .. 
conf.auth.password + ) + headers["Authorization"] = authorization + end + + core.log.info("uri: ", uri, ", body: ", body) + + httpc:set_timeout(conf.timeout * 1000) + local resp, err = httpc:request_uri(uri, { + ssl_verify = conf.ssl_verify, + method = "POST", + headers = headers, + body = body + }) + if not resp then + return false, err + end + + if resp.status ~= 200 then + return false, str_format("elasticsearch server returned status: %d, body: %s", + resp.status, resp.body or "") + end + + return true +end + + +function _M.log(conf, ctx) + local entry = get_logger_entry(conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + return send_to_elasticsearch(conf, entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/apisix/plugins/error-log-logger.lua b/apisix/plugins/error-log-logger.lua index f4f28b472153..5aa7a7418cfd 100644 --- a/apisix/plugins/error-log-logger.lua +++ b/apisix/plugins/error-log-logger.lua @@ -66,12 +66,6 @@ local metadata_schema = { }, required = {"endpoint_addr", "user", "password", "database", "logtable"} }, - host = {schema_def.host_def, description = "Deprecated, use `tcp.host` instead."}, - port = {type = "integer", minimum = 0, description = "Deprecated, use `tcp.port` instead."}, - tls = {type = "boolean", default = false, - description = "Deprecated, use `tcp.tls` instead."}, - tls_server_name = {type = "string", - description = "Deprecated, use `tcp.tls_server_name` instead."}, name = {type = "string", default = plugin_name}, level = {type = "string", default = "WARN", enum = {"STDERR", "EMERG", "ALERT", "CRIT", "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"}}, diff --git a/apisix/plugins/ext-plugin-post-resp.lua b/apisix/plugins/ext-plugin-post-resp.lua index e6156804c750..40d3ca450c68 100644 --- a/apisix/plugins/ext-plugin-post-resp.lua +++ b/apisix/plugins/ext-plugin-post-resp.lua @@ 
-16,6 +16,7 @@ -- local core = require("apisix.core") local ext = require("apisix.plugins.ext-plugin.init") +local helper = require("apisix.plugins.ext-plugin.helper") local constants = require("apisix.constants") local http = require("resty.http") @@ -100,36 +101,46 @@ local function get_response(ctx, http_obj) return res, err end +local function send_chunk(chunk) + if not chunk then + return nil + end -local function send_response(res, code) - ngx.status = code or res.status + local ok, print_err = ngx_print(chunk) + if not ok then + return "output response failed: ".. (print_err or "") + end + local ok, flush_err = ngx_flush(true) + if not ok then + core.log.warn("flush response failed: ", flush_err) + end - local reader = res.body_reader - repeat - local chunk, ok, read_err, print_err, flush_err - -- TODO: HEAD or 304 - chunk, read_err = reader() - if read_err then - return "read response failed: ".. (read_err or "") - end + return nil +end - if chunk then - ok, print_err = ngx_print(chunk) - if not ok then - return "output response failed: ".. (print_err or "") - end - ok, flush_err = ngx_flush(true) - if not ok then - core.log.warn("flush response failed: ", flush_err) +-- TODO: response body is empty (304 or HEAD) +-- If the upstream returns 304 or the request method is HEAD, +-- there is no response body. In this case, +-- we need to send a response to the client in the plugin, +-- instead of continuing to execute the subsequent plugin. 
+local function send_response(ctx, res, code) + ngx.status = code or res.status + + local chunks = ctx.runner_ext_response_body + if chunks then + for i=1, #chunks do + local err = send_chunk(chunks[i]) + if err then + return err end end - until not chunk + return + end - return nil + return helper.response_reader(res.body_reader, send_chunk) end - function _M.check_schema(conf) return core.schema.check(_M.schema, conf) end @@ -157,7 +168,7 @@ function _M.before_proxy(conf, ctx) core.log.info("ext-plugin will send response") -- send origin response, status maybe changed. - err = send_response(res, code) + err = send_response(ctx, res, code) close(http_obj) if err then diff --git a/apisix/plugins/ext-plugin/helper.lua b/apisix/plugins/ext-plugin/helper.lua index 4d141a7f067a..7750bb54adc2 100644 --- a/apisix/plugins/ext-plugin/helper.lua +++ b/apisix/plugins/ext-plugin/helper.lua @@ -56,4 +56,26 @@ function _M.get_conf_token_cache_time() end +function _M.response_reader(reader, callback, ...) + if not reader then + return "get response reader failed" + end + + repeat + local chunk, read_err, cb_err + chunk, read_err = reader() + if read_err then + return "read response failed: ".. (read_err or "") + end + + if chunk then + cb_err = callback(chunk, ...) 
+ if cb_err then + return cb_err + end + end + until not chunk +end + + return _M diff --git a/apisix/plugins/ext-plugin/init.lua b/apisix/plugins/ext-plugin/init.lua index b575ba45f435..0f9e0de14e5f 100644 --- a/apisix/plugins/ext-plugin/init.lua +++ b/apisix/plugins/ext-plugin/init.lua @@ -31,6 +31,7 @@ local extra_info_req = require("A6.ExtraInfo.Req") local extra_info_var = require("A6.ExtraInfo.Var") local extra_info_resp = require("A6.ExtraInfo.Resp") local extra_info_reqbody = require("A6.ExtraInfo.ReqBody") +local extra_info_respbody = require("A6.ExtraInfo.RespBody") local text_entry = require("A6.TextEntry") local err_resp = require("A6.Err.Resp") local err_code = require("A6.Err.Code") @@ -304,7 +305,31 @@ local function handle_extra_info(ctx, input) if err then core.log.error("failed to read request body: ", err) end - + elseif info_type == extra_info.RespBody then + local ext_res = ctx.runner_ext_response + if ext_res then + local info = req:Info() + local respbody_req = extra_info_respbody.New() + respbody_req:Init(info.byte, info.pos) + + local chunks = {} + local err = helper.response_reader(ext_res.body_reader, function (chunk, chunks) + -- When the upstream response is chunked type, + -- we will receive the complete response body + -- before sending it to the runner program + -- to reduce the number of RPC calls. + core.table.insert_tail(chunks, chunk) + end, chunks) + if err then + -- TODO: send RPC_ERROR to runner + core.log.error(err) + else + res = core.table.concat(chunks) + ctx.runner_ext_response_body = chunks + end + else + core.log.error("failed to read response body: not exits") + end else return nil, "unsupported info type: " .. info_type end @@ -732,9 +757,26 @@ local rpc_handlers = { return nil, "failed to send RPC_HTTP_RESP_CALL: " .. err end - local ty, resp = receive(sock) - if ty == nil then - return nil, "failed to receive RPC_HTTP_RESP_CALL: " .. 
resp + local ty, resp + while true do + ty, resp = receive(sock) + if ty == nil then + return nil, "failed to receive RPC_HTTP_REQ_CALL: " .. resp + end + + if ty ~= constants.RPC_EXTRA_INFO then + break + end + + local out, err = handle_extra_info(ctx, resp) + if not out then + return nil, "failed to handle RPC_EXTRA_INFO: " .. err + end + + local ok, err = send(sock, constants.RPC_EXTRA_INFO, out) + if not ok then + return nil, "failed to reply RPC_EXTRA_INFO: " .. err + end end if ty ~= constants.RPC_HTTP_RESP_CALL then diff --git a/apisix/plugins/grpc-transcode.lua b/apisix/plugins/grpc-transcode.lua index 7da62a805fdd..2405d33ec764 100644 --- a/apisix/plugins/grpc-transcode.lua +++ b/apisix/plugins/grpc-transcode.lua @@ -77,15 +77,24 @@ local schema = { required = { "proto_id", "service", "method" }, } +-- Based on https://cloud.google.com/apis/design/errors#handling_errors local status_rel = { - ["3"] = 400, - ["4"] = 504, - ["5"] = 404, - ["7"] = 403, - ["11"] = 416, - ["12"] = 501, - ["13"] = 500, - ["14"] = 503, + ["1"] = 499, -- CANCELLED + ["2"] = 500, -- UNKNOWN + ["3"] = 400, -- INVALID_ARGUMENT + ["4"] = 504, -- DEADLINE_EXCEEDED + ["5"] = 404, -- NOT_FOUND + ["6"] = 409, -- ALREADY_EXISTS + ["7"] = 403, -- PERMISSION_DENIED + ["8"] = 429, -- RESOURCE_EXHAUSTED + ["9"] = 400, -- FAILED_PRECONDITION + ["10"] = 409, -- ABORTED + ["11"] = 400, -- OUT_OF_RANGE + ["12"] = 501, -- UNIMPLEMENTED + ["13"] = 500, -- INTERNAL + ["14"] = 503, -- UNAVAILABLE + ["15"] = 500, -- DATA_LOSS + ["16"] = 401, -- UNAUTHENTICATED } local _M = { diff --git a/apisix/plugins/grpc-transcode/proto.lua b/apisix/plugins/grpc-transcode/proto.lua index c30c17e71855..c2a3cb523394 100644 --- a/apisix/plugins/grpc-transcode/proto.lua +++ b/apisix/plugins/grpc-transcode/proto.lua @@ -159,7 +159,7 @@ end function _M.init() local err - protos, err = core.config.new("/proto", { + protos, err = core.config.new("/protos", { automatic = true, item_schema = core.schema.proto }) diff --git 
a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua index 3d3ebdfb4e2d..93cd8c9bef3b 100644 --- a/apisix/plugins/http-logger.lua +++ b/apisix/plugins/http-logger.lua @@ -33,7 +33,7 @@ local schema = { type = "object", properties = { uri = core.schema.uri_def, - auth_header = {type = "string", default = ""}, + auth_header = {type = "string"}, timeout = {type = "integer", minimum = 1, default = 3}, include_req_body = {type = "boolean", default = false}, include_resp_body = {type = "boolean", default = false}, diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua index 82c12c95b2c5..36006975f5d3 100644 --- a/apisix/plugins/jwt-auth.lua +++ b/apisix/plugins/jwt-auth.lua @@ -60,7 +60,7 @@ local consumer_schema = { secret = {type = "string"}, algorithm = { type = "string", - enum = {"HS256", "HS512", "RS256"}, + enum = {"HS256", "HS512", "RS256", "ES256"}, default = "HS256" }, exp = {type = "integer", minimum = 1, default = 86400}, @@ -71,6 +71,11 @@ local consumer_schema = { vault = { type = "object", properties = {} + }, + lifetime_grace_period = { + type = "integer", + minimum = 0, + default = 0 } }, dependencies = { @@ -89,7 +94,7 @@ local consumer_schema = { public_key = {type = "string"}, private_key= {type = "string"}, algorithm = { - enum = {"RS256"}, + enum = {"RS256", "ES256"}, }, }, required = {"public_key", "private_key"}, @@ -101,7 +106,7 @@ local consumer_schema = { properties = {} }, algorithm = { - enum = {"RS256"}, + enum = {"RS256", "ES256"}, }, }, required = {"vault"}, @@ -161,7 +166,7 @@ function _M.check_schema(conf, schema_type) return true end - if conf.algorithm ~= "RS256" and not conf.secret then + if conf.algorithm ~= "RS256" and conf.algorithm ~= "ES256" and not conf.secret then conf.secret = ngx_encode_base64(resty_random.bytes(32, true)) elseif conf.base64_secret then if ngx_decode_base64(conf.secret) == nil then @@ -169,7 +174,7 @@ function _M.check_schema(conf, schema_type) end end - if conf.algorithm == 
"RS256" then + if conf.algorithm == "RS256" or conf.algorithm == "ES256" then -- Possible options are a) both are in vault, b) both in schema -- c) one in schema, another in vault. if not conf.public_key then @@ -235,7 +240,7 @@ local function get_secret(conf, consumer_name) end -local function get_rsa_keypair(conf, consumer_name) +local function get_rsa_or_ecdsa_keypair(conf, consumer_name) local public_key = conf.public_key local private_key = conf.private_key -- if keys are present in conf, no need to query vault (fallback) @@ -304,8 +309,10 @@ local function sign_jwt_with_HS(key, consumer, payload) end -local function sign_jwt_with_RS256(key, consumer, payload) - local public_key, private_key, err = get_rsa_keypair(consumer.auth_conf, consumer.username) +local function sign_jwt_with_RS256_ES256(key, consumer, payload) + local public_key, private_key, err = get_rsa_or_ecdsa_keypair( + consumer.auth_conf, consumer.username + ) if not public_key then core.log.error("failed to sign jwt, err: ", err) core.response.exit(503, "failed to sign jwt") @@ -340,12 +347,12 @@ local function algorithm_handler(consumer, method_only) end return get_secret(consumer.auth_conf, consumer.username) - elseif consumer.auth_conf.algorithm == "RS256" then + elseif consumer.auth_conf.algorithm == "RS256" or consumer.auth_conf.algorithm == "ES256" then if method_only then - return sign_jwt_with_RS256 + return sign_jwt_with_RS256_ES256 end - local public_key, _, err = get_rsa_keypair(consumer.auth_conf, consumer.username) + local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf, consumer.username) return public_key, err end end @@ -389,7 +396,10 @@ function _M.rewrite(conf, ctx) core.log.error("failed to retrieve secrets, err: ", err) return 503, {message = "failed to verify jwt"} end - jwt_obj = jwt:verify_jwt_obj(auth_secret, jwt_obj) + local claim_specs = jwt:get_default_validation_options(jwt_obj) + claim_specs.lifetime_grace_period = 
consumer.auth_conf.lifetime_grace_period + + jwt_obj = jwt:verify_jwt_obj(auth_secret, jwt_obj, claim_specs) core.log.info("jwt object: ", core.json.delay_encode(jwt_obj)) if not jwt_obj.verified then diff --git a/apisix/plugins/ldap-auth.lua b/apisix/plugins/ldap-auth.lua index 3fce91141119..d155696b6337 100644 --- a/apisix/plugins/ldap-auth.lua +++ b/apisix/plugins/ldap-auth.lua @@ -19,7 +19,7 @@ local ngx = ngx local ngx_re = require("ngx.re") local ipairs = ipairs local consumer_mod = require("apisix.consumer") -local lualdap = require("lualdap") +local ldap = require("resty.ldap") local lrucache = core.lrucache.new({ ttl = 300, count = 512 @@ -31,8 +31,9 @@ local schema = { properties = { base_dn = { type = "string" }, ldap_uri = { type = "string" }, - use_tls = { type = "boolean" }, - uid = { type = "string" } + use_tls = { type = "boolean", default = false }, + tls_verify = { type = "boolean", default = false }, + uid = { type = "string", default = "cn" } }, required = {"base_dn","ldap_uri"}, } @@ -136,11 +137,23 @@ function _M.rewrite(conf, ctx) end -- 2. try authenticate the user against the ldap server - local uid = conf.uid or "cn" - - local userdn = uid .. "=" .. user.username .. "," .. conf.base_dn - local ld = lualdap.open_simple (conf.ldap_uri, userdn, user.password, conf.use_tls) - if not ld then + local ldap_host, ldap_port = core.utils.parse_addr(conf.ldap_uri) + + local userdn = conf.uid .. "=" .. user.username .. "," .. 
conf.base_dn + local ldapconf = { + timeout = 10000, + start_tls = false, + ldap_host = ldap_host, + ldap_port = ldap_port or 389, + ldaps = conf.use_tls, + tls_verify = conf.tls_verify, + base_dn = conf.base_dn, + attribute = conf.uid, + keepalive = 60000, + } + local res, err = ldap.ldap_authenticate(user.username, user.password, ldapconf) + if not res then + core.log.warn("ldap-auth failed: ", err) return 401, { message = "Invalid user authorization" } end diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index 746e474b93d0..0eafd64235b3 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -14,267 +14,24 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local limit_local_new = require("resty.limit.count").new -local core = require("apisix.core") -local apisix_plugin = require("apisix.plugin") -local tab_insert = table.insert -local ipairs = ipairs -local pairs = pairs - +local limit_count = require("apisix.plugins.limit-count.init") local plugin_name = "limit-count" -local limit_redis_cluster_new -local limit_redis_new -do - local redis_src = "apisix.plugins.limit-count.limit-count-redis" - limit_redis_new = require(redis_src).new - - local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" - limit_redis_cluster_new = require(cluster_src).new -end -local lrucache = core.lrucache.new({ - type = 'plugin', serial_creating = true, -}) -local group_conf_lru = core.lrucache.new({ - type = 'plugin', -}) - - -local policy_to_additional_properties = { - redis = { - properties = { - redis_host = { - type = "string", minLength = 2 - }, - redis_port = { - type = "integer", minimum = 1, default = 6379, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_database = { - type = "integer", minimum = 0, default = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - }, - required = 
{"redis_host"}, - }, - ["redis-cluster"] = { - properties = { - redis_cluster_nodes = { - type = "array", - minItems = 2, - items = { - type = "string", minLength = 2, maxLength = 100 - }, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - redis_cluster_name = { - type = "string", - }, - }, - required = {"redis_cluster_nodes", "redis_cluster_name"}, - }, -} -local schema = { - type = "object", - properties = { - count = {type = "integer", exclusiveMinimum = 0}, - time_window = {type = "integer", exclusiveMinimum = 0}, - group = {type = "string"}, - key = {type = "string", default = "remote_addr"}, - key_type = {type = "string", - enum = {"var", "var_combination", "constant"}, - default = "var", - }, - rejected_code = { - type = "integer", minimum = 200, maximum = 599, default = 503 - }, - rejected_msg = { - type = "string", minLength = 1 - }, - policy = { - type = "string", - enum = {"local", "redis", "redis-cluster"}, - default = "local", - }, - allow_degradation = {type = "boolean", default = false}, - show_limit_quota_header = {type = "boolean", default = true} - }, - required = {"count", "time_window"}, - ["if"] = { - properties = { - policy = { - enum = {"redis"}, - }, - }, - }, - ["then"] = policy_to_additional_properties.redis, - ["else"] = { - ["if"] = { - properties = { - policy = { - enum = {"redis-cluster"}, - }, - }, - }, - ["then"] = policy_to_additional_properties["redis-cluster"], - } -} - - local _M = { version = 0.4, priority = 1002, name = plugin_name, - schema = schema, + schema = limit_count.schema, } -local function group_conf(conf) - return conf -end - - function _M.check_schema(conf) - local ok, err = core.schema.check(schema, conf) - if not ok then - return false, err - end - - if conf.group then - local fields = {} - for k in pairs(schema.properties) do - tab_insert(fields, k) - end - local extra = policy_to_additional_properties[conf.policy] - if 
extra then - for k in pairs(extra.properties) do - tab_insert(fields, k) - end - end - - local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) - - for _, field in ipairs(fields) do - if not core.table.deep_eq(prev_conf[field], conf[field]) then - core.log.error("previous limit-conn group ", prev_conf.group, - " conf: ", core.json.encode(prev_conf)) - core.log.error("current limit-conn group ", conf.group, - " conf: ", core.json.encode(conf)) - return false, "group conf mismatched" - end - end - end - - return true -end - - -local function create_limit_obj(conf) - core.log.info("create new limit-count plugin instance") - - if not conf.policy or conf.policy == "local" then - return limit_local_new("plugin-" .. plugin_name, conf.count, - conf.time_window) - end - - if conf.policy == "redis" then - return limit_redis_new("plugin-" .. plugin_name, - conf.count, conf.time_window, conf) - end - - if conf.policy == "redis-cluster" then - return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, - conf.time_window, conf) - end - - return nil + return limit_count.check_schema(conf) end function _M.access(conf, ctx) - core.log.info("ver: ", ctx.conf_version) - - local lim, err - if not conf.group then - lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf) - else - lim, err = lrucache(conf.group, "", create_limit_obj, conf) - end - - if not lim then - core.log.error("failed to fetch limit.count object: ", err) - if conf.allow_degradation then - return - end - return 500 - end - - local conf_key = conf.key - local key - if conf.key_type == "var_combination" then - local err, n_resolved - key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) - if err then - core.log.error("could not resolve vars in ", conf_key, " error: ", err) - end - - if n_resolved == 0 then - key = nil - end - elseif conf.key_type == "constant" then - key = conf_key - else - key = ctx.var[conf_key] - end - - if key == nil then - 
core.log.info("The value of the configured key is empty, use client IP instead") - -- When the value of key is empty, use client IP instead - key = ctx.var["remote_addr"] - end - - -- here we add a separator ':' to mark the boundary of the prefix and the key itself - if not conf.group then - -- Here we use plugin-level conf version to prevent the counter from being resetting - -- because of the change elsewhere. - -- A route which reuses a previous route's ID will inherits its counter. - key = ctx.conf_type .. apisix_plugin.conf_version(conf) .. ':' .. key - else - key = conf.group .. ':' .. key - end - - core.log.info("limit key: ", key) - - local delay, remaining = lim:incoming(key, true) - if not delay then - local err = remaining - if err == "rejected" then - if conf.rejected_msg then - return conf.rejected_code, { error_msg = conf.rejected_msg } - end - return conf.rejected_code - end - - core.log.error("failed to limit count: ", err) - if conf.allow_degradation then - return - end - return 500, {error_msg = "failed to limit count"} - end - - if conf.show_limit_quota_header then - core.response.set_header("X-RateLimit-Limit", conf.count, - "X-RateLimit-Remaining", remaining) - end + return limit_count.rate_limit(conf, ctx) end diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua new file mode 100644 index 000000000000..c9051d2e14ef --- /dev/null +++ b/apisix/plugins/limit-count/init.lua @@ -0,0 +1,310 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local limit_local_new = require("resty.limit.count").new +local core = require("apisix.core") +local apisix_plugin = require("apisix.plugin") +local tab_insert = table.insert +local ipairs = ipairs +local pairs = pairs + + +local plugin_name = "limit-count" +local limit_redis_cluster_new +local limit_redis_new +do + local redis_src = "apisix.plugins.limit-count.limit-count-redis" + limit_redis_new = require(redis_src).new + + local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" + limit_redis_cluster_new = require(cluster_src).new +end +local lrucache = core.lrucache.new({ + type = 'plugin', serial_creating = true, +}) +local group_conf_lru = core.lrucache.new({ + type = 'plugin', +}) + + +local policy_to_additional_properties = { + redis = { + properties = { + redis_host = { + type = "string", minLength = 2 + }, + redis_port = { + type = "integer", minimum = 1, default = 6379, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_database = { + type = "integer", minimum = 0, default = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + }, + required = {"redis_host"}, + }, + ["redis-cluster"] = { + properties = { + redis_cluster_nodes = { + type = "array", + minItems = 2, + items = { + type = "string", minLength = 2, maxLength = 100 + }, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + redis_cluster_name = { + type = "string", + }, + }, + required = {"redis_cluster_nodes", 
"redis_cluster_name"}, + }, +} +local schema = { + type = "object", + properties = { + count = {type = "integer", exclusiveMinimum = 0}, + time_window = {type = "integer", exclusiveMinimum = 0}, + group = {type = "string"}, + key = {type = "string", default = "remote_addr"}, + key_type = {type = "string", + enum = {"var", "var_combination", "constant"}, + default = "var", + }, + rejected_code = { + type = "integer", minimum = 200, maximum = 599, default = 503 + }, + rejected_msg = { + type = "string", minLength = 1 + }, + policy = { + type = "string", + enum = {"local", "redis", "redis-cluster"}, + default = "local", + }, + allow_degradation = {type = "boolean", default = false}, + show_limit_quota_header = {type = "boolean", default = true} + }, + required = {"count", "time_window"}, + ["if"] = { + properties = { + policy = { + enum = {"redis"}, + }, + }, + }, + ["then"] = policy_to_additional_properties.redis, + ["else"] = { + ["if"] = { + properties = { + policy = { + enum = {"redis-cluster"}, + }, + }, + }, + ["then"] = policy_to_additional_properties["redis-cluster"], + } +} + +local schema_copy = core.table.deepcopy(schema) + +local _M = { + schema = schema +} + + +local function group_conf(conf) + return conf +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + if conf.group then + -- means that call by some plugin not support + if conf._vid then + return false, "group is not supported" + end + + local fields = {} + -- When the goup field is configured, + -- we will use schema_copy to get the whitelist of properties, + -- so that we can avoid getting injected properties. 
+ for k in pairs(schema_copy.properties) do + tab_insert(fields, k) + end + local extra = policy_to_additional_properties[conf.policy] + if extra then + for k in pairs(extra.properties) do + tab_insert(fields, k) + end + end + + local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) + + for _, field in ipairs(fields) do + if not core.table.deep_eq(prev_conf[field], conf[field]) then + core.log.error("previous limit-conn group ", prev_conf.group, + " conf: ", core.json.encode(prev_conf)) + core.log.error("current limit-conn group ", conf.group, + " conf: ", core.json.encode(conf)) + return false, "group conf mismatched" + end + end + end + + return true +end + + +local function create_limit_obj(conf) + core.log.info("create new limit-count plugin instance") + + if not conf.policy or conf.policy == "local" then + return limit_local_new("plugin-" .. plugin_name, conf.count, + conf.time_window) + end + + if conf.policy == "redis" then + return limit_redis_new("plugin-" .. plugin_name, + conf.count, conf.time_window, conf) + end + + if conf.policy == "redis-cluster" then + return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, + conf.time_window, conf) + end + + return nil +end + + +local function gen_limit_key(conf, ctx, key) + if conf.group then + return conf.group .. ':' .. key + end + + -- here we add a separator ':' to mark the boundary of the prefix and the key itself + -- Here we use plugin-level conf version to prevent the counter from being resetting + -- because of the change elsewhere. + -- A route which reuses a previous route's ID will inherits its counter. + local new_key = ctx.conf_type .. ctx.conf_id .. ':' .. apisix_plugin.conf_version(conf) + .. ':' .. key + if conf._vid then + -- conf has _vid means it's from workflow plugin, add _vid to the key + -- so that the counter is unique per action. + return new_key .. ':' .. 
conf._vid + end + + return new_key +end + + +local function gen_limit_obj(conf, ctx) + if conf.group then + return lrucache(conf.group, "", create_limit_obj, conf) + end + + local extra_key + if conf._vid then + extra_key = conf.policy .. '#' .. conf._vid + else + extra_key = conf.policy + end + + return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf) +end + + +function _M.rate_limit(conf, ctx) + core.log.info("ver: ", ctx.conf_version) + + local lim, err = gen_limit_obj(conf, ctx) + + if not lim then + core.log.error("failed to fetch limit.count object: ", err) + if conf.allow_degradation then + return + end + return 500 + end + + local conf_key = conf.key + local key + if conf.key_type == "var_combination" then + local err, n_resolved + key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) + if err then + core.log.error("could not resolve vars in ", conf_key, " error: ", err) + end + + if n_resolved == 0 then + key = nil + end + elseif conf.key_type == "constant" then + key = conf_key + else + key = ctx.var[conf_key] + end + + if key == nil then + core.log.info("The value of the configured key is empty, use client IP instead") + -- When the value of key is empty, use client IP instead + key = ctx.var["remote_addr"] + end + + key = gen_limit_key(conf, ctx, key) + core.log.info("limit key: ", key) + + local delay, remaining = lim:incoming(key, true) + if not delay then + local err = remaining + if err == "rejected" then + if conf.rejected_msg then + return conf.rejected_code, { error_msg = conf.rejected_msg } + end + return conf.rejected_code + end + + core.log.error("failed to limit count: ", err) + if conf.allow_degradation then + return + end + return 500, {error_msg = "failed to limit count"} + end + + if conf.show_limit_quota_header then + core.response.set_header("X-RateLimit-Limit", conf.count, + "X-RateLimit-Remaining", remaining) + end +end + + +return _M diff --git a/apisix/plugins/log-rotate.lua 
b/apisix/plugins/log-rotate.lua index 79459371702e..60b1e3ddb547 100644 --- a/apisix/plugins/log-rotate.lua +++ b/apisix/plugins/log-rotate.lua @@ -21,6 +21,7 @@ local plugin = require("apisix.plugin") local process = require("ngx.process") local signal = require("resty.signal") local shell = require("resty.shell") +local ipairs = ipairs local ngx = ngx local ngx_time = ngx.time local ngx_update_time = ngx.update_time @@ -34,15 +35,14 @@ local str_sub = string.sub local str_find = string.find local str_format = string.format local str_reverse = string.reverse -local tab_insert = table.insert -local tab_sort = table.sort - +local ngx_sleep = require("apisix.core.utils").sleep local local_conf local plugin_name = "log-rotate" local INTERVAL = 60 * 60 -- rotate interval (unit: second) local MAX_KEPT = 24 * 7 -- max number of log files will be kept +local MAX_SIZE = -1 -- max size of file will be rotated local COMPRESSION_FILE_SUFFIX = ".tar.gz" -- compression file suffix local rotate_time local default_logs @@ -123,34 +123,22 @@ local function tab_sort_comp(a, b) end -local function scan_log_folder() - local t = { - access = {}, - error = {}, - } - - local log_dir, access_name = get_log_path_info("access.log") - local _, error_name = get_log_path_info("error.log") +local function scan_log_folder(log_file_name) + local t = {} - if enable_compression then - access_name = access_name .. COMPRESSION_FILE_SUFFIX - error_name = error_name .. 
COMPRESSION_FILE_SUFFIX - end + local log_dir, _ = get_log_path_info(log_file_name) for file in lfs.dir(log_dir) do local n = get_last_index(file, "__") if n ~= nil then local log_type = file:sub(n + 2) - if log_type == access_name then - tab_insert(t.access, file) - elseif log_type == error_name then - tab_insert(t.error, file) + if log_type == log_file_name then + core.table.insert(t, file) end end end - tab_sort(t.access, tab_sort_comp) - tab_sort(t.error, tab_sort_comp) + core.table.sort(t, tab_sort_comp) return t, log_dir end @@ -219,18 +207,79 @@ local function init_default_logs(logs_info, log_type) end +local function file_size(file) + local attr = lfs.attributes(file) + if attr then + return attr.size + end + return 0 +end + + +local function rotate_file(files, now_time, max_kept) + if core.table.isempty(files) then + return + end + + local new_files = core.table.new(2, 0) + -- rename the log files + for _, file in ipairs(files) do + local now_date = os_date("%Y-%m-%d_%H-%M-%S", now_time) + local new_file = rename_file(default_logs[file], now_date) + if not new_file then + return + end + + core.table.insert(new_files, new_file) + end + + -- send signal to reopen log files + local pid = process.get_master_pid() + core.log.warn("send USR1 signal to master process [", pid, "] for reopening log file") + local ok, err = signal.kill(pid, signal.signum("USR1")) + if not ok then + core.log.error("failed to send USR1 signal for reopening log file: ", err) + end + + if enable_compression then + -- Waiting for nginx reopen files + -- to avoid losing logs during compression + ngx_sleep(0.5) + + for _, new_file in ipairs(new_files) do + compression_file(new_file) + end + end + + for _, file in ipairs(files) do + -- clean the oldest file + local log_list, log_dir = scan_log_folder(file) + for i = max_kept + 1, #log_list do + local path = log_dir .. 
log_list[i] + local ok, err = os_remove(path) + if err then + core.log.error("remove old log file: ", path, " err: ", err, " res:", ok) + end + end + end +end + + local function rotate() local interval = INTERVAL local max_kept = MAX_KEPT + local max_size = MAX_SIZE local attr = plugin.plugin_attr(plugin_name) if attr then interval = attr.interval or interval max_kept = attr.max_kept or max_kept + max_size = attr.max_size or max_size enable_compression = attr.enable_compression or enable_compression end core.log.info("rotate interval:", interval) core.log.info("rotate max keep:", max_kept) + core.log.info("rotate max size:", max_size) if not default_logs then -- first init default log filepath and filename @@ -248,53 +297,28 @@ local function rotate() return end - if now_time < rotate_time then - -- did not reach the rotate time - core.log.info("rotate time: ", rotate_time, " now time: ", now_time) - return - end + if now_time >= rotate_time then + local files = {DEFAULT_ACCESS_LOG_FILENAME, DEFAULT_ERROR_LOG_FILENAME} + rotate_file(files, now_time, max_kept) - local now_date = os_date("%Y-%m-%d_%H-%M-%S", now_time) - local access_new_file = rename_file(default_logs[DEFAULT_ACCESS_LOG_FILENAME], now_date) - local error_new_file = rename_file(default_logs[DEFAULT_ERROR_LOG_FILENAME], now_date) - if not access_new_file and not error_new_file then -- reset rotate time rotate_time = rotate_time + interval - return - end - core.log.warn("send USR1 signal to master process [", - process.get_master_pid(), "] for reopening log file") - local ok, err = signal.kill(process.get_master_pid(), signal.signum("USR1")) - if not ok then - core.log.error("failed to send USR1 signal for reopening log file: ", err) - end + elseif max_size > 0 then + local access_log_file_size = file_size(default_logs[DEFAULT_ACCESS_LOG_FILENAME].file) + local error_log_file_size = file_size(default_logs[DEFAULT_ERROR_LOG_FILENAME].file) + local files = core.table.new(2, 0) - if enable_compression then 
- compression_file(access_new_file) - compression_file(error_new_file) - end - - -- clean the oldest file - local log_list, log_dir = scan_log_folder() - for i = max_kept + 1, #log_list.error do - local path = log_dir .. log_list.error[i] - ok, err = os_remove(path) - if err then - core.log.error("remove old error file: ", path, " err: ", err, " res:", ok) + if access_log_file_size >= max_size then + core.table.insert(files, DEFAULT_ACCESS_LOG_FILENAME) end - end - for i = max_kept + 1, #log_list.access do - local path = log_dir .. log_list.access[i] - ok, err = os_remove(path) - if err then - core.log.error("remove old error file: ", path, " err: ", err, " res:", ok) + if error_log_file_size >= max_size then + core.table.insert(files, DEFAULT_ERROR_LOG_FILENAME) end - end - -- reset rotate time - rotate_time = rotate_time + interval + rotate_file(files, now_time, max_kept) + end end diff --git a/apisix/plugins/openfunction.lua b/apisix/plugins/openfunction.lua new file mode 100644 index 000000000000..935d6ebbc540 --- /dev/null +++ b/apisix/plugins/openfunction.lua @@ -0,0 +1,35 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local ngx_encode_base64 = ngx.encode_base64 +local plugin_name, plugin_version, priority = "openfunction", 0.1, -1902 + +local openfunction_authz_schema = { + service_token = {type = "string"} +} + +local function request_processor(conf, ctx, params) + local headers = params.headers or {} + -- setting authorization headers if authorization.service_token exists + if conf.authorization and conf.authorization.service_token then + headers["authorization"] = "Basic " .. ngx_encode_base64(conf.authorization.service_token) + end + + params.headers = headers +end + +return require("apisix.plugins.serverless.generic-upstream")(plugin_name, + plugin_version, priority, request_processor, openfunction_authz_schema) diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua index 4a6dbda1ccec..b472feca0159 100644 --- a/apisix/plugins/openid-connect.lua +++ b/apisix/plugins/openid-connect.lua @@ -73,6 +73,11 @@ local schema = { }, public_key = {type = "string"}, token_signing_alg_values_expected = {type = "string"}, + use_pkce = { + description = "when set to true the PKCE (Proof Key for Code Exchange) will be used.", + type = "boolean", + default = false + }, set_access_token_header = { description = "Whether the access token should be added as a header to the request " .. 
"for downstream", diff --git a/apisix/plugins/opentelemetry.lua b/apisix/plugins/opentelemetry.lua index ea05b0a8025b..c0e3a74e4cc8 100644 --- a/apisix/plugins/opentelemetry.lua +++ b/apisix/plugins/opentelemetry.lua @@ -53,6 +53,7 @@ local lrucache = core.lrucache.new({ type = 'plugin', count = 128, ttl = 24 * 60 * 60, }) +local asterisk = string.byte("*", 1) local attr_schema = { type = "object", @@ -169,6 +170,13 @@ local schema = { type = "string", minLength = 1, } + }, + additional_header_prefix_attributes = { + type = "array", + items = { + type = "string", + minLength = 1, + } } } } @@ -273,6 +281,27 @@ local function create_tracer_obj(conf) end +local function inject_attributes(attributes, wanted_attributes, source, with_prefix) + for _, key in ipairs(wanted_attributes) do + local is_key_a_match = #key >= 2 and key:byte(-1) == asterisk and with_prefix + + if is_key_a_match then + local prefix = key:sub(0, -2) + for possible_key, value in pairs(source) do + if core.string.has_prefix(possible_key, prefix) then + core.table.insert(attributes, attr.string(possible_key, value)) + end + end + else + local val = source[key] + if val then + core.table.insert(attributes, attr.string(key, val)) + end + end + end +end + + function _M.rewrite(conf, api_ctx) local tracer, err = core.lrucache.plugin_ctx(lrucache, api_ctx, nil, create_tracer_obj, conf) if not tracer then @@ -286,17 +315,22 @@ function _M.rewrite(conf, api_ctx) attr.string("service", api_ctx.service_name), attr.string("route", api_ctx.route_name), } + if conf.additional_attributes then - for _, key in ipairs(conf.additional_attributes) do - local val = api_ctx.var[key] - if val then - core.table.insert(attributes, attr.string(key, val)) - end - end + inject_attributes(attributes, conf.additional_attributes, api_ctx.var, false) + end + + if conf.additional_header_prefix_attributes then + inject_attributes( + attributes, + conf.additional_header_prefix_attributes, + core.request.headers(api_ctx), + true + ) 
end local ctx = tracer:start(upstream_context, api_ctx.var.request_uri, { - kind = span_kind.client, + kind = span_kind.server, attributes = attributes, }) ctx:attach() diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua index c65a39c48ba2..45ff94c3f631 100644 --- a/apisix/plugins/prometheus/exporter.lua +++ b/apisix/plugins/prometheus/exporter.lua @@ -18,6 +18,7 @@ local base_prometheus = require("prometheus") local core = require("apisix.core") local plugin = require("apisix.plugin") local ipairs = ipairs +local pairs = pairs local ngx = ngx local re_gmatch = ngx.re.gmatch local ffi = require("ffi") @@ -38,6 +39,8 @@ local get_protos = require("apisix.plugins.grpc-transcode.proto").protos local service_fetch = require("apisix.http.service").get local latency_details = require("apisix.utils.log-util").latency_details_in_ms local xrpc = require("apisix.stream.xrpc") +local unpack = unpack +local next = next local ngx_capture @@ -64,6 +67,31 @@ local function gen_arr(...) 
return inner_tab_arr end +local extra_labels_tbl = {} + +local function extra_labels(name, ctx) + clear_tab(extra_labels_tbl) + + local attr = plugin.plugin_attr("prometheus") + local metrics = attr.metrics + + if metrics and metrics[name] and metrics[name].extra_labels then + local labels = metrics[name].extra_labels + for _, kv in ipairs(labels) do + local val, v = next(kv) + if ctx then + val = ctx.var[v:sub(2)] + if val == nil then + val = "" + end + end + core.table.insert(extra_labels_tbl, val) + end + end + + return extra_labels_tbl +end + local _M = {} @@ -122,6 +150,14 @@ function _M.http_init(prometheus_enabled_in_stream) "Etcd modify index for APISIX keys", {"key"}) + metrics.shared_dict_capacity_bytes = prometheus:gauge("shared_dict_capacity_bytes", + "The capacity of each nginx shared DICT since APISIX start", + {"name"}) + + metrics.shared_dict_free_space_bytes = prometheus:gauge("shared_dict_free_space_bytes", + "The free space of each nginx shared DICT since APISIX start", + {"name"}) + -- per service -- The consumer label indicates the name of consumer corresponds to the @@ -129,15 +165,17 @@ function _M.http_init(prometheus_enabled_in_stream) -- no consumer in request. 
metrics.status = prometheus:counter("http_status", "HTTP status codes per service in APISIX", - {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node"}) + {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node", + unpack(extra_labels("http_status"))}) metrics.latency = prometheus:histogram("http_latency", "HTTP request latency in milliseconds per service in APISIX", - {"type", "route", "service", "consumer", "node"}, DEFAULT_BUCKETS) + {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))}, + DEFAULT_BUCKETS) metrics.bandwidth = prometheus:counter("bandwidth", "Total bandwidth in bytes consumed per service in APISIX", - {"type", "route", "service", "consumer", "node"}) + {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))}) if prometheus_enabled_in_stream then init_stream_metrics() @@ -199,25 +237,35 @@ function _M.http_log(conf, ctx) metrics.status:inc(1, gen_arr(vars.status, route_id, matched_uri, matched_host, - service_id, consumer_name, balancer_ip)) + service_id, consumer_name, balancer_ip, + unpack(extra_labels("http_status", ctx)))) local latency, upstream_latency, apisix_latency = latency_details(ctx) + local latency_extra_label_values = extra_labels("http_latency", ctx) + metrics.latency:observe(latency, - gen_arr("request", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("request", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) if upstream_latency then metrics.latency:observe(upstream_latency, - gen_arr("upstream", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("upstream", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) end metrics.latency:observe(apisix_latency, - gen_arr("apisix", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("apisix", route_id, service_id, consumer_name, balancer_ip, + 
unpack(latency_extra_label_values))) + + local bandwidth_extra_label_values = extra_labels("bandwidth", ctx) metrics.bandwidth:inc(vars.request_length, - gen_arr("ingress", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("ingress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) metrics.bandwidth:inc(vars.bytes_sent, - gen_arr("egress", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("egress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) end @@ -352,6 +400,16 @@ local function etcd_modify_index() end +local function shared_dict_status() + local name = {} + for shared_dict_name, shared_dict in pairs(ngx.shared) do + name[1] = shared_dict_name + metrics.shared_dict_capacity_bytes:set(shared_dict:capacity(), name) + metrics.shared_dict_free_space_bytes:set(shared_dict:free_space(), name) + end +end + + local function collect(ctx, stream_only) if not prometheus or not metrics then core.log.error("prometheus: plugin is not initialized, please make sure ", @@ -359,6 +417,9 @@ local function collect(ctx, stream_only) return 500, {message = "An unexpected error occurred"} end + -- collect ngx.shared.DICT status + shared_dict_status() + -- across all services nginx_status() diff --git a/apisix/plugins/proxy-rewrite.lua b/apisix/plugins/proxy-rewrite.lua index c1d7ec4f5d54..7b9a99f0b872 100644 --- a/apisix/plugins/proxy-rewrite.lua +++ b/apisix/plugins/proxy-rewrite.lua @@ -68,16 +68,16 @@ local schema = { type = "string", pattern = [[^[0-9a-zA-Z-.]+(:\d{1,5})?$]], }, - scheme = { - description = "new scheme for upstream", - type = "string", - enum = {"http", "https"} - }, headers = { description = "new headers for request", type = "object", minProperties = 1, }, + use_real_request_uri_unsafe = { + description = "use real_request_uri instead, THIS IS VERY UNSAFE.", + type = "boolean", + default = false, + }, }, minProperties = 1, } @@ -156,12 +156,11 @@ function 
_M.rewrite(conf, ctx) ctx.var[upstream_vars[name]] = conf[name] end end - if conf["scheme"] then - ctx.upstream_scheme = conf["scheme"] - end local upstream_uri = ctx.var.uri - if conf.uri ~= nil then + if conf.use_real_request_uri_unsafe then + upstream_uri = ctx.var.real_request_uri + elseif conf.uri ~= nil then upstream_uri = core.utils.resolve_var(conf.uri, ctx.var) elseif conf.regex_uri ~= nil then local uri, _, err = re_sub(ctx.var.uri, conf.regex_uri[1], @@ -177,22 +176,24 @@ function _M.rewrite(conf, ctx) end end - local index = str_find(upstream_uri, "?") - if index then - upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index-1)) .. - sub_str(upstream_uri, index) - else - upstream_uri = core.utils.uri_safe_encode(upstream_uri) - end - - if ctx.var.is_args == "?" then + if not conf.use_real_request_uri_unsafe then + local index = str_find(upstream_uri, "?") if index then - ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") + upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index-1)) .. + sub_str(upstream_uri, index) + else + upstream_uri = core.utils.uri_safe_encode(upstream_uri) + end + + if ctx.var.is_args == "?" then + if index then + ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") + else + ctx.var.upstream_uri = upstream_uri .. "?" .. (ctx.var.args or "") + end else - ctx.var.upstream_uri = upstream_uri .. "?" .. 
(ctx.var.args or "") + ctx.var.upstream_uri = upstream_uri end - else - ctx.var.upstream_uri = upstream_uri end if conf.headers then diff --git a/apisix/plugins/redirect.lua b/apisix/plugins/redirect.lua index 6c9a99a1575c..421007d20d82 100644 --- a/apisix/plugins/redirect.lua +++ b/apisix/plugins/redirect.lua @@ -101,6 +101,7 @@ end function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) + if not ok then return false, err end @@ -115,6 +116,10 @@ function _M.check_schema(conf) end end + if conf.http_to_https and conf.append_query_string then + return false, "only one of `http_to_https` and `append_query_string` can be configured." + end + return true end @@ -161,11 +166,6 @@ local function get_port(attr) return port end - port = ssl["listen_port"] - if port then - return port - end - local ports = ssl["listen"] if ports and #ports > 0 then local idx = math_random(1, #ports) @@ -192,8 +192,6 @@ function _M.rewrite(conf, ctx) local proxy_proto = core.request.header(ctx, "X-Forwarded-Proto") local _scheme = proxy_proto or core.request.get_scheme(ctx) if conf.http_to_https and _scheme == "http" then - -- TODO: add test case - -- PR: https://github.com/apache/apisix/pull/1958 if ret_port == nil or ret_port == 443 or ret_port <= 0 or ret_port > 65535 then uri = "https://$host$request_uri" else diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua index 9a4015fb98bb..4c3487da0686 100644 --- a/apisix/plugins/response-rewrite.lua +++ b/apisix/plugins/response-rewrite.lua @@ -19,6 +19,7 @@ local expr = require("resty.expr.v1") local re_compile = require("resty.core.regex").re_match_compile local plugin_name = "response-rewrite" local ngx = ngx +local re_match = ngx.re.match local re_sub = ngx.re.sub local re_gsub = ngx.re.gsub local pairs = pairs @@ -27,13 +28,63 @@ local type = type local pcall = pcall +local lrucache = core.lrucache.new({ + type = "plugin", +}) + local schema = { type = "object", properties = { 
headers = { description = "new headers for response", - type = "object", - minProperties = 1, + anyOf = { + { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + { + properties = { + add = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie: =; Max-Age=" + pattern = "^[^:]+:[^:]+[^/]$" + } + }, + set = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + remove = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie" + pattern = "^[^:]+$" + } + }, + }, + } + } }, body = { description = "new body for response", @@ -121,6 +172,33 @@ local function vars_matched(conf, ctx) end +local function is_new_headers_conf(headers) + return + (headers.add and type(headers.add) == "table") or + (headers.set and type(headers.set) == "table") or + (headers.remove and type(headers.remove) == "table") +end + + +local function check_set_headers(headers) + for field, value in pairs(headers) do + if type(field) ~= 'string' then + return false, 'invalid type as header field' + end + + if type(value) ~= 'string' and type(value) ~= 'number' then + return false, 'invalid type as header value' + end + + if #field == 0 then + return false, 'invalid field length in header' + end + end + + return true +end + + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) if not ok then @@ -128,17 +206,10 @@ function _M.check_schema(conf) end if conf.headers then - for field, value in pairs(conf.headers) do - if type(field) ~= 'string' then - return false, 'invalid type as header field' - end - - if type(value) ~= 'string' and type(value) ~= 'number' then - return false, 'invalid type as header value' - end - - if #field == 0 then - return false, 'invalid field length in header' + if not 
is_new_headers_conf(conf.headers) then + ok, err = check_set_headers(conf.headers) + if not ok then + return false, err end end end @@ -205,17 +276,51 @@ function _M.body_filter(conf, ctx) end if conf.body then - + ngx.arg[2] = true if conf.body_base64 then ngx.arg[1] = ngx.decode_base64(conf.body) else ngx.arg[1] = conf.body end + end +end - ngx.arg[2] = true + +local function create_header_operation(hdr_conf) + local set = {} + local add = {} + if is_new_headers_conf(hdr_conf) then + if hdr_conf.add then + for _, value in ipairs(hdr_conf.add) do + local m, err = re_match(value, [[^([^:\s]+)\s*:\s*([^:]+)$]], "jo") + if not m then + return nil, err + end + core.table.insert_tail(add, m[1], m[2]) + end + end + + if hdr_conf.set then + for field, value in pairs(hdr_conf.set) do + --reform header from object into array, so can avoid use pairs, which is NYI + core.table.insert_tail(set, field, value) + end + end + + else + for field, value in pairs(hdr_conf) do + core.table.insert_tail(set, field, value) + end end + + return { + add = add, + set = set, + remove = hdr_conf.remove or {}, + } end + function _M.header_filter(conf, ctx) ctx.response_rewrite_matched = vars_matched(conf, ctx) if not ctx.response_rewrite_matched then @@ -235,19 +340,28 @@ function _M.header_filter(conf, ctx) return end - --reform header from object into array, so can avoid use pairs, which is NYI - if not conf.headers_arr then - conf.headers_arr = {} + local hdr_op, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, + create_header_operation, conf.headers) + if not hdr_op then + core.log.error("failed to create header operation: ", err) + return + end - for field, value in pairs(conf.headers) do - core.table.insert_tail(conf.headers_arr, field, value) - end + local field_cnt = #hdr_op.add + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var(hdr_op.add[i+1], ctx.var) + core.response.add_header(hdr_op.add[i], val) end - local field_cnt = #conf.headers_arr + local field_cnt = 
#hdr_op.set for i = 1, field_cnt, 2 do - local val = core.utils.resolve_var(conf.headers_arr[i+1], ctx.var) - ngx.header[conf.headers_arr[i]] = val + local val = core.utils.resolve_var(hdr_op.set[i+1], ctx.var) + core.response.set_header(hdr_op.set[i], val) + end + + local field_cnt = #hdr_op.remove + for i = 1, field_cnt do + core.response.set_header(hdr_op.remove[i], nil) end end diff --git a/apisix/plugins/syslog.lua b/apisix/plugins/syslog.lua index 7eb4675c0b04..b57f8a1235eb 100644 --- a/apisix/plugins/syslog.lua +++ b/apisix/plugins/syslog.lua @@ -28,8 +28,6 @@ local schema = { properties = { host = {type = "string"}, port = {type = "integer"}, - max_retry_times = {type = "integer", minimum = 1}, - retry_interval = {type = "integer", minimum = 0}, flush_limit = {type = "integer", minimum = 1, default = 4096}, drop_limit = {type = "integer", default = 1048576}, timeout = {type = "integer", minimum = 1, default = 3000}, @@ -59,8 +57,6 @@ function _M.check_schema(conf) return false, err end - conf.max_retry_count = conf.max_retry_times or conf.max_retry_count - conf.retry_delay = conf.retry_interval or conf.retry_delay return true end diff --git a/apisix/plugins/tencent-cloud-cls.lua b/apisix/plugins/tencent-cloud-cls.lua new file mode 100644 index 000000000000..b0726e607eae --- /dev/null +++ b/apisix/plugins/tencent-cloud-cls.lua @@ -0,0 +1,141 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local cls_sdk = require("apisix.plugins.tencent-cloud-cls.cls-sdk") +local plugin = require("apisix.plugin") +local math = math +local ngx = ngx +local pairs = pairs + + +local plugin_name = "tencent-cloud-cls" +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = { + type = "object", + properties = { + cls_host = { type = "string" }, + cls_topic = { type = "string" }, + secret_id = { type = "string" }, + secret_key = { type = "string" }, + sample_ratio = { + type = "number", + minimum = 0.00001, + maximum = 1, + default = 1 + }, + include_req_body = { type = "boolean", default = false }, + include_resp_body = { type = "boolean", default = false }, + global_tag = { type = "object" }, + }, + required = { "cls_host", "cls_topic", "secret_id", "secret_key" } +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = log_util.metadata_schema_log_format, + }, +} + + +local _M = { + version = 0.1, + priority = 397, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, err + end + return log_util.check_log_schema(conf) +end + + +function 
_M.access(conf, ctx) + ctx.cls_sample = false + if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then + core.log.debug("cls sampled") + ctx.cls_sample = true + return + end +end + + +function _M.body_filter(conf, ctx) + if ctx.cls_sample then + log_util.collect_body(conf, ctx) + end +end + + +function _M.log(conf, ctx) + -- sample if set + if not ctx.cls_sample then + core.log.debug("cls not sampled, skip log") + return + end + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + + local entry + + if metadata and metadata.value.log_format + and core.table.nkeys(metadata.value.log_format) > 0 + then + core.log.debug("using custom format log") + entry = log_util.get_custom_format_log(ctx, metadata.value.log_format) + else + entry = log_util.get_full_log(ngx, conf) + end + + if conf.global_tag then + for k, v in pairs(conf.global_tag) do + entry[k] = v + end + end + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + local sdk, err = cls_sdk.new(conf.cls_host, conf.cls_topic, conf.secret_id, conf.secret_key) + if err then + core.log.error("init sdk failed err:", err) + return false, err + end + return sdk:send_to_cls(entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua new file mode 100644 index 000000000000..d2b6e8ad4525 --- /dev/null +++ b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua @@ -0,0 +1,312 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local pb = require "pb" +local protoc = require("protoc").new() +local http = require("resty.http") +local socket = require("socket") +local str_util = require("resty.string") +local core = require("apisix.core") +local core_gethostname = require("apisix.core.utils").gethostname +local json = core.json +local json_encode = json.encode +local ngx = ngx +local ngx_time = ngx.time +local ngx_now = ngx.now +local ngx_sha1_bin = ngx.sha1_bin +local ngx_hmac_sha1 = ngx.hmac_sha1 +local fmt = string.format +local table = table +local concat_tab = table.concat +local clear_tab = table.clear +local new_tab = table.new +local insert_tab = table.insert +local ipairs = ipairs +local pairs = pairs +local type = type +local tostring = tostring +local setmetatable = setmetatable +local pcall = pcall + +-- api doc https://www.tencentcloud.com/document/product/614/16873 +local MAX_SINGLE_VALUE_SIZE = 1 * 1024 * 1024 +local MAX_LOG_GROUP_VALUE_SIZE = 5 * 1024 * 1024 -- 5MB + +local cls_api_path = "/structuredlog" +local auth_expire_time = 60 +local cls_conn_timeout = 1000 +local cls_read_timeout = 10000 +local cls_send_timeout = 10000 + +local headers_cache = {} +local params_cache = { + ssl_verify = false, + headers = headers_cache, +} + + +local function get_ip(hostname) + local _, resolved = socket.dns.toip(hostname) + local ip_list = {} + for _, v in ipairs(resolved.ip) do + insert_tab(ip_list, 
v) + end + return ip_list +end + +local host_ip = tostring(unpack(get_ip(core_gethostname()))) +local log_group_list = {} +local log_group_list_pb = { + logGroupList = log_group_list, +} + + +local function sha1(msg) + return str_util.to_hex(ngx_sha1_bin(msg)) +end + + +local function sha1_hmac(key, msg) + return str_util.to_hex(ngx_hmac_sha1(key, msg)) +end + + +-- sign algorithm https://cloud.tencent.com/document/product/614/12445 +local function sign(secret_id, secret_key) + local method = "post" + local format_params = "" + local format_headers = "" + local sign_algorithm = "sha1" + local http_request_info = fmt("%s\n%s\n%s\n%s\n", + method, cls_api_path, format_params, format_headers) + local cur_time = ngx_time() + local sign_time = fmt("%d;%d", cur_time, cur_time + auth_expire_time) + local string_to_sign = fmt("%s\n%s\n%s\n", sign_algorithm, sign_time, sha1(http_request_info)) + + local sign_key = sha1_hmac(secret_key, sign_time) + local signature = sha1_hmac(sign_key, string_to_sign) + + local arr = { + "q-sign-algorithm=sha1", + "q-ak=" .. secret_id, + "q-sign-time=" .. sign_time, + "q-key-time=" .. sign_time, + "q-header-list=", + "q-url-param-list=", + "q-signature=" .. signature, + } + + return concat_tab(arr, '&') +end + + +-- normalized log data for CLS API +local function normalize_log(log) + local normalized_log = {} + local log_size = 4 -- empty obj alignment + for k, v in pairs(log) do + local v_type = type(v) + local field = { key = k, value = "" } + if v_type == "string" then + field["value"] = v + elseif v_type == "number" then + field["value"] = tostring(v) + elseif v_type == "table" then + field["value"] = json_encode(v) + else + field["value"] = tostring(v) + core.log.warn("unexpected type " .. v_type .. " for field " .. 
k) + end + if #field.value > MAX_SINGLE_VALUE_SIZE then + core.log.warn(field.key, " value size over ", MAX_SINGLE_VALUE_SIZE, " , truncated") + field.value = field.value:sub(1, MAX_SINGLE_VALUE_SIZE) + end + insert_tab(normalized_log, field) + log_size = log_size + #field.key + #field.value + end + return normalized_log, log_size +end + + +local _M = { version = 0.1 } +local mt = { __index = _M } + +local pb_state +local function init_pb_state() + local old_pb_state = pb.state(nil) + protoc.reload() + local cls_sdk_protoc = protoc.new() + -- proto file in https://www.tencentcloud.com/document/product/614/42787 + local ok, err = pcall(cls_sdk_protoc.load, cls_sdk_protoc, [[ +package cls; + +message Log +{ + message Content + { + required string key = 1; // Key of each field group + required string value = 2; // Value of each field group + } + required int64 time = 1; // Unix timestamp + repeated Content contents = 2; // Multiple key-value pairs in one log +} + +message LogTag +{ + required string key = 1; + required string value = 2; +} + +message LogGroup +{ + repeated Log logs = 1; // Log array consisting of multiple logs + optional string contextFlow = 2; // This parameter does not take effect currently + optional string filename = 3; // Log filename + optional string source = 4; // Log source, which is generally the machine IP + repeated LogTag logTags = 5; +} + +message LogGroupList +{ + repeated LogGroup logGroupList = 1; // Log group list +} + ]], "tencent-cloud-cls/cls.proto") + if not ok then + cls_sdk_protoc:reset() + pb.state(old_pb_state) + return "failed to load cls.proto: ".. 
err + end + pb_state = pb.state(old_pb_state) +end + + +function _M.new(host, topic, secret_id, secret_key) + if not pb_state then + local err = init_pb_state() + if err then + return nil, err + end + end + local self = { + host = host, + topic = topic, + secret_id = secret_id, + secret_key = secret_key, + } + return setmetatable(self, mt) +end + + +local function do_request_uri(uri, params) + local client = http:new() + client:set_timeouts(cls_conn_timeout, cls_send_timeout, cls_read_timeout) + local res, err = client:request_uri(uri, params) + client:close() + return res, err +end + + +function _M.send_cls_request(self, pb_obj) + -- recovery of stored pb_store + local old_pb_state = pb.state(pb_state) + local ok, pb_data = pcall(pb.encode, "cls.LogGroupList", pb_obj) + pb_state = pb.state(old_pb_state) + if not ok or not pb_data then + core.log.error("failed to encode LogGroupList, err: ", pb_data) + return false, pb_data + end + + clear_tab(headers_cache) + headers_cache["Host"] = self.host + headers_cache["Content-Type"] = "application/x-protobuf" + headers_cache["Authorization"] = sign(self.secret_id, self.secret_key, cls_api_path) + + -- TODO: support lz4/zstd compress + params_cache.method = "POST" + params_cache.body = pb_data + + local cls_url = "http://" .. self.host .. cls_api_path .. "?topic_id=" .. 
self.topic + core.log.debug("CLS request URL: ", cls_url) + + local res, err = do_request_uri(cls_url, params_cache) + if not res then + return false, err + end + + if res.status ~= 200 then + err = fmt("got wrong status: %s, headers: %s, body, %s", + res.status, json.encode(res.headers), res.body) + -- 413, 404, 401, 403 are not retryable + if res.status == 413 or res.status == 404 or res.status == 401 or res.status == 403 then + core.log.error(err, ", not retryable") + return true + end + + return false, err + end + + core.log.debug("CLS report success") + return true +end + + +function _M.send_to_cls(self, logs) + clear_tab(log_group_list) + local now = ngx_now() * 1000 + + local total_size = 0 + local format_logs = new_tab(#logs, 0) + -- sums of all value in all LogGroup should be no more than 5MB + -- so send whenever size exceed max size + local group_list_start = 1 + for i = 1, #logs, 1 do + local contents, log_size = normalize_log(logs[i]) + if log_size > MAX_LOG_GROUP_VALUE_SIZE then + core.log.error("size of log is over 5MB, dropped") + goto continue + end + total_size = total_size + log_size + if total_size > MAX_LOG_GROUP_VALUE_SIZE then + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + if not ok then + return false, err, group_list_start + end + group_list_start = i + format_logs = new_tab(#logs - i, 0) + total_size = 0 + clear_tab(log_group_list) + end + insert_tab(format_logs, { + time = now, + contents = contents, + }) + :: continue :: + end + + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + return ok, err, group_list_start +end + +return _M diff --git a/apisix/plugins/wolf-rbac.lua b/apisix/plugins/wolf-rbac.lua index 1a2e9867fce0..a6be6474cd9b 100644 --- a/apisix/plugins/wolf-rbac.lua +++ b/apisix/plugins/wolf-rbac.lua @@ -232,7 +232,7 @@ local function 
check_url_permission(server, appid, action, resName, client_ip, w } end - if res.status ~= 200 and res.status ~= 401 then + if res.status ~= 200 and res.status >= 500 then return { status = 500, err = 'request to wolf-server failed, status:' .. res.status @@ -314,7 +314,7 @@ function _M.rewrite(conf, ctx) core.response.set_header(prefix .. "UserId", userId) core.response.set_header(prefix .. "Username", username) core.response.set_header(prefix .. "Nickname", ngx.escape_uri(nickname)) - core.request.set_header(ctx, prefix .. "UserId", userId, ctx) + core.request.set_header(ctx, prefix .. "UserId", userId) core.request.set_header(ctx, prefix .. "Username", username) core.request.set_header(ctx, prefix .. "Nickname", ngx.escape_uri(nickname)) end @@ -324,9 +324,7 @@ function _M.rewrite(conf, ctx) core.log.error(" check_url_permission(", core.json.delay_encode(perm_item), ") failed, res: ",core.json.delay_encode(res)) - return 401, fail_response("Invalid user permission", - { username = username, nickname = nickname } - ) + return res.status, fail_response(res.err, { username = username, nickname = nickname }) end core.log.info("wolf-rbac check permission passed") end diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua new file mode 100644 index 000000000000..a586a923b9b7 --- /dev/null +++ b/apisix/plugins/workflow.lua @@ -0,0 +1,151 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local limit_count = require("apisix.plugins.limit-count.init") +local expr = require("resty.expr.v1") +local ipairs = ipairs + +local schema = { + type = "object", + properties = { + rules = { + type = "array", + items = { + type = "object", + properties = { + case = { + type = "array", + items = { + type = "array", + }, + minItems = 1, + }, + actions = { + type = "array", + items = { + type = "array", + minItems = 1 + } + } + }, + required = {"case", "actions"} + } + } + } +} + +local plugin_name = "workflow" + +local _M = { + version = 0.1, + priority = 1006, + name = plugin_name, + schema = schema +} + + +local return_schema = { + type = "object", + properties = { + code = { + type = "integer", + minimum = 100, + maximum = 599 + } + }, + required = {"code"} +} + + +local function check_return_schema(conf) + local ok, err = core.schema.check(return_schema, conf) + if not ok then + return false, err + end + return true +end + + +local function exit(conf) + return conf.code, {error_msg = "rejected by workflow"} +end + + +local function rate_limit(conf, ctx) + return limit_count.rate_limit(conf, ctx) +end + + +local support_action = { + ["return"] = { + handler = exit, + check_schema = check_return_schema, + }, + ["limit-count"] = { + handler = rate_limit, + check_schema = limit_count.check_schema, + } +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + for idx, rule in ipairs(conf.rules) do + local ok, err = expr.new(rule.case) 
+ if not ok then + return false, "failed to validate the 'case' expression: " .. err + end + + local actions = rule.actions + for _, action in ipairs(actions) do + + if not support_action[action[1]] then + return false, "unsupported action: " .. action[1] + end + + -- use the action's idx as an identifier to isolate between confs + action[2]["_vid"] = idx + local ok, err = support_action[action[1]].check_schema(action[2], plugin_name) + if not ok then + return false, "failed to validate the '" .. action[1] .. "' action: " .. err + end + end + end + + return true +end + + +function _M.access(conf, ctx) + local match_result + for _, rule in ipairs(conf.rules) do + local expr, _ = expr.new(rule.case) + match_result = expr:eval(ctx.var) + if match_result then + -- only one action is currently supported + local action = rule.actions[1] + return support_action[action[1]].handler(action[2], ctx) + end + end +end + + +return _M diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua index 3fafd29f0e2a..ab284cefe9fa 100644 --- a/apisix/plugins/zipkin.lua +++ b/apisix/plugins/zipkin.lua @@ -127,6 +127,8 @@ function _M.rewrite(plugin_conf, ctx) local b3 = headers["b3"] if b3 then -- don't pass b3 header by default + -- TODO: add an option like 'single_b3_header' so we can adapt to the upstream + -- which doesn't support b3 header without always breaking down the header core.request.set_header(ctx, "b3", nil) local err @@ -158,6 +160,12 @@ function _M.rewrite(plugin_conf, ctx) ctx.opentracing_sample = tracer.sampler:sample(per_req_sample_ratio or conf.sample_ratio) if not ctx.opentracing_sample then core.request.set_header(ctx, "x-b3-sampled", "0") + -- pass the trace ids even the sample is rejected + -- see https://github.com/openzipkin/b3-propagation#why-send- + -- trace-ids-with-a-reject-sampling-decision + core.request.set_header(ctx, "x-b3-traceid", trace_id) + core.request.set_header(ctx, "x-b3-parentspanid", parent_span_id) + core.request.set_header(ctx, 
"x-b3-spanid", request_span_id) return end diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua index 16dccc6d8fa2..59e23542d662 100644 --- a/apisix/schema_def.lua +++ b/apisix/schema_def.lua @@ -277,20 +277,6 @@ local health_checker = { } } }, - default = { - type = "http", - healthy = { - http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, - 300, 301, 302, 303, 304, 305, 306, 307, 308 }, - successes = 0, - }, - unhealthy = { - http_statuses = { 429, 500, 503 }, - tcp_failures = 0, - timeouts = 0, - http_failures = 0, - }, - } } }, anyOf = { @@ -610,9 +596,6 @@ _M.route = { service_id = id_schema, upstream_id = id_schema, - service_protocol = { - enum = {"grpc", "http"} - }, enable_websocket = { description = "enable websocket for request", @@ -940,12 +923,12 @@ _M.id_schema = id_schema _M.plugin_injected_schema = { ["$comment"] = "this is a mark for our injected plugin schema", - disable = { - type = "boolean", - }, _meta = { type = "object", properties = { + disable = { + type = "boolean", + }, error_response = { oneOf = { { type = "string" }, @@ -956,6 +939,11 @@ _M.plugin_injected_schema = { description = "priority of plugins by customized order", type = "integer", }, + filter = { + description = "filter determines whether the plugin ".. 
+ "needs to be executed at runtime", + type = "array", + } } } } diff --git a/apisix/ssl.lua b/apisix/ssl.lua index 7d48f308502e..26fa5c6ef359 100644 --- a/apisix/ssl.lua +++ b/apisix/ssl.lua @@ -22,6 +22,7 @@ local aes = require("resty.aes") local str_lower = string.lower local assert = assert local type = type +local ipairs = ipairs local cert_cache = core.lrucache.new { @@ -55,23 +56,32 @@ function _M.server_name() end -local _aes_128_cbc_with_iv = false +local _aes_128_cbc_with_iv_tbl local function get_aes_128_cbc_with_iv() - if _aes_128_cbc_with_iv == false then + if _aes_128_cbc_with_iv_tbl == nil then + _aes_128_cbc_with_iv_tbl = core.table.new(2, 0) local local_conf = core.config.local_conf() - local iv = core.table.try_read_attr(local_conf, "apisix", "ssl", "key_encrypt_salt") - if type(iv) =="string" and #iv == 16 then - _aes_128_cbc_with_iv = assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv = iv})) - else - _aes_128_cbc_with_iv = nil + local ivs = core.table.try_read_attr(local_conf, "apisix", "ssl", "key_encrypt_salt") + local type_ivs = type(ivs) + + if type_ivs == "table" then + for _, iv in ipairs(ivs) do + local aes_with_iv = assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv = iv})) + core.table.insert(_aes_128_cbc_with_iv_tbl, aes_with_iv) + end + elseif type_ivs == "string" then + local aes_with_iv = assert(aes:new(ivs, nil, aes.cipher(128, "cbc"), {iv = ivs})) + core.table.insert(_aes_128_cbc_with_iv_tbl, aes_with_iv) end end - return _aes_128_cbc_with_iv + + return _aes_128_cbc_with_iv_tbl end function _M.aes_encrypt_pkey(origin) - local aes_128_cbc_with_iv = get_aes_128_cbc_with_iv() + local aes_128_cbc_with_iv_tbl = get_aes_128_cbc_with_iv() + local aes_128_cbc_with_iv = aes_128_cbc_with_iv_tbl[1] if aes_128_cbc_with_iv ~= nil and core.string.has_prefix(origin, "---") then local encrypted = aes_128_cbc_with_iv:encrypt(origin) if encrypted == nil then @@ -86,32 +96,32 @@ function _M.aes_encrypt_pkey(origin) end -local function 
decrypt_priv_pkey(iv, key) - local decoded_key = ngx_decode_base64(key) - if not decoded_key then - core.log.error("base64 decode ssl key failed. key[", key, "] ") - return nil +local function aes_decrypt_pkey(origin) + if core.string.has_prefix(origin, "---") then + return origin end - local decrypted = iv:decrypt(decoded_key) - if not decrypted then - core.log.error("decrypt ssl key failed. key[", key, "] ") + local aes_128_cbc_with_iv_tbl = get_aes_128_cbc_with_iv() + if #aes_128_cbc_with_iv_tbl == 0 then + return origin end - return decrypted -end - - -local function aes_decrypt_pkey(origin) - if core.string.has_prefix(origin, "---") then - return origin + local decoded_key = ngx_decode_base64(origin) + if not decoded_key then + core.log.error("base64 decode ssl key failed. key[", origin, "] ") + return nil end - local aes_128_cbc_with_iv = get_aes_128_cbc_with_iv() - if aes_128_cbc_with_iv ~= nil then - return decrypt_priv_pkey(aes_128_cbc_with_iv, origin) + for _, aes_128_cbc_with_iv in ipairs(aes_128_cbc_with_iv_tbl) do + local decrypted = aes_128_cbc_with_iv:decrypt(decoded_key) + if decrypted then + return decrypted + end end - return origin + + core.log.error("decrypt ssl key failed") + + return nil end diff --git a/apisix/ssl/router/radixtree_sni.lua b/apisix/ssl/router/radixtree_sni.lua index 891d8d21dd4c..28648f8c9b18 100644 --- a/apisix/ssl/router/radixtree_sni.lua +++ b/apisix/ssl/router/radixtree_sni.lua @@ -247,7 +247,7 @@ end function _M.init_worker() local err - ssl_certificates, err = core.config.new("/ssl", { + ssl_certificates, err = core.config.new("/ssls", { automatic = true, item_schema = core.schema.ssl, checker = function (item, schema_type) @@ -264,7 +264,7 @@ end function _M.get_by_id(ssl_id) local ssl - local ssls = core.config.fetch_created_obj("/ssl") + local ssls = core.config.fetch_created_obj("/ssls") if ssls then ssl = ssls:get(tostring(ssl_id)) end diff --git a/apisix/stream/plugins/mqtt-proxy.lua 
b/apisix/stream/plugins/mqtt-proxy.lua index 2c421dcc2c49..f075e204db95 100644 --- a/apisix/stream/plugins/mqtt-proxy.lua +++ b/apisix/stream/plugins/mqtt-proxy.lua @@ -15,8 +15,6 @@ -- limitations under the License. -- local core = require("apisix.core") -local upstream = require("apisix.upstream") -local ipmatcher = require("resty.ipmatcher") local bit = require("bit") local ngx = ngx local str_byte = string.byte @@ -32,20 +30,7 @@ local schema = { type = "object", properties = { protocol_name = {type = "string"}, - protocol_level = {type = "integer"}, - upstream = { - description = "Deprecated. We should configure upstream outside of the plugin", - type = "object", - properties = { - ip = {type = "string"}, -- deprecated, use "host" instead - host = {type = "string"}, - port = {type = "number"}, - }, - oneOf = { - {required = {"host", "port"}}, - {required = {"ip", "port"}}, - }, - } + protocol_level = {type = "integer"} }, required = {"protocol_name", "protocol_level"}, } @@ -189,48 +174,6 @@ function _M.preread(conf, ctx) if res.client_id ~= "" then ctx.mqtt_client_id = res.client_id end - - if not conf.upstream then - return - end - - local host = conf.upstream.host - if not host then - host = conf.upstream.ip - end - - if conf.host_is_domain == nil then - conf.host_is_domain = not ipmatcher.parse_ipv4(host) - and not ipmatcher.parse_ipv6(host) - end - - if conf.host_is_domain then - local ip, err = core.resolver.parse_domain(host) - if not ip then - core.log.error("failed to parse host ", host, ", err: ", err) - return 503 - end - - host = ip - end - - local up_conf = { - type = "roundrobin", - nodes = { - {host = host, port = conf.upstream.port, weight = 1}, - } - } - - local ok, err = upstream.check_schema(up_conf) - if not ok then - core.log.error("failed to check schema ", core.json.delay_encode(up_conf), - ", err: ", err) - return 503 - end - - local matched_route = ctx.matched_route - upstream.set(ctx, up_conf.type .. "#route_" .. 
matched_route.value.id, - ctx.conf_version, up_conf) return end diff --git a/apisix/upstream.lua b/apisix/upstream.lua index 0162ad8137ed..a2a0cd3e899a 100644 --- a/apisix/upstream.lua +++ b/apisix/upstream.lua @@ -19,7 +19,6 @@ local core = require("apisix.core") local discovery = require("apisix.discovery.init").discovery local upstream_util = require("apisix.utils.upstream") local apisix_ssl = require("apisix.ssl") -local balancer = require("ngx.balancer") local error = error local tostring = tostring local ipairs = ipairs @@ -430,7 +429,7 @@ local function check_upstream_conf(in_dp, conf) local ssl_id = conf.tls and conf.tls.client_cert_id if ssl_id then - local key = "/ssl/" .. ssl_id + local key = "/ssls/" .. ssl_id local res, err = core.etcd.get(key) if not res then return nil, "failed to fetch ssl info by " @@ -458,12 +457,6 @@ local function check_upstream_conf(in_dp, conf) end if is_http then - if conf.pass_host == "node" and conf.nodes and - not balancer.recreate_request and core.table.nkeys(conf.nodes) ~= 1 - then - return false, "only support single node for `node` mode currently" - end - if conf.pass_host == "rewrite" and (conf.upstream_host == nil or conf.upstream_host == "") then diff --git a/benchmark/run.sh b/benchmark/run.sh index 8bb1047fba17..f119afb84f48 100755 --- a/benchmark/run.sh +++ b/benchmark/run.sh @@ -68,11 +68,6 @@ else fi echo " -apisix: - admin_key: - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin nginx_config: worker_processes: ${worker_cnt} " > conf/config.yaml @@ -86,7 +81,7 @@ sleep 3 ############################################# echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + no plugin" -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { @@ -112,7 +107,7 @@ sleep 1 
############################################# echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + 2 plugins (limit-count + prometheus)" -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { diff --git a/bin/apisix b/bin/apisix index 4583fd1b52a0..780764ae9509 100755 --- a/bin/apisix +++ b/bin/apisix @@ -42,10 +42,6 @@ if [[ -e $OR_EXEC && "$OR_VER" -ge 119 ]]; then # use the luajit of openresty echo "$LUAJIT_BIN $APISIX_LUA $*" exec $LUAJIT_BIN $APISIX_LUA $* -elif [[ "$LUA_VERSION" =~ "Lua 5.1" ]]; then - # OpenResty version is < 1.19, use Lua 5.1 by default - echo "lua $APISIX_LUA $*" - exec lua $APISIX_LUA $* else - echo "ERROR: Please check the version of OpenResty and Lua, OpenResty 1.19+ + LuaJIT or OpenResty before 1.19 + Lua 5.1 is required for Apache APISIX." + echo "ERROR: Please check the version of OpenResty and Lua, OpenResty 1.19+ + LuaJIT is required for Apache APISIX." 
fi diff --git a/ci/common.sh b/ci/common.sh index 6b60a30785a5..e7ecf3796684 100644 --- a/ci/common.sh +++ b/ci/common.sh @@ -79,6 +79,8 @@ install_nodejs () { mv node-v${NODEJS_VERSION}-linux-x64 ${NODEJS_PREFIX} ln -s ${NODEJS_PREFIX}/bin/node /usr/local/bin/node ln -s ${NODEJS_PREFIX}/bin/npm /usr/local/bin/npm + + npm config set registry https://registry.npmjs.org/ } set_coredns() { diff --git a/ci/init-plugin-test-service.sh b/ci/init-plugin-test-service.sh index 5f468502304d..1f973ce36f47 100755 --- a/ci/init-plugin-test-service.sh +++ b/ci/init-plugin-test-service.sh @@ -41,3 +41,8 @@ docker exec -i rmqnamesrv /home/rocketmq/rocketmq-4.6.0/bin/mqadmin updateTopic # prepare vault kv engine docker exec -i vault sh -c "VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault secrets enable -path=kv -version=1 kv" + +# wait for keycloak ready +bash -c 'while true; do curl -s localhost:8080 &>/dev/null; ret=$?; [[ $ret -eq 0 ]] && break; sleep 3; done' +docker cp ci/kcadm_configure_cas.sh apisix_keycloak_new:/tmp/ +docker exec apisix_keycloak_new bash /tmp/kcadm_configure_cas.sh diff --git a/ci/kcadm_configure_cas.sh b/ci/kcadm_configure_cas.sh new file mode 100644 index 000000000000..3486667decbb --- /dev/null +++ b/ci/kcadm_configure_cas.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -ex + +export PATH=/opt/keycloak/bin:$PATH + +kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin --password admin + +kcadm.sh create realms -s realm=test -s enabled=true + +kcadm.sh create users -r test -s username=test -s enabled=true +kcadm.sh set-password -r test --username test --new-password test + +clients=("cas1" "cas2") +rootUrls=("http://127.0.0.1:1984" "http://127.0.0.2:1984") + +for i in ${!clients[@]}; do + kcadm.sh create clients -r test -s clientId=${clients[$i]} -s enabled=true \ + -s protocol=cas -s frontchannelLogout=false -s rootUrl=${rootUrls[$i]} -s 'redirectUris=["/*"]' +done diff --git a/ci/linux_openresty_1_17_runner.sh b/ci/linux_openresty_1_19_runner.sh similarity index 96% rename from ci/linux_openresty_1_17_runner.sh rename to ci/linux_openresty_1_19_runner.sh index b0cbde775e2d..ed1751308926 100755 --- a/ci/linux_openresty_1_17_runner.sh +++ b/ci/linux_openresty_1_19_runner.sh @@ -17,5 +17,5 @@ # -export OPENRESTY_VERSION=1.17.8.2 +export OPENRESTY_VERSION=1.19.3.2 . ./ci/linux_openresty_common_runner.sh diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml index d0350860096b..4c0c4cb7e8e6 100644 --- a/ci/pod/docker-compose.plugin.yml +++ b/ci/pod/docker-compose.plugin.yml @@ -42,6 +42,28 @@ services: networks: apisix_net: + ## keycloak + # The keycloak official has two types of docker images: + # * legacy WildFly distribution + # * new Quarkus based distribution + # Here we choose new version, because it's mainstream and + # supports kcadm.sh to init the container for test. + # The original keycloak service `apisix_keycloak` is + # third-party personal customized image and for OIDC test only. + # We should unify both containers in future. 
+ apisix_keycloak_new: + container_name: apisix_keycloak_new + image: quay.io/keycloak/keycloak:18.0.2 + # use host network because in CAS auth, + # keycloak needs to send back-channel POST to apisix. + network_mode: host + environment: + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: admin + restart: unless-stopped + command: ["start-dev", "--http-port 8080"] + volumes: + - /opt/keycloak-protocol-cas-18.0.2.jar:/opt/keycloak/providers/keycloak-protocol-cas-18.0.2.jar ## kafka-cluster zookeeper-server1: @@ -126,13 +148,19 @@ services: openldap: image: bitnami/openldap:2.5.8 environment: - LDAP_ADMIN_USERNAME: amdin - LDAP_ADMIN_PASSWORD: adminpassword - LDAP_USERS: user01,user02 - LDAP_PASSWORDS: password1,password2 + - LDAP_ADMIN_USERNAME=amdin + - LDAP_ADMIN_PASSWORD=adminpassword + - LDAP_USERS=user01,user02 + - LDAP_PASSWORDS=password1,password2 + - LDAP_ENABLE_TLS=yes + - LDAP_TLS_CERT_FILE=/certs/localhost_slapd_cert.pem + - LDAP_TLS_KEY_FILE=/certs/localhost_slapd_key.pem + - LDAP_TLS_CA_FILE=/certs/apisix.crt ports: - "1389:1389" - "1636:1636" + volumes: + - ./t/certs:/certs rocketmq_namesrv: @@ -191,6 +219,62 @@ services: SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C" SPLUNK_HEC_SSL: "False" + # Elasticsearch Logger Service + elasticsearch-noauth: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1 + restart: unless-stopped + ports: + - "9200:9200" + - "9300:9300" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + xpack.security.enabled: 'false' + + elasticsearch-auth: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1 + restart: unless-stopped + ports: + - "9201:9201" + - "9301:9301" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + transport.tcp.port: 9301 + xpack.security.enabled: 'true' + + + # The function services of OpenFunction + test-header: + image: test-header-image:latest + 
restart: unless-stopped + ports: + - "30583:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-uri: + image: test-uri-image:latest + restart: unless-stopped + ports: + - "30584:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-body: + image: test-body-image:latest + restart: unless-stopped + ports: + - "30585:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + networks: apisix_net: diff --git a/ci/pod/nacos/service/Dockerfile b/ci/pod/nacos/service/Dockerfile index f76ba1585411..d279c74972cc 100644 --- a/ci/pod/nacos/service/Dockerfile +++ b/ci/pod/nacos/service/Dockerfile @@ -15,7 +15,7 @@ # limitations under the License. # -FROM java +FROM eclipse-temurin:8 ENV SUFFIX_NUM=${SUFFIX_NUM:-1} ENV NACOS_ADDR=${NACOS_ADDR:-127.0.0.1:8848} diff --git a/ci/pod/openfunction/build-function-image.sh b/ci/pod/openfunction/build-function-image.sh new file mode 100644 index 000000000000..3ad08447d090 --- /dev/null +++ b/ci/pod/openfunction/build-function-image.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +wget https://github.com/buildpacks/pack/releases/download/v0.27.0/pack-v0.27.0-linux.tgz +tar -zxvf pack-v0.27.0-linux.tgz + +# please update function-example/*/hello.go if you want to update function +./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" +./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" +./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod new file mode 100644 index 000000000000..6242ced93022 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-body/go.mod @@ -0,0 +1,5 @@ +module example.com/hello + +go 1.16 + +require github.com/OpenFunction/functions-framework-go v0.3.0 diff --git a/ci/pod/openfunction/function-example/test-body/hello.go b/ci/pod/openfunction/function-example/test-body/hello.go new file mode 100644 index 000000000000..ffa7fad6bd7e --- /dev/null +++ b/ci/pod/openfunction/function-example/test-body/hello.go @@ -0,0 +1,36 @@ +/* + * Copyright 2022 The OpenFunction 
Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + "net/http" + "io/ioutil" + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + body,_ := ioutil.ReadAll(r.Body) + fmt.Fprintf(w, "Hello, %s!\n", string(body)) +} diff --git a/ci/pod/openfunction/function-example/test-header/go.mod b/ci/pod/openfunction/function-example/test-header/go.mod new file mode 100644 index 000000000000..32c2cadc95a8 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-header/go.mod @@ -0,0 +1,3 @@ +module example.com/hello + +go 1.16 diff --git a/ci/pod/openfunction/function-example/test-header/hello.go b/ci/pod/openfunction/function-example/test-header/hello.go new file mode 100644 index 000000000000..418f9fb80943 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-header/hello.go @@ -0,0 +1,30 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + "net/http" +) + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + header := r.Header + fmt.Fprintf(w, "%s", header["Authorization"]) +} diff --git a/ci/pod/openfunction/function-example/test-uri/go.mod b/ci/pod/openfunction/function-example/test-uri/go.mod new file mode 100644 index 000000000000..c259999831bd --- /dev/null +++ b/ci/pod/openfunction/function-example/test-uri/go.mod @@ -0,0 +1,5 @@ +module example.com/hello + +go 1.17 + +require github.com/OpenFunction/functions-framework-go v0.4.0 diff --git a/ci/pod/openfunction/function-example/test-uri/hello.go b/ci/pod/openfunction/function-example/test-uri/hello.go new file mode 100644 index 000000000000..d726b8e59457 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-uri/hello.go @@ -0,0 +1,38 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + ofctx "github.com/OpenFunction/functions-framework-go/context" + "net/http" + + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld, + functions.WithFunctionPath("/{greeting}")) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + vars := ofctx.VarsFromCtx(r.Context()) + fmt.Fprintf(w, "Hello, %s!\n", vars["greeting"]) +} diff --git a/conf/config-default.yaml b/conf/config-default.yaml old mode 100644 new mode 100755 index f03d31baca3a..78bee860080e --- a/conf/config-default.yaml +++ b/conf/config-default.yaml @@ -29,17 +29,14 @@ apisix: # port: 9082 # enable_http2: true enable_admin: true - enable_admin_cors: true # Admin API support CORS response headers. enable_dev_mode: false # Sets nginx worker_processes to 1 if set to true enable_reuseport: true # Enable nginx SO_REUSEPORT switch if set to true. show_upstream_status_in_response_header: false # when true all upstream status write to `X-APISIX-Upstream-Status` otherwise only 5xx code enable_ipv6: true - config_center: etcd # etcd: use etcd to store the config value - # yaml: fetch the config value from local yaml file `/your_path/conf/apisix.yaml` #proxy_protocol: # Proxy Protocol configuration - #listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and port_admin. 
- # This port can only receive http request with proxy protocol, but node_listen & port_admin + #listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and admin_listen. + # This port can only receive http request with proxy protocol, but node_listen & admin_listen # can only receive http request. If you enable proxy protocol, you must use this port to # receive http request with proxy protocol #listen_https_port: 9182 # The port with proxy protocol for https @@ -71,35 +68,6 @@ apisix: - name: memory_cache memory_size: 50m - allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow - - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default. - #- "::/64" - #admin_listen: # use a separate port - # ip: 127.0.0.1 # Specific IP, if not set, the default value is `0.0.0.0`. - # port: 9180 - #port_admin: 9180 # Not recommend: This parameter should be set via the `admin_listen`. - #https_admin: true # enable HTTPS when use a separate port for Admin API. - # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate. - admin_api_mtls: # Depends on `port_admin` and `https_admin`. - admin_ssl_cert: "" # Path of your self-signed server side cert. - admin_ssl_cert_key: "" # Path of your self-signed server side key. - admin_ssl_ca_cert: "" # Path of your self-signed ca cert.The CA is used to sign all admin api callers' certificates. - - # Default token when use API to call for Admin API. - # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API. - # Disabling this configuration item means that the Admin API does not - # require any authentication. 
- admin_key: - - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin # admin: manage all configuration data - # viewer: only can view configuration data - - - name: viewer - key: 4054f7cf07e344346cd3f287985e76a2 - role: viewer - delete_uri_tail_slash: false # delete the '/' at the end of the URI # The URI normalization in servlet is a little different from the RFC's. # See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization, @@ -131,14 +99,11 @@ apisix: ssl: enable: true listen: # APISIX listening port in https. - - 9443 - # - port: 9444 - # enable_http2: true # If not set, the default value is `false`. + - port: 9443 + enable_http2: true # - ip: 127.0.0.3 # Specific IP, If not set, the default value is `0.0.0.0`. # port: 9445 # enable_http2: true - enable_http2: true # Not recommend: This parameter should be set via the `listen`. - # listen_port: 9443 # Not recommend: This parameter should be set via the `listen`. #ssl_trusted_certificate: /path/to/ca-cert # Specifies a file path with trusted CA certificates in the PEM format # used to verify the certificate when APISIX needs to do SSL/TLS handshaking # with external services (e.g. etcd) @@ -147,9 +112,10 @@ apisix: ssl_session_tickets: false # disable ssl_session_tickets by default for 'ssl_session_tickets' would make Perfect Forward Secrecy useless. # ref: https://github.com/mozilla/server-side-tls/issues/135 - key_encrypt_salt: edd1c9f0985e76a2 # If not set, will save origin ssl key into etcd. - # If set this, must be a string of length 16. And it will encrypt ssl key with AES-128-CBC - # !!! So do not change it after saving your ssl, it can't decrypt the ssl keys have be saved if you change !! + key_encrypt_salt: # If not set, will save origin ssl key into etcd. 
+ - edd1c9f0985e76a2 # If set this, the key_encrypt_salt should be an array whose elements are string, and the size is also 16, and it will encrypt ssl key with AES-128-CBC + # !!! So do not change it after saving your ssl, it can't decrypt the ssl keys have be saved if you change !! + # Only use the first key to encrypt, and decrypt in the order of the array. #fallback_sni: "my.default.domain" # If set this, when the client doesn't send SNI during handshake, the fallback SNI will be used instead enable_control: true @@ -273,29 +239,8 @@ nginx_config: # config for render the template to generate n introspection: 10m access-tokens: 1m ext-plugin: 1m - kubernetes: 1m tars: 1m - -etcd: - host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. - - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, - # e.g. https://127.0.0.1:2379. - prefix: /apisix # apisix configurations prefix - timeout: 30 # 30 seconds - #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter - #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds - startup_retry: 2 # the number of retry to etcd during the startup, default to 2 - #user: root # root username for etcd - #password: 5tHkHhYkjr6cQY # root password for etcd - tls: - # To enable etcd client certificate you need to build APISIX-Base, see - # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment - #cert: /path/to/cert # path of certificate used by the etcd client - #key: /path/to/key # path of key used by the etcd client - - verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd, - # the default value is true, e.g. the certificate will be verified strictly. - #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used. 
+ cas-auth: 10m # HashiCorp Vault storage backend for sensitive data retrieval. The config shows an example of what APISIX expects if you # wish to integrate Vault for secret (sensetive string, public private keys etc.) retrieval. APISIX communicates with Vault @@ -314,6 +259,12 @@ etcd: # dns: # servers: # - "127.0.0.1:8600" # use the real address of your dns server +# order: # order in which to try different dns record types when resolving +# - last # "last" will try the last previously successful type for a hostname. +# - SRV +# - A +# - AAAA +# - CNAME # eureka: # host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster. # - "http://127.0.0.1:8761" @@ -324,6 +275,112 @@ etcd: # connect: 2000 # default 2000ms # send: 2000 # default 2000ms # read: 5000 # default 5000ms +# nacos: +# host: +# - "http://${username}:${password}@${host1}:${port1}" +# prefix: "/nacos/v1/" +# fetch_interval: 30 # default 30 sec +# weight: 100 # default 100 +# timeout: +# connect: 2000 # default 2000 ms +# send: 2000 # default 2000 ms +# read: 5000 # default 5000 ms +# consul_kv: +# servers: +# - "http://127.0.0.1:8500" +# - "http://127.0.0.1:8600" +# prefix: "upstreams" +# skip_keys: # if you need to skip special keys +# - "upstreams/unused_api/" +# timeout: +# connect: 2000 # default 2000 ms +# read: 2000 # default 2000 ms +# wait: 60 # default 60 sec +# weight: 1 # default 1 +# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way +# keepalive: true # default true, use the long pull way to query consul servers +# default_server: # you can define default server when missing hit +# host: "127.0.0.1" +# port: 20999 +# metadata: +# fail_timeout: 1 # default 1 ms +# weight: 1 # default 1 +# max_fails: 1 # default 1 +# dump: # if you need, when registered nodes updated can dump into file +# path: "logs/consul_kv.dump" +# expire: 2592000 # unit sec, here is 30 day +# kubernetes: +# ### kubernetes service discovery both support 
single-cluster and multi-cluster mode +# ### applicable to the case where the service is distributed in a single or multiple kubernetes clusters. +# +# ### single-cluster mode ### +# service: +# schema: https #apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST} +# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT} +# client: +# # serviceaccount token or path of serviceaccount token_file +# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} +# # token: |- +# # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif +# # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI +# # kubernetes discovery plugin support use namespace_selector +# # you can use one of [equal, not_equal, match, not_match] filter namespace +# namespace_selector: +# # only save endpoints with namespace equal default +# equal: default +# # only save endpoints with namespace not equal default +# #not_equal: default +# # only save endpoints with namespace match one of [default, ^my-[a-z]+$] +# #match: +# #- default +# #- ^my-[a-z]+$ +# # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] +# #not_match: +# #- default +# #- ^my-[a-z]+$ +# # kubernetes discovery plugin support use label_selector +# # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +# label_selector: |- +# first="a",second="b" +# # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint +# shared_size: 1m #default 1m +# ### single-cluster mode ### +# +# ### multi-cluster mode ### +# - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8} +# service: +# schema: https #apiserver schema, options [http, https], 
default https +# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable] +# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable] +# client: +# # serviceaccount token or path of serviceaccount token_file +# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} +# # token: |- +# # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif +# # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI +# # kubernetes discovery plugin support use namespace_selector +# # you can use one of [equal, not_equal, match, not_match] filter namespace +# namespace_selector: +# # only save endpoints with namespace equal default +# equal: default +# # only save endpoints with namespace not equal default +# #not_equal: default +# # only save endpoints with namespace match one of [default, ^my-[a-z]+$] +# #match: +# #- default +# #- ^my-[a-z]+$ +# # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] +# #not_match: +# #- default +# #- ^my-[a-z]+$ +# # kubernetes discovery plugin support use label_selector +# # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +# label_selector: |- +# first="a",second="b" +# # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint +# shared_size: 1m #default 1m +# ### multi-cluster mode ### graphql: max_size: 1048576 # the maximum size limitation of graphql in bytes, default 1MiB @@ -352,6 +409,7 @@ plugins: # plugin list (sorted by priority) - uri-blocker # priority: 2900 - request-validation # priority: 2800 - openid-connect # priority: 2599 + - cas-auth # priority: 2597 - authz-casbin # priority: 2560 - authz-casdoor # priority: 2559 - wolf-rbac # priority: 2555 @@ -368,6 +426,7 @@ plugins: # plugin list (sorted by priority) - proxy-mirror # priority: 1010 - proxy-cache # priority: 
1009 - proxy-rewrite # priority: 1008 + - workflow # priority: 1006 - api-breaker # priority: 1005 - limit-conn # priority: 1003 - limit-count # priority: 1002 @@ -385,6 +444,7 @@ plugins: # plugin list (sorted by priority) - public-api # priority: 501 - prometheus # priority: 500 - datadog # priority: 495 + - elasticsearch-logger # priority: 413 - echo # priority: 412 - loggly # priority: 411 - http-logger # priority: 410 @@ -399,12 +459,14 @@ plugins: # plugin list (sorted by priority) - udp-logger # priority: 400 - file-logger # priority: 399 - clickhouse-logger # priority: 398 + - tencent-cloud-cls # priority: 397 #- log-rotate # priority: 100 # <- recommend to use priority (0, 100) for your custom plugins - example-plugin # priority: 0 - aws-lambda # priority: -1899 - azure-functions # priority: -1900 - openwhisk # priority: -1901 + - openfunction # priority: -1902 - serverless-post-function # priority: -2000 - ext-plugin-post-req # priority: -3000 - ext-plugin-post-resp # priority: -4000 @@ -431,6 +493,7 @@ plugin_attr: log-rotate: interval: 3600 # rotate interval (unit: second) max_kept: 168 # max number of log files will be kept + max_size: -1 # max size bytes of log files to be rotated, size check would be skipped with a value less than 0 enable_compression: false # enable log file compression(gzip) or not, default false skywalking: service_name: APISIX @@ -458,6 +521,20 @@ plugin_attr: export_addr: ip: 127.0.0.1 port: 9091 + #metrics: + # http_status: + # # extra labels from nginx variables + # extra_labels: + # # the label name doesn't need to be the same as variable name + # # below labels are only examples, you could add any valid variables as you need + # - upstream_addr: $upstream_addr + # - upstream_status: $upstream_status + # http_latency: + # extra_labels: + # - upstream_addr: $upstream_addr + # bandwidth: + # extra_labels: + # - upstream_addr: $upstream_addr server-info: report_ttl: 60 # live time for server info in etcd (unit: second) 
dubbo-proxy: @@ -478,13 +555,61 @@ plugin_attr: # redirect: # https_port: 8443 # the default port for use by HTTP redirects to HTTPS -#deployment: -# role: traditional -# role_traditional: -# config_provider: etcd -# etcd: -# host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. -# - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, -# # e.g. https://127.0.0.1:2379. -# prefix: /apisix # configuration prefix in etcd -# timeout: 30 # 30 seconds +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + # Default token when use API to call for Admin API. + # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API. + # Disabling this configuration item means that the Admin API does not + # require any authentication. + admin_key: + - + name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin # admin: manage all configuration data + # viewer: only can view configuration data + - + name: viewer + key: 4054f7cf07e344346cd3f287985e76a2 + role: viewer + + enable_admin_cors: true # Admin API support CORS response headers. + allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow + - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default. + #- "::/64" + admin_listen: # use a separate port + ip: 0.0.0.0 # Specific IP, if not set, the default value is `0.0.0.0`. + port: 9180 # Specific port, which must be different from node_listen's port. + + #https_admin: true # enable HTTPS when use a separate port for Admin API. + # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate. + + admin_api_mtls: # Depends on `admin_listen` and `https_admin`. + admin_ssl_cert: "" # Path of your self-signed server side cert. + admin_ssl_cert_key: "" # Path of your self-signed server side key. 
+ admin_ssl_ca_cert: "" # Path of your self-signed ca cert.The CA is used to sign all admin api callers' certificates. + + admin_api_version: v3 # The version of admin api, latest version is v3. + + etcd: + host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. + - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, + # e.g. https://127.0.0.1:2379. + prefix: /apisix # configuration prefix in etcd + timeout: 30 # 30 seconds + #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter + #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds + startup_retry: 2 # the number of retry to etcd during the startup, default to 2 + #user: root # root username for etcd + #password: 5tHkHhYkjr6cQY # root password for etcd + tls: + # To enable etcd client certificate you need to build APISIX-Base, see + # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment + #cert: /path/to/cert # path of certificate used by the etcd client + #key: /path/to/key # path of key used by the etcd client + + verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd, + # the default value is true, e.g. the certificate will be verified strictly. + #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used. diff --git a/conf/config.yaml b/conf/config.yaml index 421ac0912aa6..a77ce21e6238 100644 --- a/conf/config.yaml +++ b/conf/config.yaml @@ -17,13 +17,21 @@ # If you want to set the specified configuration value, you can set the new # in this file. For example if you want to specify the etcd address: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://127.0.0.1:2379 # # To configure via environment variables, you can use `${{VAR}}` syntax. 
For instance: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://${{ETCD_HOST}}:2379 # @@ -34,14 +42,22 @@ # Also, If you want to use default value when the environment variable not set, # Use `${{VAR:=default_value}}` instead. For instance: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://${{ETCD_HOST:=localhost}}:2379 # # This will find environment variable `ETCD_HOST` first, and if it's not exist it will use `localhost` as default value. # -apisix: - admin_key: - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 # using fixed API token has security risk, please update it when you deploy to production environment - role: admin +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 # using fixed API token has security risk, please update it when you deploy to production environment + role: admin diff --git a/docs/assets/other/json/apisix-grafana-dashboard.json b/docs/assets/other/json/apisix-grafana-dashboard.json index 247d9b3bc152..1ea90c10ca34 100644 --- a/docs/assets/other/json/apisix-grafana-dashboard.json +++ b/docs/assets/other/json/apisix-grafana-dashboard.json @@ -1622,6 +1622,111 @@ "timeShift": null, "title": "Nginx metric errors", "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "The free space percent of each nginx shared DICT since APISIX start", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": 
true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(apisix_shared_dict_free_space_bytes * 100) / on (name) apisix_shared_dict_capacity_bytes", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Nginx shared dict free space percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:117", + "decimals": null, + "format": "percent", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:118", + "decimals": null, + "format": "Misc", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "refresh": "5s", diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md index ebf7f0253df0..b8aaf2b0d464 100644 --- a/docs/en/latest/FAQ.md +++ b/docs/en/latest/FAQ.md @@ -118,7 +118,7 @@ There are two different ways to achieve this in Apache APISIX: 1. 
Using the `vars` field in a [Route](terminology/route.md): ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -131,7 +131,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 } }' -curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -158,7 +158,7 @@ Apache APISIX provides several different ways to achieve this: 1. Setting `http_to_https` to `true` in the [redirect](plugins/redirect.md) Plugin: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -173,7 +173,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 2. Advanced routing with `vars` in the redirect Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -196,7 +196,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 3. 
Using the `serverless` Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { @@ -267,15 +267,16 @@ To configure Apache APISIX to listen on multiple ports, you can: - 9082 ``` - Similarly for HTTPS requests, modify the parameter `ssl.listen_port` in `conf/config.yaml`: + Similarly for HTTPS requests, modify the parameter `ssl.listen` in `conf/config.yaml`: ``` apisix: ssl: - listen_port: - - 9443 - - 9444 - - 9445 + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 ``` 2. Reload or restart Apache APISIX. @@ -364,8 +365,11 @@ You can follow the steps below to configure this: 1. Configure different ports for Apache APISIX proxy and Admin API. Or, disable the Admin API. ```yaml -apisix: - port_admin: 9180 # use a separate port +deployment: + admin: + admin_listen: # use a separate port + ip: 127.0.0.1 + port: 9180 ``` 2. Add a proxy Route for the Apache APISIX dashboard: @@ -395,7 +399,7 @@ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 You can use the `vars` field in a Route for matching regular expressions: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/*", "vars": [ @@ -431,7 +435,7 @@ For more info on using `vars` refer to [lua-resty-expr](https://github.com/api7/ Yes. 
The example below shows configuring the FQDN `httpbin.default.svc.cluster.local` (a Kubernetes service): ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/ip", "upstream": { @@ -469,7 +473,7 @@ apisix: Now, to access the Admin API: ```shell -$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' { "uris":[ "/*" ], "name":"admin-token-test", @@ -498,9 +502,10 @@ By default, Apache APISIX only allows IPs in the range `127.0.0.0/24` to access To allow IPs in all ranges, you can update your configuration file as show below and restart or reload Apache APISIX. ```yaml -apisix: - allow_admin: - - 0.0.0.0/0 +deployment: + admin: + allow_admin: + - 0.0.0.0/0 ``` **Note**: This should only be used in non-production environments to allow all clients to access Apache APISIX and is not safe for production environments. Always authorize specific IP addresses or address ranges for production environments. @@ -532,7 +537,7 @@ You can check [this post](https://juejin.cn/post/6965778290619449351) for a more To strip a prefix from a path in your route, like to take `/foo/get` and strip it to `/get`, you can use the [proxy-rewrite](plugins/proxy-rewrite.md) Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/foo/*", "plugins": { @@ -591,6 +596,87 @@ The differences between the two are described in the table below: | Used when there are property changes that needs to be propagated across all configuration instances of a Plugin. 
| Used when you need to reuse a common set of configuration instances so that it can be extracted to a `plugin-config` and bound to different Routes. | | Takes effect on all the entities bound to the configuration instances of the Plugin. | Takes effect on Routes bound to the `plugin-config`. | +## After deploying Apache APISIX, how to detect the survival of the APISIX data plane? + +You can create a route named `health-info` and enable the [fault-injection](https://apisix.apache.org/docs/apisix/plugins/fault-injection/) plugin (where YOUR-TOKEN is the user's token; 127.0.0.1 is the IP address of the control plane, which can be modified by yourself): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/health-info \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "fine" + } + } + }, + "uri": "/status" +}' +```` + +Verification: + +Access the `/status` of the Apache APISIX data plane to detect APISIX. If the response code is 200, it means APISIX is alive. + +:::note + +This method only detects whether the APISIX data plane is alive or not. It does not mean that the routing and other functions of APISIX are normal. These require more routing-level detection. + +::: + +## What are the scenarios with high APISIX latency related to [etcd](https://etcd.io/) and how to fix them? + +etcd is the data storage component of apisix, and its stability is related to the stability of APISIX. + +In actual scenarios, if APISIX uses a certificate to connect to etcd through HTTPS, the following two problems of high latency for data query or writing may occur: + +1. Query or write data through APISIX Admin API. +2. In the monitoring scenario, Prometheus crawls the APISIX data plane Metrics API timeout. 
+ +These problems related to higher latency seriously affect the service stability of APISIX, and the reason why such problems occur is mainly because etcd provides two modes of operation: HTTP (HTTPS) and gRPC. And APISIX uses the HTTP (HTTPS) protocol to operate etcd. +In this scenario, etcd has a bug about HTTP/2: if etcd is operated over HTTPS (HTTP is not affected), the upper limit of HTTP/2 connections is the default `250` in Golang. Therefore, when the number of APISIX data plane nodes is large, once the number of connections between all APISIX nodes and etcd exceeds this upper limit, the response of APISIX API interface will be very slow. + +In Golang, the default upper limit of HTTP/2 connections is `250`, the code is as follows: + +```go +package http2 + +import ... + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 +) + +``` + +etcd officially maintains two main branches, `3.4` and `3.5`. In the `3.4` series, the recently released `3.4.20` version has fixed this issue. As for the `3.5` version, the official is preparing to release the `3.5.5` version a long time ago, but it has not been released as of now (2022.09.13). So, if you are using etcd version less than `3.5.5`, you can refer to the following ways to solve this problem: + +1. Change the communication method between APISIX and etcd from HTTPS to HTTP. +2. Roll back the etcd to `3.4.20`. +3. Clone the etcd source code and compile the `release-3.5` branch directly (this branch has fixed the problem of HTTP/2 connections, but the new version has not been released yet). 
+ +The way to recompile etcd is as follows: + +```shell +git checkout release-3.5 +make GOOS=linux GOARCH=amd64 +``` + +The compiled binary is in the bin directory, replace it with the etcd binary of your server environment, and then restart etcd: + +For more information, please refer to: + +- [when etcd node have many http long polling connections, it may cause etcd to respond slowly to http requests.](https://github.com/etcd-io/etcd/issues/14185) +- [bug: when apisix starts for a while, its communication with etcd starts to time out](https://github.com/apache/apisix/issues/7078) +- [the prometheus metrics API is tool slow](https://github.com/apache/apisix/issues/7353) +- [Support configuring `MaxConcurrentStreams` for http2](https://github.com/etcd-io/etcd/pull/14169) + ## Where can I find more answers? You can find more answers on: diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md index deaf1c4c7ce1..e8173e198bf3 100644 --- a/docs/en/latest/admin-api.md +++ b/docs/en/latest/admin-api.md @@ -23,9 +23,123 @@ title: Admin API The Admin API lets users control their deployed Apache APISIX instance. The [architecture design](./architecture-design/apisix.md) gives an idea about how everything fits together. -By default, the Admin API listens to port `9080` (`9443` for HTTPS) when APISIX is launched. This can be changed by modifying your configuration file ([conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml)). +By default, the Admin API listens to port `9180` when APISIX is launched. This can be changed by modifying your configuration file ([conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml)). -**Note**: Mentions of `X-API-KEY` in this document refers to `apisix.admin_key.key`—the access token for Admin API—in your configuration file. +**Note**: Mentions of `X-API-KEY` in this document refers to `deployment.admin.admin_key.key`—the access token for Admin API—in your configuration file. 
+ +## V3 + +The Admin API has made some breaking changes in V3 version, as well as supporting additional features. + +### Support new response body format + +1. Remove `action` field in response body; +2. Adjust the response body structure when fetching the list of resources, the new response body structure like: + +Return single resource: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +Return multiple resources: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +### Support paging query + +Paging query is supported when getting the resource list, paging parameters include: + +| parameter | Default | Valid range | Description | +| --------- | ------ | ----------- | ---------------------------- | +| page | 1 | [1, ...] | Number of pages | +| page_size | | [10, 500] | Number of resources per page | + +The example is as follows: + +```shell +$ curl "http://127.0.0.1:9180/apisix/admin/routes?page=1&page_size=10" \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d ' +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` + +Resources that support paging queries: + +- Consumer +- Global Rules +- Plugin Config +- Proto +- Route +- Service +- SSL +- Stream Route +- Upstream + +### Support filtering query + +When getting a list of resources, it supports filtering resources based on `name`, `label`, `uri`. + +| parameter | parameter | +| --------- | ------------------------------------------------------------ | +| name | Query resource by their `name`, which will not appear in the query results if the resource itself does not have `name`. 
| +| label | Query resource by their `label`, which will not appear in the query results if the resource itself does not have `label`. | +| uri | Supported on Route resources only. If the `uri` of a Route is equal to the uri of the query or if the `uris` contains the uri of the query, the Route resource appears in the query results. | + +When multiple filter parameters are enabled, use the intersection of the query results for different filter parameters. + +The following example will return a list of routes, and all routes in the list satisfy: the `name` of the route contains the string "test", the `uri` contains the string "foo", and there is no restriction on the `label` of the route, since the label of the query is the empty string. + +```shell +$ curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d ' +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` ## Route @@ -33,7 +147,7 @@ By default, the Admin API listens to port `9080` (`9443` for HTTPS) when APISIX [Routes](./terminology/route.md) match the client's request based on defined rules, loads and executes the corresponding [plugins](#plugin), and forwards the request to the specified [Upstream](#upstream). -**Note**: When the Admin API is enabled, to avoid conflicts with your design API, use a different port for the Admin API. This can be set in your configuration file by changing the `port_admin` key. +**Note**: When the Admin API is enabled, to avoid conflicts with your design API, use a different port for the Admin API. This can be set in your configuration file by changing the `admin_listen` key. 
### Request Methods @@ -111,7 +225,7 @@ Example API usage: ```shell # Create a route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "hosts": ["foo.com", "*.bar.com"], @@ -131,7 +245,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT ... # Create a route expires after 60 seconds, then it's deleted automatically -$ curl http://127.0.0.1:9080/apisix/admin/routes/2?ttl=60 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl 'http://127.0.0.1:9180/apisix/admin/routes/2?ttl=60' -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/aa/index.html", "upstream": { @@ -148,7 +262,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT # Add an upstream node to the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -167,7 +281,7 @@ After successful execution, upstream nodes will be updated to: # Update the weight of an upstream node to the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -186,7 +300,7 @@ After successful execution, upstream nodes will be updated to: # Delete an upstream node for the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -204,7 +318,7 @@ After successful execution, upstream nodes 
will be updated to: # Replace methods of the Route -- array -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ "methods": ["GET", "POST"] }' HTTP/1.1 200 OK @@ -215,7 +329,7 @@ After successful execution, methods will not retain the original data, and the e # Replace upstream nodes of the Route -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -229,7 +343,7 @@ After successful execution, nodes will not retain the original data, and the ent # Replace methods of the Route -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/methods -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d'["POST", "DELETE", " PATCH"]' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/methods -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d'["POST", "DELETE", " PATCH"]' HTTP/1.1 200 OK ... 
@@ -238,7 +352,7 @@ After successful execution, methods will not retain the original data, and the e # disable route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 0 }' @@ -252,7 +366,7 @@ After successful execution, status nodes will be updated to: # enable route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 1 }' @@ -326,7 +440,7 @@ Example configuration: Example API usage: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "plugins": { "limit-count": { @@ -350,7 +464,7 @@ HTTP/1.1 201 Created # Add an upstream node to the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -369,7 +483,7 @@ After successful execution, upstream nodes will be updated to: # Update the weight of an upstream node to the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -388,7 +502,7 @@ After successful execution, upstream nodes will be updated to: # Delete an upstream node for the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 
-H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -406,7 +520,7 @@ After successful execution, upstream nodes will be updated to: # Replace upstream nodes of the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -467,7 +581,7 @@ When bound to a Route or Service, the Authentication Plugin infers the Consumer Example API usage: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "username": "jack", "plugins": { @@ -486,7 +600,7 @@ HTTP/1.1 200 OK Date: Thu, 26 Dec 2019 08:17:49 GMT ... 
-{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63},"action":"set"} +{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63}} ``` Since `v2.2`, we can bind multiple authentication plugins to the same consumer. 
@@ -524,7 +638,7 @@ In addition to the equalization algorithm selections, Upstream also supports pas | Name | Optional | Description | Example | | --------------------------- | ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | | type | required | Load balancing algorithm to be used. | | -| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80` | +| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. 
For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` | | service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). | `a-bootiful-client` | | discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` | | hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | @@ -604,7 +718,7 @@ Example 1: Create an Upstream and modify the data in `nodes` ```shell # Create upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "type":"roundrobin", "nodes":{ @@ -616,7 +730,7 @@ HTTP/1.1 201 Created # Add a node to the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 1 @@ -633,7 +747,7 @@ After successful execution, nodes will be updated to: # Update the weight of a node to the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 10 @@ -650,7 +764,7 @@ After successful execution, nodes will be updated to: 
# Delete a node for the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1980": null @@ -666,7 +780,7 @@ After successful execution, nodes will be updated to: # Replace the nodes of the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -685,7 +799,7 @@ Example 2: Proxy client request to `https` Upstream service 1. Create a route and configure the upstream scheme as `https`. ```shell -$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get", "upstream": { @@ -766,17 +880,17 @@ Currently, the response is returned from etcd. ## SSL -**API**:/apisix/admin/ssl/{id} +**API**:/apisix/admin/ssls/{id} ### Request Methods | Method | Request URI | Request Body | Description | | ------ | ---------------------- | ------------ | ----------------------------------------------- | -| GET | /apisix/admin/ssl | NULL | Fetches a list of all configured SSL resources. | -| GET | /apisix/admin/ssl/{id} | NULL | Fetch specified resource by id. | -| PUT | /apisix/admin/ssl/{id} | {...} | Creates a resource with the specified id. | -| POST | /apisix/admin/ssl | {...} | Creates a resource and assigns a random id. | -| DELETE | /apisix/admin/ssl/{id} | NULL | Removes the resource with the specified id. | +| GET | /apisix/admin/ssls | NULL | Fetches a list of all configured SSL resources. 
| +| GET | /apisix/admin/ssls/{id} | NULL | Fetch specified resource by id. | +| PUT | /apisix/admin/ssls/{id} | {...} | Creates a resource with the specified id. | +| POST | /apisix/admin/ssls | {...} | Creates a resource and assigns a random id. | +| DELETE | /apisix/admin/ssls/{id} | NULL | Removes the resource with the specified id. | ### Request Body Parameters @@ -881,7 +995,7 @@ A JSON object defined according to the `metadata_schema` of the Plugin ({plugin_ Example Configuration: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "skey": "val", "ikey": 1 @@ -911,11 +1025,11 @@ The Plugin ({plugin_name}) of the data structure. Example API usage: ```shell -$ curl "http://127.0.0.1:9080/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ["zipkin","request-id",...] 
-$ curl "http://127.0.0.1:9080/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -{"properties":{"disable":{"type":"boolean"}},"additionalProperties":false,"type":"object"} +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +{"$comment":"this is a mark for our injected plugin schema","properties":{"header":{"default":"apikey","type":"string"},"hide_credentials":{"default":false,"type":"boolean"},"_meta":{"properties":{"filter":{"type":"array","description":"filter determines whether the plugin needs to be executed at runtime"},"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"type":"integer","description":"priority of plugins by customized order"}},"type":"object"},"query":{"default":"apikey","type":"string"}},"type":"object"} ``` **API**: /apisix/admin/plugins?all=true diff --git a/docs/en/latest/apisix-variable.md b/docs/en/latest/apisix-variable.md index 3da45f213ac5..9c0f01b5e62d 100644 --- a/docs/en/latest/apisix-variable.md +++ b/docs/en/latest/apisix-variable.md @@ -1,5 +1,10 @@ --- title: APISIX variable +keywords: + - Apache APISIX + - API Gateway + - APISIX variable +description: This article describes the variables supported by Apache APISIX. --- -Besides [Nginx variable](http://nginx.org/en/docs/varindex.html), APISIX also provides +## Description + +Besides [NGINX variable](http://nginx.org/en/docs/varindex.html), APISIX also provides additional variables. 
-List in alphabetical order: +## List of variables -| Variable Name | Origin | Description | Example | -|------------------|---------|--------------------| --------- | -| balancer_ip | core | the IP of picked upstream server | 1.1.1.1 | -| balancer_port | core | the port of picked upstream server | 80 | -| consumer_name | core | username of `consumer` | | -| graphql_name | core | the [operation name](https://graphql.org/learn/queries/#operation-name) of GraphQL | HeroComparison | -| graphql_operation | core | the operation type of GraphQL | mutation | -| graphql_root_fields | core | the top level fields of GraphQL | ["hero"] | -| mqtt_client_id | mqtt-proxy | the client id in MQTT protocol | | -| route_id | core | id of `route` | | -| route_name | core | name of `route` | | -| service_id | core | id of `service` | | -| service_name | core | name of `service` | | -| redis_cmd_line | Redis | the content of Redis command | | -| rpc_time | xRPC | time spent at the rpc request level | | +| Variable Name | Origin | Description | Example | +|-------------------- | ---------- | ----------------------------------------------------------------------------------- | ------------- | +| balancer_ip | core | The IP of picked upstream server. | 192.168.1.2 | +| balancer_port | core | The port of picked upstream server. | 80 | +| consumer_name | core | Username of Consumer. | | +| graphql_name | core | The [operation name](https://graphql.org/learn/queries/#operation-name) of GraphQL. | HeroComparison | +| graphql_operation | core | The operation type of GraphQL. | mutation | +| graphql_root_fields | core | The top level fields of GraphQL. | ["hero"] | +| mqtt_client_id | mqtt-proxy | The client id in MQTT protocol. | | +| route_id | core | Id of Route. | | +| route_name | core | Name of Route. | | +| service_id | core | Id of Service. | | +| service_name | core | Name of Service. | | +| redis_cmd_line | Redis | The content of Redis command. 
| | +| rpc_time | xRPC | Time spent at the rpc request level. | | -You can also [register your own variable](./plugin-develop.md#register-custom-variable). +You can also register your own [variable](./plugin-develop.md#register-custom-variable). diff --git a/docs/en/latest/architecture-design/apisix.md b/docs/en/latest/architecture-design/apisix.md index 8bef62289192..2e76aab68941 100644 --- a/docs/en/latest/architecture-design/apisix.md +++ b/docs/en/latest/architecture-design/apisix.md @@ -1,7 +1,11 @@ --- -title: APISIX +title: Architecture +keywords: + - API gateway + - Apache APISIX + - APISIX architecture +description: Architecture of Apache APISIX—the Cloud Native API Gateway. --- - -## Apache APISIX : Software Architecture - -![flow-software-architecture](../../../assets/images/flow-software-architecture.png) - -## Plugin Loading Process - -![flow-load-plugin](../../../assets/images/flow-load-plugin.png) - -## Plugin Hierarchy Structure - -![flow-plugin-internal](../../../assets/images/flow-plugin-internal.png) - -## Configuring APISIX +APISIX is built on top of Nginx and [ngx_lua](https://github.com/openresty/lua-nginx-module) leveraging the power offered by LuaJIT. See [Why Apache APISIX chose Nginx and Lua to build API Gateway?](https://apisix.apache.org/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/). -Apache APISIX can be configured in two ways: +![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png) -1. By directly changing `conf/config.yaml`. -2. Using the `--config` or the `-c` flag to pass in the file path of your config file while starting APISIX (`apisix start -c `). +APISIX has two main parts: -Configurations can be added to this YAML file and Apache APISIX will fall back to the default configurations for anything that is not configured in this file. +1. APISIX core, Lua plugin, multi-language Plugin runtime, and the WASM plugin runtime. +2. 
Built-in Plugins that add features for observability, security, traffic control, etc. -For example, to set the default listening port to 8000 while keeping other configurations as default, your configuration file (`config.yaml`) would look like: +The APISIX core handles the important functions like matching Routes, load balancing, service discovery, configuration management, and provides a management API. It also includes APISIX Plugin runtime supporting Lua and multilingual Plugins (Go, Java, Python, JavaScript, etc) including the experimental WASM Plugin runtime. -```yaml -apisix: - node_listen: 8000 # APISIX listening port -``` +APISIX also has a set of [built-in Plugins](https://apisix.apache.org/docs/apisix/plugins/batch-requests) that add features like authentication, security, observability, etc. They are written in Lua. -Similarly, to set the listening port to 8000 and set the etcd address to `http://foo:2379` while keeping other configurations as default, your configuration file would look like: +## Request handling process -```yaml -apisix: - node_listen: 8000 # APISIX listening port +The diagram below shows how APISIX handles an incoming request and applies corresponding Plugins: -etcd: - host: "http://foo:2379" # etcd address -``` +![flow-load-plugin](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-load-plugin.png) -Default configurations of APISIX can be found in the `conf/config-default.yaml` file. +## Plugin hierarchy -**Note**: This file is bound to the APISIX source code and should **NOT** be modified. The configuration should only be changed by the methods mentioned above. +The chart below shows the order in which different types of Plugin are applied to a request: -**Note**: The `conf/nginx.conf` file is automatically generated by APISIX and should **NOT** be edited.
+![flow-plugin-internal](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-plugin-internal.png) diff --git a/docs/en/latest/architecture-design/debug-mode.md b/docs/en/latest/architecture-design/debug-mode.md deleted file mode 100644 index 479bdec69bac..000000000000 --- a/docs/en/latest/architecture-design/debug-mode.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Debug Mode ---- - - - -### Basic Debug Mode - -You can enable the basic debug mode by adding this line to your `conf/debug.yaml` file. - -``` -basic: - enable: true -``` - -**Note**: Before Apache APISIX 2.10, basic debug mode was enabled by setting `apisix.enable_debug = true` in the `conf/config.yaml` file. - -For example, if we are using two plugins `limit-conn` and `limit-count` for a Route `/hello`, we will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when we enable the basic debug mode. - -```shell -$ curl http://127.0.0.1:1984/hello -i -HTTP/1.1 200 OK -Content-Type: text/plain -Transfer-Encoding: chunked -Connection: keep-alive -Apisix-Plugins: limit-conn, limit-count -X-RateLimit-Limit: 2 -X-RateLimit-Remaining: 1 -Server: openresty - -hello world -``` - -If the debug information cannot be included in a response header (say when the plugin is in a stream subsystem), the information will be logged in the error log at a `warn` level. - -### Advanced Debug Mode - -Advanced debug mode can also be enabled by modifying the configuration in the `conf/debug.yaml` file. - -Enable advanced debug mode by modifying the configuration in `conf/debug.yaml` file. - -The checker checks every second for changes to the configuration files. An `#END` flag is added to let the checker know that it should only look for changes till that point. - -The checker would only check this if the file was updated by checking its last modification time. 
- -| Key | Optional | Description | Default | -| ------------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| hook_conf.enable | required | Enable/Disable hook debug trace. Target module function's input arguments or returned value would be printed once this option is enabled. | false | -| hook_conf.name | required | The module list name of the hook which has enabled debug trace. | | -| hook_conf.log_level | required | Logging levels for input arguments & returned values. | warn | -| hook_conf.is_print_input_args | required | Enable/Disable printing input arguments. | true | -| hook_conf.is_print_return_value | required | Enable/Disable printing returned values. | true | - -Example: - -```yaml -hook_conf: - enable: false # Enable/Disable Hook Debug Trace - name: hook_phase # The Module List Name of Hook which has enabled Debug Trace - log_level: warn # Logging Levels - is_print_input_args: true # Enable/Disable Input Arguments Print - is_print_return_value: true # Enable/Disable Returned Value Print - -hook_phase: # Module Function List, Name: hook_phase - apisix: # Referenced Module Name - - http_access_phase # Function Names:Array - - http_header_filter_phase - - http_body_filter_phase - - http_log_phase -#END -``` - -### Enable Advanced Debug Mode Dynamically - -You can also enable the advanced debug mode to take effect on particular requests. - -For example, to dynamically enable advanced debugging mode on requests with a particular header name `X-APISIX-Dynamic-Debug` you can configure: - -```yaml -http_filter: - enable: true # Enable/Disable Advanced Debug Mode Dynamically - enable_header_name: X-APISIX-Dynamic-Debug # Trace for the request with this header -...... 
-#END -``` - -This will enable the advanced debug mode for requests like: - -```shell -curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' -``` - -**Note**: The `apisix.http_access_phase` module cannot be hooked for dynamic rules as the advanced debug mode is enabled based on the request. diff --git a/docs/en/latest/architecture-design/deployment-role.md b/docs/en/latest/architecture-design/deployment-role.md deleted file mode 100644 index 5e750e7f17dd..000000000000 --- a/docs/en/latest/architecture-design/deployment-role.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Deployment Role ---- - - - -## Concept - -Previously, the DP (Data Plane) and the CP (Control Plane) are not separate explicitly. - -Although we clearly distinguish the different responsibilities of DP and CP in the documentation, not everyone has correctly deployed APISIX in the production environment. - -Therefore, we introduce new concepts called deployment modes/roles, to help users deploy APISIX easily and safely. - -APISIX under different deployment modes will act differently. - -The table below shows the relationship among deployment modes and roles: - -| Deployment Modes | Role | Description | -|------------------|----------------------------|------------------------------------------------------------------------------------------| -| traditional | traditional | DP + CP are deployed together by default. People need to disable `enable_admin` manually | -| decoupled | data_plane / control_plane | DP and CP are deployed independently. | -| standalone | data_plane | Only DP, load the all configurations from local yaml file | - -## Deployment Modes - -### Traditional - -![traditional](../../../assets/images/deployment-traditional.png) - -In the traditional deployment mode, one instance can be both DP & CP. - -There will be a `conf server` listens on UNIX socket and acts as a proxy between APISIX and etcd. 
- -Both the DP part and CP part of the instance will connect to the `conf server` via HTTP protocol. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://xxxx - prefix: /apisix - timeout: 30 -``` - -### Decoupled - -![decoupled](../../../assets/images/deployment-cp_and_dp.png) - -The instance deployed as data_plane will: - -1. Fetch configurations from the CP, the default port is 9280 -2. Before the DP service starts, it will perform a health check on all CP addresses - - If all CP addresses are unavailable, the startup fails and an exception message is output to the screen. - - If at least one CP address is available, print the unhealthy CP check result log, and then start the APISIX service. - - If all CP addresses are normal, start the APISIX service normally. -3. Handle user requests. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: data_plane - role_data_plane: - config_provider: control_plane - control_plane: - host: - - xxxx:9280 - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert -``` - -The instance deployed as control_plane will: - -1. Listen on 9180 by default, and provide Admin API for Admin user -2. Provide `conf server` which listens on port 9280 by default. Both the DP instances and this CP instance will connect to the `conf server` via HTTPS enforced by mTLS. 
- -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: control_plane - role_control_plan: - config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - client_ca_cert: /path/to/ca-cert - etcd: - host: - - https://xxxx - prefix: /apisix - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert -``` - -### Standalone - -In this mode, APISIX is deployed as DP and reads configurations from yaml file in the local file system. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: data_plane - role_data_plane: - config_provider: yaml -``` diff --git a/docs/en/latest/batch-processor.md b/docs/en/latest/batch-processor.md index a790dbcd2139..0e7020930447 100644 --- a/docs/en/latest/batch-processor.md +++ b/docs/en/latest/batch-processor.md @@ -83,7 +83,7 @@ The batch processor's configuration will be set inside the plugin's configuratio For example: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "http-logger": { diff --git a/docs/en/latest/benchmark.md b/docs/en/latest/benchmark.md index dc25c781dac4..b8482603560c 100644 --- a/docs/en/latest/benchmark.md +++ b/docs/en/latest/benchmark.md @@ -53,7 +53,7 @@ The result of Flame Graph: And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", @@ -98,7 +98,7 @@ The result of Flame Graph: And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md index 1fd7246e6d1b..eb7dd7fd9886 100644 --- a/docs/en/latest/building-apisix.md +++ b/docs/en/latest/building-apisix.md @@ -52,7 +52,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend Then, create a directory and set the environment variable `APISIX_VERSION`: ```shell -APISIX_VERSION='2.14.1' +APISIX_VERSION='2.99.0' mkdir apisix-${APISIX_VERSION} ``` diff --git a/docs/en/latest/certificate.md b/docs/en/latest/certificate.md index 5507e5ee3ba8..4bcfa5ae240f 100644 --- a/docs/en/latest/certificate.md +++ b/docs/en/latest/certificate.md @@ -50,7 +50,7 @@ with open(sys.argv[2]) as f: key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], @@ -66,7 +66,7 @@ print(resp.text) ./ssl.py t.crt t.key test.com # create Router object -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", 
"hosts": ["test.com"], @@ -111,7 +111,7 @@ Here is an example, note that the value we pass as `sni` is `*.test.com`. ```shell ./ssl.py t.crt t.key '*.test.com' -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["*.test.com"], @@ -171,3 +171,153 @@ private keys by `certs` and `keys`. `APISIX` will pair certificate and private key with the same indice as a SSL key pair. So the length of `certs` and `keys` must be same. + +### set up multiple CA certificates + +APISIX currently uses CA certificates in several places, such as [Protect Admin API](./mtls.md#protect-admin-api), [etcd with mTLS](./mtls.md#etcd-with-mtls), and [Deployment Modes](./architecture-design/deployment-role.md). + +In these places, `ssl_trusted_certificate` or `trusted_ca_cert` will be used to set up the CA certificate, but these configurations will eventually be translated into [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) directive in OpenResty. + +If you need to set up different CA certificates in different places, then you can package these CA certificates into a CA bundle file and point to this file when you need to set up CAs. This will avoid the problem that the generated `lua_ssl_trusted_certificate` has multiple locations and overwrites each other. + +The following is a complete example to show how to set up multiple CA certificates in APISIX. 
+ +Suppose we let client and APISIX Admin API, APISIX and ETCD communicate with each other using mTLS protocol, and currently there are two CA certificates, `foo_ca.crt` and `bar_ca.crt`, and use each of these two CA certificates to issue client and server certificate pairs, `foo_ca.crt` and its issued certificate pair are used to protect Admin API, and `bar_ca.crt` and its issued certificate pair are used to protect ETCD. + +The following table details the configurations involved in this example and what they do: + +| Configuration | Type | Description | +| ------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA cert | Issues the secondary certificate required for the client to communicate with the APISIX Admin API over mTLS. | +| foo_client.crt | cert | A certificate issued by `foo_ca.crt` and used by the client to prove its identity when accessing the APISIX Admin API. | +| foo_client.key | key | Issued by `foo_ca.crt`, used by the client, the key file required to access the APISIX Admin API. | +| foo_server.crt | cert | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `admin_api_mtls.admin_ssl_cert` configuration entry. | +| foo_server.key | key | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `admin_api_mtls.admin_ssl_cert_key` configuration entry. | +| admin.apisix.dev | doname | Common Name used in issuing `foo_server.crt` certificate, through which the client accesses APISIX Admin API | +| bar_ca.crt | CA cert | Issues the secondary certificate required for APISIX to communicate with ETCD over mTLS. | +| bar_etcd.crt | cert | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `-cert-file` option in the ETCD startup command. | +| bar_etcd.key | key | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `--key-file` option in the ETCD startup command. 
| +| bar_apisix.crt | cert | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.cert` configuration entry. | +| bar_apisix.key | key | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.key` configuration entry. | +| etcd.cluster.dev | key | Common Name used in issuing `bar_etcd.crt` certificate, which is used as SNI when APISIX communicates with ETCD over mTLS. corresponds to `etcd.tls.sni` configuration item. | +| apisix.ca-bundle | CA bundle | Merged from `foo_ca.crt` and `bar_ca.crt`, replacing `foo_ca.crt` and `bar_ca.crt`. | + +1. Create CA bundle files + +``` +cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle +``` + +2. Start the ETCD cluster and enable client authentication + +Start by writing a `goreman` configuration named `Procfile-single-enable-mtls`, the content as: + +```text +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd3: etcd --name infra3 
--listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle
+```
+
+Use `goreman` to start the ETCD cluster:
+
+```shell
+goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 &
+```
+
+3. Update `config.yaml`
+
+```yaml
+deployment:
+  admin:
+    admin_key:
+      - name: admin
+        key: edd1c9f034335f136f87ad84b625c8f1
+        role: admin
+    admin_listen:
+      ip: 127.0.0.1
+      port: 9180
+    https_admin: true
+    admin_api_mtls:
+      admin_ssl_ca_cert: /path/to/apisix.ca-bundle
+      admin_ssl_cert: /path/to/foo_server.crt
+      admin_ssl_cert_key: /path/to/foo_server.key
+  role: traditional
+  role_traditional:
+    config_provider: etcd
+  etcd:
+    host:
+      - "https://127.0.0.1:12379"
+      - "https://127.0.0.1:22379"
+      - "https://127.0.0.1:32379"
+    tls:
+      cert: /path/to/bar_apisix.crt
+      key: /path/to/bar_apisix.key
+      sni: etcd.cluster.dev
+
+apisix:
+  ssl:
+    ssl_trusted_certificate: /path/to/apisix.ca-bundle
+```
+
+4. Test APISIX Admin API
+
+Start APISIX. If APISIX starts successfully and there is no abnormal output in `logs/error.log`, it means that mTLS communication between APISIX and ETCD is normal.
+ +Use curl to simulate a client, communicate with APISIX Admin API with mTLS, and create a route: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +A successful mTLS communication between curl and the APISIX Admin API is indicated if the following SSL handshake process is output: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. Verify APISIX proxy + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +... +``` + +APISIX proxied the request to the `/get` path of the upstream `httpbin.org` and returned `HTTP/1.1 200 OK`. The whole process is working fine using CA bundle instead of CA certificate. 
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json index 46c6ab4e9ce6..5f75c3249f69 100644 --- a/docs/en/latest/config.json +++ b/docs/en/latest/config.json @@ -1,14 +1,25 @@ { - "version": "2.14.1", + "version": "2.99.0", "sidebar": [ + { + "type": "doc", + "id": "getting-started" + }, + { + "type": "doc", + "id": "installation-guide" + }, + { + "type": "doc", + "id": "architecture-design/apisix" + }, { "type": "category", - "label": "Architecture Design", + "label": "Tutorials", "items": [ - "architecture-design/apisix", - "architecture-design/plugin-config", - "architecture-design/debug-mode", - "architecture-design/deployment-role" + "tutorials/expose-api", + "tutorials/protect-api", + "tutorials/observe-your-api" ] }, { @@ -19,6 +30,7 @@ "terminology/consumer", "terminology/global-rule", "terminology/plugin", + "terminology/plugin-config", "terminology/route", "terminology/router", "terminology/script", @@ -26,14 +38,6 @@ "terminology/upstream" ] }, - { - "type": "doc", - "id": "getting-started" - }, - { - "type": "doc", - "id": "installation-guide" - }, { "type": "category", "label": "Plugins", @@ -76,6 +80,7 @@ "plugins/authz-casdoor", "plugins/wolf-rbac", "plugins/openid-connect", + "plugins/cas-auth", "plugins/hmac-auth", "plugins/authz-casbin", "plugins/ldap-auth", @@ -154,7 +159,9 @@ "plugins/google-cloud-logging", "plugins/splunk-hec-logging", "plugins/file-logger", - "plugins/loggly" + "plugins/loggly", + "plugins/elasticsearch-logger", + "plugins/tencent-cloud-cls" ] } ] @@ -166,7 +173,9 @@ "plugins/serverless", "plugins/azure-functions", "plugins/openwhisk", - "plugins/aws-lambda" + "plugins/aws-lambda", + "plugins/workflow", + "plugins/openfunction" ] }, { @@ -201,6 +210,35 @@ { "type": "doc", "id": "building-apisix" + }, + { + "type": "doc", + "id": "external-plugin" + }, + { + "type": "doc", + "id": "wasm" + }, + { + "type": "link", + "label": "CODE_STYLE", + "href": "https://github.com/apache/apisix/blob/master/CODE_STYLE.md" + 
}, + { + "type": "category", + "label": "internal", + "items": [ + "internal/plugin-runner", + "internal/testing-framework" + ] + }, + { + "type": "doc", + "id": "plugin-develop" + }, + { + "type": "doc", + "id": "debug-mode" } ] }, @@ -241,6 +279,10 @@ "xrpc" ] }, + { + "type": "doc", + "id": "deployment-modes" + }, { "type": "doc", "id": "health-check" @@ -281,27 +323,10 @@ "type": "doc", "id": "install-dependencies" }, - { - "type": "doc", - "id": "plugin-develop" - }, { "type": "doc", "id": "apisix-variable" }, - { - "type": "doc", - "id": "external-plugin" - }, - { - "type": "doc", - "id": "wasm" - }, - { - "type": "link", - "label": "CODE_STYLE", - "href": "https://github.com/apache/apisix/blob/master/CODE_STYLE.md" - }, { "type": "doc", "id": "aws" @@ -314,14 +339,6 @@ "type": "doc", "id": "debug-function" }, - { - "type": "category", - "label": "internal", - "items": [ - "internal/plugin-runner", - "internal/testing-framework" - ] - }, { "type": "doc", "id": "profile" diff --git a/docs/en/latest/control-api.md b/docs/en/latest/control-api.md index ff7afc28f6ff..7dd55e24fd78 100644 --- a/docs/en/latest/control-api.md +++ b/docs/en/latest/control-api.md @@ -214,7 +214,7 @@ Triggers a full garbage collection in the HTTP subsystem. ### GET /v1/routes -Introduced in [v3.0](https://github.com/apache/apisix/releases/tag/3.0). +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). Returns all configured [Routes](./terminology/route.md): @@ -254,7 +254,7 @@ Returns all configured [Routes](./terminology/route.md): ### GET /v1/route/{route_id} -Introduced in [v3.0](https://github.com/apache/apisix/releases/tag/3.0). +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). Returns the Route with the specified `route_id`: @@ -292,7 +292,7 @@ Returns the Route with the specified `route_id`: ### GET /v1/services -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). 
+Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Returns all the Services: @@ -340,7 +340,7 @@ Returns all the Services: ### GET /v1/service/{service_id} -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Returns the Service with the specified `service_id`: @@ -374,7 +374,7 @@ Returns the Service with the specified `service_id`: ### GET /v1/upstreams -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Dumps all Upstreams: @@ -415,7 +415,7 @@ Dumps all Upstreams: ### GET /v1/upstream/{upstream_id} -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Dumps the Upstream with the specified `upstream_id`: @@ -451,3 +451,40 @@ Dumps the Upstream with the specified `upstream_id`: "modifiedIndex":1225 } ``` + +### GET /v1/plugin_metadatas + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). + +Dumps all plugin_metadatas: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). 
+ +Dumps the metadata with the specified `plugin_name`: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` diff --git a/docs/en/latest/debug-function.md b/docs/en/latest/debug-function.md index 1eecfbd1076e..9b9883fb2be5 100644 --- a/docs/en/latest/debug-function.md +++ b/docs/en/latest/debug-function.md @@ -34,7 +34,7 @@ In the response header of the request, through the response header of `X-APISIX- >Example 1: `502` response status code comes from `Upstream` (IP address is not available) ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "upstream": { @@ -75,7 +75,7 @@ It has a response header of `X-APISIX-Upstream-Status: 502`. >Example 2: `502` response status code comes from `APISIX` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -109,7 +109,7 @@ There is no response header for `X-APISIX-Upstream-Status`. 
>Example 3: `Upstream` has multiple nodes, and all nodes are unavailable ```shell -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "nodes": { "127.0.0.3:1": 1, @@ -122,7 +122,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034 ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream_id": "1" diff --git a/docs/en/latest/debug-mode.md b/docs/en/latest/debug-mode.md new file mode 100644 index 000000000000..e1438d0c3438 --- /dev/null +++ b/docs/en/latest/debug-mode.md @@ -0,0 +1,137 @@ +--- +id: debug-mode +title: Debug mode +keywords: + - API gateway + - Apache APISIX + - Debug mode +description: Guide for enabling debug mode in Apache APISIX. +--- + + + +You can use APISIX's debug mode to troubleshoot your configuration. + +## Basic debug mode + +You can enable the basic debug mode by adding this line to your debug configuration file (`conf/debug.yaml`): + +```yaml title="conf/debug.yaml" +basic: + enable: true +``` + +:::note + +For APISIX releases prior to v2.10, basic debug mode is enabled by setting `apisix.enable_debug = true` in your configuration file (`conf/config.yaml`). + +::: + +If you have configured two Plgins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode. 
+ +```shell +curl http://127.0.0.1:1984/hello -i +``` + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Apisix-Plugins: limit-conn, limit-count +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +Server: openresty + +hello world +``` + +:::info IMPORTANT + +If the debug information cannot be included in a response header (for example, when the Plugin is in a stream subsystem), the debug information will be logged as an error log at a `warn` level. + +::: + +## Advanced debug mode + +You can configure advanced options in debug mode by modifying your debug configuration file (`conf/debug.yaml`). + +The following configurations are available: + +| Key | Required | Default | Description | +|---------------------------------|----------|---------|-----------------------------------------------------------------------------------------------------------------------| +| hook_conf.enable | True | false | Enables/disables hook debug trace. i.e. if enabled, will print the target module function's inputs or returned value. | +| hook_conf.name | True | | Module list name of the hook that enabled the debug trace. | +| hook_conf.log_level | True | warn | Log level for input arguments & returned values. | +| hook_conf.is_print_input_args | True | true | When set to `true` enables printing input arguments. | +| hook_conf.is_print_return_value | True | true | When set to `true` enables printing returned values. | + +:::note + +A checker would check every second for changes to the configuration file. It will only check a file if the file was updated based on its last modification time. + +You can add an `#END` flag to indicate to the checker to only look for changes until that point. 
+ +::: + +The example below shows how you can configure advanced options in debug mode: + +```yaml title="conf/debug.yaml" +hook_conf: + enable: false # Enables/disables hook debug trace + name: hook_phase # Module list name of the hook that enabled the debug trace + log_level: warn # Log level for input arguments & returned values + is_print_input_args: true # When set to `true` enables printing input arguments + is_print_return_value: true # When set to `true` enables printing returned values + +hook_phase: # Module function list, Name: hook_phase + apisix: # Referenced module name + - http_access_phase # Function names:Array + - http_header_filter_phase + - http_body_filter_phase + - http_log_phase +#END +``` + +### Dynamically enable advanced debug mode + +You can also enable advanced debug mode only on particular requests. + +The example below shows how you can enable it on requests with the header `X-APISIX-Dynamic-Debug`: + +```yaml title="conf/debug.yaml" +http_filter: + enable: true # Enable/disable advanced debug mode dynamically + enable_header_name: X-APISIX-Dynamic-Debug # Trace for the request with this header +... +#END +``` + +This will enable the advanced debug mode only for requests like: + +```shell +curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' +``` + +:::note + +The `apisix.http_access_phase` module cannot be hooked for this dynamic rule as the advanced debug mode is enabled based on the request. + +::: diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md new file mode 100644 index 000000000000..645782dcd88e --- /dev/null +++ b/docs/en/latest/deployment-modes.md @@ -0,0 +1,168 @@ +--- +title: Deployment modes +keywords: + - API gateway + - Apache APISIX + - APISIX deployment modes +description: Documentation about the three deployment modes of Apache APISIX. +--- + + +APISIX has three different deployment modes for different production use cases. 
The table below summarises the deployment modes: + +| Deployment mode | Roles | Description | +|-----------------|----------------------------|-----------------------------------------------------------------------------------------------------------| +| traditional | traditional | Data plane and control plane are deployed together. `enable_admin` attribute should be disabled manually. | +| decoupled | data_plane / control_plane | Data plane and control plane are deployed independently. | +| standalone | data_plane | Only data plane is deployed and the configurations are loaded from a local YAML file. | + +Each of these deployment modes are explained in detail below. + +## Traditional + +In the traditional deployment mode, one instance of APISIX will be both the data plane and the control plane. + +![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png) + +There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. Both the data and the control planes connect to this conf server via HTTP. + +An example configuration of the traditional deployment mode is shown below: + +```yaml title="conf/config.yaml" +apisix: + node_listen: + - port: 9080 +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_listen: + port: 9180 + etcd: + host: + - http://${IP}:${Port} + prefix: /apisix + timeout: 30 +``` + +The instance of APISIX deployed as the traditional role will: + +1. Listen on port `9080` to handle user requests, controlled by `node_listen`. +2. Listen on port `9180` to handle Admin API requests, controlled by `admin_listen`. + +## Decoupled + +In the decoupled deployment mode the data plane and control plane instances of APISIX are deployed separately. i.e one instance of APISIX is configured to be a data plane and the other to be a control plane. 
+ +![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png) + +The instance of APISIX deployed as the data plane will: + +1. Fetch the configuration from the control plane. The default port is `9280`. +2. Performs a health check on all configured control plane addresses before starting the service. + 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown. + 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service. + 3. If all control planes are normal, APISIX service is started normally. +3. Once the service is started, it will handle the user requests. + +The example below shows the configuration of an APISIX instance as data plane in the decoupled mode: + +```yaml title="conf/config.yaml" +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - ${IP}:9280 + timeout: 30 + certs: + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + trusted_ca_cert: /path/to/ca-cert +``` + +The instance of APISIX deployed as the control plane will: + +1. Listen on port `9180` and handle Admin API requests. +2. Provide the conf server which will listen on port `9280`. Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS. 
+ +The example below shows the configuration of an APISIX instance as control plane in the decoupled mode: + +```yaml title="conf/config.yaml" +deployment: + role: control_plane + role_control_plan: + config_provider: etcd + conf_server: + listen: 0.0.0.0:9280 + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + client_ca_cert: /path/to/ca-cert + etcd: + host: + - https://${IP}:${Port} + prefix: /apisix + timeout: 30 + certs: + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + trusted_ca_cert: /path/to/ca-cert +``` + +:::tip + +As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below: + +```yaml title="conf/config.yaml" +deployment: + role: control_plane + role_control_plan: + config_provider: etcd + conf_server: + listen: 0.0.0.0:9280 + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + etcd: + host: + - https://${IP}:${Port} + prefix: /apisix + timeout: 30 + certs: + trusted_ca_cert: /path/to/ca-cert +``` + +::: + +## Standalone + +In the standalone deployment mode, APISIX is deployed as a data plane and it reads in configurations from a YAML file (`apisix.yaml`) in the local file system. + +This deployment mode is useful when you have to declaratively define the configuration or when you are using a different configuration center other than etcd. 
+ +To configure APISIX in standalone mode: + +```yaml title="conf/config.yaml" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +``` diff --git a/docs/en/latest/discovery.md b/docs/en/latest/discovery.md index 5442c874eb29..9a7bbfef7742 100644 --- a/docs/en/latest/discovery.md +++ b/docs/en/latest/discovery.md @@ -51,7 +51,7 @@ It is very easy for APISIX to extend the discovery client, the basic steps are a First, create a directory `eureka` under `apisix/discovery`; -After that, add [`init.lua`](../../../apisix/discovery/eureka/init.lua) in the `apisix/discovery/eureka` directory; +After that, add [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua) in the `apisix/discovery/eureka` directory; Then implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes in `init.lua`: @@ -185,7 +185,7 @@ discovery: Here is an example of routing a request with a URL of "/user/*" to a service which named "user-service" and use eureka discovery client in the registry : ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/user/*", "upstream": { @@ -202,13 +202,13 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` Because the upstream interface URL may have conflict, usually in the 
gateway by prefix to distinguish: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/a/*", "plugins": { @@ -223,7 +223,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } }' -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/b/*", "plugins": { diff --git a/docs/en/latest/discovery/consul_kv.md b/docs/en/latest/discovery/consul_kv.md index 7826c66b753b..b370b6bcd38c 100644 --- a/docs/en/latest/discovery/consul_kv.md +++ b/docs/en/latest/discovery/consul_kv.md @@ -134,7 +134,7 @@ To avoid confusion, use the full consul key url path as service name in practice Here is an example of routing a request with a URL of "/*" to a service which named "http://127.0.0.1:8500/v1/kv/upstreams/webpages/" and use consul_kv discovery client in the registry : ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/*", "upstream": { @@ -166,8 +166,7 @@ The format response as below: "status": 1 }, "key": "/apisix/routes/1" - }, - "action": "set" + } } ``` diff --git a/docs/en/latest/discovery/dns.md b/docs/en/latest/discovery/dns.md index 83d306e9f8a1..884286e80ade 100644 --- a/docs/en/latest/discovery/dns.md +++ b/docs/en/latest/discovery/dns.md @@ -64,8 +64,23 @@ and `test.consul.service` be resolved as `1.1.1.1` and `1.1.1.2`, this result wi Note that all the IPs from `test.consul.service` share the same weight. 
The resolved records will be cached according to their TTL. -For service whose record is not in the cache, we will query it in the order of `SRV -> A -> AAAA -> CNAME`. +For service whose record is not in the cache, we will query it in the order of `SRV -> A -> AAAA -> CNAME` by default. When we refresh the cache record, we will try from the last previously successful type. +We can also customize the order by modifying the configuration file. + +```yaml +# add this to config.yaml +discovery: + dns: + servers: + - "127.0.0.1:8600" # use the real address of your dns server + order: # order in which to try different dns record types when resolving + - last # "last" will try the last previously successful type for a hostname. + - SRV + - A + - AAAA + - CNAME +``` If you want to specify the port for the upstream server, you can add it to the `service_name`: diff --git a/docs/en/latest/discovery/kubernetes.md b/docs/en/latest/discovery/kubernetes.md index 0bf743128951..04e01f6ca2b8 100644 --- a/docs/en/latest/discovery/kubernetes.md +++ b/docs/en/latest/discovery/kubernetes.md @@ -1,5 +1,12 @@ --- title: Kubernetes +keywords: + - Kubernetes + - Apache APISIX + - Service discovery + - Cluster + - API Gateway +description: This article introduce how to perform service discovery based on Kubernetes in Apache APISIX and summarize related issues. --- + +## Description + +The `cas-auth` Plugin can be used to access CAS (Central Authentication Service 2.0) IdP (Identity Provider) +to do authentication, from the SP (service provider) perspective. + +## Attributes + +| Name | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | +| `idp_uri` | string | True | URI of IdP. | +| `cas_callback_uri` | string | True | redirect uri used to callback the SP from IdP after login or logout. | +| `logout_uri` | string | True | logout uri to trigger logout. 
| + +## Enabling the Plugin + +You can enable the Plugin on a specific Route as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/cas1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET", "POST"], + "host" : "127.0.0.1", + "uri": "/anything/*", + "plugins": { + "cas-auth": { + "idp_uri": "http://127.0.0.1:8080/realms/test/protocol/cas", + "cas_callback_uri": "/anything/cas_callback", + "logout_uri": "/anything/logout" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' + +``` + +## Configuration description + +Once you have enabled the Plugin, a new user visiting this Route would first be processed by the `cas-auth` Plugin. +If no login session exists, the user would be redirected to the login page of `idp_uri`. + +After successfully logging in from IdP, IdP will redirect this user to the `cas_callback_uri` with +GET parameters CAS ticket specified. If the ticket gets verified, the login session would be created. + +This process is only done once and subsequent requests are left uninterrupted. +Once this is done, the user is redirected to the original URL they wanted to visit. + +Later, the user could visit `logout_uri` to start logout process. The user would be redirected to `idp_uri` to do logout. + +Note that, `cas_callback_uri` and `logout_uri` should be +either full qualified address (e.g. `http://127.0.0.1:9080/anything/logout`), +or path only (e.g. `/anything/logout`), but it is recommended to be path only to keep consistent. + +These uris need to be captured by the route where the current APISIX is located. +For example, if the `uri` of the current route is `/api/v1/*`, `cas_callback_uri` can be filled in as `/api/v1/cas_callback`. + +## Disable Plugin + +To disable the `cas-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/cas1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET", "POST"], + "uri": "/anything/*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/clickhouse-logger.md b/docs/en/latest/plugins/clickhouse-logger.md index 505a26cd3160..22786f4b1e2a 100644 --- a/docs/en/latest/plugins/clickhouse-logger.md +++ b/docs/en/latest/plugins/clickhouse-logger.md @@ -35,7 +35,8 @@ The `clickhouse-logger` Plugin is used to push logs to [ClickHouse](https://clic | Name | Type | Required | Default | Valid values | Description | |---------------|---------|----------|---------------------|--------------|----------------------------------------------------------------| -| endpoint_addr | string | True | | | ClickHouse endpoint. | +| endpoint_addr | Deprecated | True | | | Use `endpoint_addrs` instead. ClickHouse endpoints. | +| endpoint_addrs | array | True | | | ClickHouse endpoints. | | database | string | True | | | Name of the database to store the logs. | | logtable | string | True | | | Table name to store the logs. | | user | string | True | | | ClickHouse username. | @@ -63,7 +64,7 @@ Configuring the Plugin metadata is global in scope. 
This means that it will take The example below shows how you can configure through the Admin API: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/clickhouse-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "log_format": { "host": "$host", @@ -80,6 +81,7 @@ CREATE TABLE default.test ( `host` String, `client_ip` String, `route_id` String, + `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`) ) ENGINE = MergeTree() @@ -95,10 +97,11 @@ Now, if you run `select * from default.test;`, you will get the following row: ## Enabling the Plugin +If multiple endpoints are configured, they will be written randomly. The example below shows how you can enable the Plugin on a specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "clickhouse-logger": { @@ -106,7 +109,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 "password": "a", "database": "default", "logtable": "test", - "endpoint_addr": "http://127.0.0.1:8123" + "endpoint_addrs": ["http://127.0.0.1:8123"] } }, "upstream": { @@ -132,7 +135,7 @@ curl -i http://127.0.0.1:9080/hello To disable the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/docs/en/latest/plugins/client-control.md b/docs/en/latest/plugins/client-control.md index f142401ebc96..ceebd19e017e 100644 --- a/docs/en/latest/plugins/client-control.md +++ b/docs/en/latest/plugins/client-control.md @@ -2,10 +2,9 @@ title: client-control keywords: - APISIX - - Plugin + - API Gateway - Client Control - - client-control -description: This document contains information about the Apache APISIX client-control Plugin. +description: This document describes the Apache APISIX client-control Plugin, you can use it to control NGINX behavior to handle a client request dynamically. --- + +## Description + +The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage. + +When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Description | +| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ | +| endpoint_addr | string | True | | Elasticsearch API. | +| field | array | True | | Elasticsearch `field` configuration. | +| field.index | string | True | | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field). 
| +| field.type | string | False | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field). | +| auth | array | False | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration. | +| auth.username | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username. | +| auth.password | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password. | +| ssl_verify | boolean | False | true | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | +| timeout | integer | False | 10 | Elasticsearch send data timeout in seconds. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
+ +## Enabling the Plugin + +### Full configuration + +The example below shows a complete configuration of the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services", + "type":"collector" + }, + "auth":{ + "username":"elastic", + "password":"123456" + }, + "ssl_verify":false, + "timeout": 60, + "retry_delay":1, + "buffer_duration":60, + "max_retry_count":0, + "batch_max_size":1000, + "inactive_timeout":5, + "name":"elasticsearch-logger" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +### Minimal configuration example + +The example below shows a bare minimum configuration of the Plugin on a Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services" + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +## Example usage + +Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server: + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +You should be able to get the log from elasticsearch: + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... 
+ "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "M1qAxYIBRmRqWkmH4Wya", + "_score": 1, + "_source": { + "apisix_latency": 0, + "route_id": "1", + "server": { + "version": "2.15.0", + "hostname": "apisix" + }, + "request": { + "size": 102, + "uri": "/elasticsearch.do?q=hello", + "querystring": { + "q": "hello" + }, + "headers": { + "user-agent": "curl/7.29.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/elasticsearch.do?q=hello", + "method": "GET" + }, + "service_id": "", + "latency": 0, + "upstream": "127.0.0.1:1980", + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "start_time": 1661170929107, + "response": { + "size": 192, + "headers": { + "date": "Mon, 22 Aug 2022 12:22:09 GMT", + "server": "APISIX/2.15.0", + "content-type": "text/plain; charset=utf-8", + "connection": "close", + "transfer-encoding": "chunked" + }, + "status": 200 + } + } + } + ] + } +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `elasticsearch-logger` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + + make a request to APISIX again: + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +You should be able to get this log from elasticsearch: + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1, + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "NVqExYIBRmRqWkmH4WwG", + "_score": 1, + "_source": { + "@timestamp": "2022-08-22T20:26:31+08:00", + "client_ip": "127.0.0.1", + "host": "127.0.0.1", + "route_id": "1" + } + } + ] + } +} +``` + +### Disable Metadata + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +``` + +## Disable Plugin + +To disable the `elasticsearch-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{}, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` diff --git a/docs/en/latest/plugins/error-log-logger.md b/docs/en/latest/plugins/error-log-logger.md index cdcf47a2e551..a37a7a5ffbbd 100644 --- a/docs/en/latest/plugins/error-log-logger.md +++ b/docs/en/latest/plugins/error-log-logger.md @@ -48,10 +48,6 @@ It might take some time to receive the log data. It will be automatically sent a | clickhouse.password | String | False | | | ClickHouse password. | | clickhouse.database | String | False | | | Name of the database to store the logs. | | clickhouse.logtable | String | False | | | Table name to store the logs. | -| host | string | False | | | Deprecated. Use `tcp.host` attribute instead. IP address or the hostname of the TCP server. | -| port | integer | False | | [0,...] | Deprecated. Use `tcp.port` instead. Target Upstream port. | -| tls | boolean | False | false | | Deprecated. Use `tcp.tls` instead. When set to `true` performs SSL verification. | -| tls_server_name | string | False | | | Deprecated. Use `tcp.tls_server_name` instead. Server name for the new TLS extension SNI. | | timeout | integer | False | 3 | [1,...] | Timeout (in seconds) for the upstream to connect and send data. | | keepalive | integer | False | 30 | [1,...] | Time in seconds to keep the connection alive after sending data. | | level | string | False | WARN | ["STDERR", "EMERG", "ALERT", "CRIT", "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"] | Log level to filter the error logs. `ERR` is same as `ERROR`. 
| @@ -77,7 +73,7 @@ Once you have enabled the Plugin, you can configure it through the Plugin metada You can set the TCP server address by configuring the Plugin metadata as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "tcp": { "host": "127.0.0.1", @@ -92,7 +88,7 @@ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/error-log-logger -H 'X-A You can configure the SkyWalking OAP server address as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "skywalking": { "endpoint_addr":"http://127.0.0.1:12800/v3/logs" @@ -108,7 +104,7 @@ The Plugin sends the error log as a string to the `data` field of a table in you You can configure it as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "clickhouse": { "user": "default", diff --git a/docs/en/latest/plugins/ext-plugin-post-resp.md b/docs/en/latest/plugins/ext-plugin-post-resp.md index 9a79dba914dd..d47f6f2a5a87 100644 --- a/docs/en/latest/plugins/ext-plugin-post-resp.md +++ b/docs/en/latest/plugins/ext-plugin-post-resp.md @@ -45,10 +45,6 @@ See [External Plugin](../external-plugin.md) to learn more. Execution of External Plugins will affect the response of the current request. -External Plugin does not yet support getting request context information. 
- -External Plugin does not yet support getting the response body of an upstream response. - ::: ## Attributes @@ -63,7 +59,7 @@ External Plugin does not yet support getting the response body of an upstream re The example below enables the `ext-plugin-post-resp` Plugin on a specific Route: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -96,7 +92,7 @@ This will reach the configured Plugin Runner and the `ext-plugin-A` will be exec To disable the `ext-plugin-post-resp` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream": { diff --git a/docs/en/latest/plugins/ext-plugin-pre-req.md b/docs/en/latest/plugins/ext-plugin-pre-req.md index efe2dbf8a8aa..3992d059eeed 100644 --- a/docs/en/latest/plugins/ext-plugin-pre-req.md +++ b/docs/en/latest/plugins/ext-plugin-pre-req.md @@ -50,7 +50,7 @@ Execution of External Plugins will affect the behavior of the current request. 
The example below enables the `ext-plugin-pre-req` Plugin on a specific Route: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -83,7 +83,7 @@ This will reach the configured Plugin Runner and the `ext-plugin-A` will be exec To disable the `ext-plugin-pre-req` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream": { diff --git a/docs/en/latest/plugins/fault-injection.md b/docs/en/latest/plugins/fault-injection.md index 296786a28237..4aa20c6b9582 100644 --- a/docs/en/latest/plugins/fault-injection.md +++ b/docs/en/latest/plugins/fault-injection.md @@ -78,7 +78,7 @@ This means that the relationship between the first two expressions is AND, and t You can enable the `fault-injection` Plugin on a specific Route as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -101,7 +101,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 Similarly, to enable a `delay` fault: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { 
"plugins": { "fault-injection": { @@ -123,7 +123,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 You can also enable the Plugin with both `abort` and `delay` which can have `vars` for matching: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -202,7 +202,7 @@ sys 0m0.010s You can enable the `fault-injection` Plugin with the `vars` attribute to set specific rules: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -268,7 +268,7 @@ Fault Injection! To disable the `fault-injection` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/docs/en/latest/plugins/file-logger.md b/docs/en/latest/plugins/file-logger.md index 8ad5cc1dea0c..39155c472876 100644 --- a/docs/en/latest/plugins/file-logger.md +++ b/docs/en/latest/plugins/file-logger.md @@ -31,6 +31,16 @@ description: This document contains information about the Apache APISIX file-log The `file-logger` Plugin is used to push log streams to a specific location. +:::tip + +- `file-logger` plugin can count request and response data for individual routes locally, which is useful for [debugging](../debug-mode.md). 
+- `file-logger` plugin can get [APISIX variables](../apisix-variable.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html), while `access.log` can only use NGINX variables. +- `file-logger` plugin supports hot reloading, so we can change its configuration at any time with immediate effect. +- `file-logger` plugin saves all data in JSON format. +- The user can modify the functions executed by the `file-logger` during the `log phase` to collect the information they want. + +::: + ## Attributes | Name | Type | Required | Description | @@ -48,7 +58,7 @@ You can also set the format of the logs by configuring the Plugin metadata. The The example below shows how you can configure through the Admin API: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/file-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/file-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "log_format": { "host": "$host", @@ -70,7 +80,7 @@ With this configuration, your logs would be formatted as shown below: The example below shows how you can enable the Plugin on a specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "file-logger": { @@ -102,7 +112,7 @@ You will be able to find the `file.log` file in the configured `logs` directory. To disable the `file-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", diff --git a/docs/en/latest/plugins/forward-auth.md b/docs/en/latest/plugins/forward-auth.md index 7aa928ce42c1..2620959a6c96 100644 --- a/docs/en/latest/plugins/forward-auth.md +++ b/docs/en/latest/plugins/forward-auth.md @@ -61,7 +61,7 @@ APISIX will generate and the send the request headers listed below to the author First, you need to setup your external authorization service. The example below uses Apache APISIX's [serverless](./serverless.md) Plugin to mock the service: ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/auth' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/auth' \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ -H 'Content-Type: application/json' \ -d '{ @@ -91,7 +91,7 @@ curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/auth' \ Now you can configure the `forward-auth` Plugin to a specific Route: ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/1' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ -d '{ "uri": "/headers", @@ -159,7 +159,7 @@ Location: http://example.com/auth To disable the `forward-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:2379/apisix/admin/routes/1 -X PUT -d value=' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", diff --git a/docs/en/latest/plugins/google-cloud-logging.md b/docs/en/latest/plugins/google-cloud-logging.md index 19c98bbc750e..d243aca87987 100644 --- a/docs/en/latest/plugins/google-cloud-logging.md +++ b/docs/en/latest/plugins/google-cloud-logging.md @@ -39,7 +39,7 @@ This plugin also allows to push logs as a batch to your Google Cloud Logging Ser | auth_config | True | | Either `auth_config` or `auth_file` must be provided. | | auth_config.private_key | True | | Private key of the Google Cloud service account. | | auth_config.project_id | True | | Project ID in the Google Cloud service account. | -| auth_config.token_uri | False | https://oauth2.googleapis.com/token | Token URI of the Google Cloud service account. | +| auth_config.token_uri | True | https://oauth2.googleapis.com/token | Token URI of the Google Cloud service account. | | auth_config.entries_uri | False | https://logging.googleapis.com/v2/entries:write | Google Cloud Logging Service API. | | auth_config.scopes | False | ["https://www.googleapis.com/auth/logging.read", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/cloud-platform"] | Access scopes of the Google Cloud service account. See [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes#logging). | | auth_file | True | | Path to the Google Cloud service account authentication JSON file. Either `auth_config` or `auth_file` must be provided. 
| @@ -56,7 +56,7 @@ This Plugin supports using batch processors to aggregate and process entries (lo The example below shows a complete configuration of the Plugin on a specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "google-cloud-logging": { @@ -95,7 +95,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 The example below shows a bare minimum configuration of the Plugin on a Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "google-cloud-logging": { @@ -130,7 +130,7 @@ You can then login and view the logs in [Google Cloud Logging Service](https://c To disable the `google-cloud-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/docs/en/latest/plugins/grpc-transcode.md b/docs/en/latest/plugins/grpc-transcode.md index f198de8e2ff1..024af7994386 100644 --- a/docs/en/latest/plugins/grpc-transcode.md +++ b/docs/en/latest/plugins/grpc-transcode.md @@ -58,10 +58,10 @@ APISIX takes in an HTTP request, transcodes it and forwards it to a gRPC service Before enabling the Plugin, you have to add the content of your `.proto` or `.pb` files to APISIX. 
-You can use the `/admin/proto/id` endpoint and add the contents of the file to the `content` field: +You can use the `/admin/protos/id` endpoint and add the contents of the file to the `content` field: ```shell -curl http://127.0.0.1:9080/apisix/admin/proto/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/protos/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; package helloworld; @@ -122,7 +122,7 @@ api_key = "edd1c9f034335f136f87ad84b625c8f1" # use a different API key reqParam = { "content": content, } -resp = requests.put("http://127.0.0.1:9080/apisix/admin/proto/" + id, json=reqParam, headers={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/protos/" + id, json=reqParam, headers={ "X-API-KEY": api_key, }) print(resp.status_code) @@ -145,13 +145,13 @@ Response: ``` # 200 -# {"node":{"value":{"create_time":1643879753,"update_time":1643883085,"content":"CmgKEnByb3RvL2ltcG9ydC5wcm90bxIDcGtnIhoKBFVzZXISEgoEbmFtZRgBIAEoCVIEbmFtZSIeCghSZXNwb25zZRISCgRib2R5GAEgASgJUgRib2R5QglaBy4vcHJvdG9iBnByb3RvMwq9AQoPcHJvdG8vc3JjLnByb3RvEgpoZWxsb3dvcmxkGhJwcm90by9pbXBvcnQucHJvdG8iPAoHUmVxdWVzdBIdCgR1c2VyGAEgASgLMgkucGtnLlVzZXJSBHVzZXISEgoEYm9keRgCIAEoCVIEYm9keTI5CgpUZXN0SW1wb3J0EisKA1J1bhITLmhlbGxvd29ybGQuUmVxdWVzdBoNLnBrZy5SZXNwb25zZSIAQglaBy4vcHJvdG9iBnByb3RvMw=="},"key":"\/apisix\/proto\/1"},"action":"set"} +# {"node":{"value":{"create_time":1643879753,"update_time":1643883085,"content":"CmgKEnByb3RvL2ltcG9ydC5wcm90bxIDcGtnIhoKBFVzZXISEgoEbmFtZRgBIAEoCVIEbmFtZSIeCghSZXNwb25zZRISCgRib2R5GAEgASgJUgRib2R5QglaBy4vcHJvdG9iBnByb3RvMwq9AQoPcHJvdG8vc3JjLnByb3RvEgpoZWxsb3dvcmxkGhJwcm90by9pbXBvcnQucHJvdG8iPAoHUmVxdWVzdBIdCgR1c2VyGAEgASgLMgkucGtnLlVzZXJSBHVzZXISEgoEYm9keRgCIAEoCVIEYm9keTI5CgpUZXN0SW1wb3J0EisKA1J1bhITLmhlbGxvd29ybGQuUmVxdWVzdBoNLnBrZy5SZXNwb25zZSIAQglaBy4vcHJvdG9iBnByb3RvMw=="},"key":"\/apisix\/proto\/1"}} ``` Now, we can enable the `grpc-transcode` Plugin to a 
specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/111 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/111 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/grpctest", @@ -205,7 +205,7 @@ Proxy-Connection: keep-alive You can also configure the `pb_option` as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/23 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/23 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/zeebe/WorkflowInstanceCreate", @@ -253,7 +253,7 @@ Trailer: grpc-message To disable the `grpc-transcode` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/111 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/111 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/grpctest", "plugins": {}, diff --git a/docs/en/latest/plugins/grpc-web.md b/docs/en/latest/plugins/grpc-web.md index b820c623d853..4e77f2dc5c16 100644 --- a/docs/en/latest/plugins/grpc-web.md +++ b/docs/en/latest/plugins/grpc-web.md @@ -36,7 +36,7 @@ The `grpc-web` Plugin is a proxy Plugin that can process [gRPC Web](https://gith You can enable the `grpc-web` Plugin on a specific Route as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri":"/grpc/web/*", "plugins":{ @@ -79,7 +79,7 @@ The supported `Content-Type` includes `application/grpc-web`, `application/grpc- To disable the `grpc-web` Plugin, you 
can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri":"/grpc/web/*", "plugins":{}, diff --git a/docs/en/latest/plugins/gzip.md b/docs/en/latest/plugins/gzip.md index 69b7df762e3a..2fe1ab8db174 100644 --- a/docs/en/latest/plugins/gzip.md +++ b/docs/en/latest/plugins/gzip.md @@ -53,7 +53,7 @@ This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the The example below enables the `gzip` Plugin on the specified Route: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -99,7 +99,7 @@ Warning: " to save to a file. To disable the `gzip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream": { diff --git a/docs/en/latest/plugins/hmac-auth.md b/docs/en/latest/plugins/hmac-auth.md index 4205bbc83de5..c50bd57375c4 100644 --- a/docs/en/latest/plugins/hmac-auth.md +++ b/docs/en/latest/plugins/hmac-auth.md @@ -52,7 +52,7 @@ This Plugin works with a [Consumer](../terminology/consumer.md) object and a con First we enable the Plugin on a Consumer object as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -77,7 +77,7 @@ You can also use the [APISIX Dashboard](/docs/dashboard/USER_GUIDE) to complete Next, you can configure the Plugin to a Route or a Service: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -140,7 +140,52 @@ curl -i http://127.0.0.1:9080/index.html?name=james&age=36 \ -H "User-Agent: curl/7.29.0" ``` -The `signing_string` generated according to the algorithm above is: +### Explanation of signature generation formula process + +1. The default HTTP Method for the above request is GET, which gives `signing_string` as + +```plain +"GET" +``` + +2. The requested URI is `/index.html`, and the `signing_string` is obtained from the HTTP Method + \n + HTTP URI as + +```plain +"GET +/index.html" +``` + +3. The query item in the URL is `name=james&age=36`, assuming that `encode_uri_params` is false. 
+According to the algorithm of `canonical_query_string`, the focus is on dictionary sorting of `key` to get `age=36&name=james`. + +```plain +"GET +/index.html +age=36&name=james" +``` + +4. The `access_key` is `user-key`, and the `signing_string` is obtained from HTTP Method + \n + HTTP URI + \n + canonical_query_string + \n + access_key as + +```plain +"GET +/index.html +age=36&name=james +user-key" +``` + +5. Date is in GMT format, as in `Tue, 19 Jan 2021 11:33:20 GMT`, and the `signing_string` is obtained from the HTTP Method + \n + HTTP URI + \n + canonical_query_string + \n + access_key + \n + Date as + +```plain +"GET +/index.html +age=36&name=james +user-key +Tue, 19 Jan 2021 11:33:20 GMT" +``` + +6. `signed_headers_string` is used to specify the headers involved in the signature, which in the above example includes `User-Agent: curl/7.29.0` and `x-custom-a: test`. + +And the `signing_string` is obtained from the HTTP Method + \n + HTTP URI + \n + canonical_query_string + \n + access_key + \n + Date + \n as ```plain "GET @@ -153,8 +198,6 @@ x-custom-a:test " ``` -The last request header also needs + `\n`. - The Python code below shows how to generate the signature: ```python @@ -310,7 +353,7 @@ Accept-Ranges: bytes To disable the `hmac-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": {}, diff --git a/docs/en/latest/plugins/http-logger.md b/docs/en/latest/plugins/http-logger.md index 87ec78fa08d7..f46e2b077dda 100644 --- a/docs/en/latest/plugins/http-logger.md +++ b/docs/en/latest/plugins/http-logger.md @@ -2,10 +2,10 @@ title: http-logger keywords: - APISIX + - API 网关 - Plugin - HTTP Logger - - http-logger -description: This document contains information about the Apache APISIX http-logger Plugin. +description: This document contains information about the Apache APISIX http-logger Plugin. Using this Plugin, you can push APISIX log data to HTTP or HTTPS servers. --- - ## Example usage -The example above configures the Plugin to only allow one concurrent request. When more than one request is received, the Plugin will respond with a 503 status code: +The example above configures the Plugin to only allow one connection on this route. When more than one request is received, the Plugin will respond with a `503` HTTP status code and reject the connection: -```bash +```shell curl -i http://127.0.0.1:9080/index.html?sleep=20 & curl -i http://127.0.0.1:9080/index.html?sleep=20 ``` -```bash +```shell + +503 Service Temporarily Unavailable + +

503 Service Temporarily Unavailable

+
openresty
+ + +``` + +## Limit the number of concurrent WebSocket connections + +Apache APISIX supports WebSocket proxy, we can use `limit-conn` plugin to limit the number of concurrent WebSocket connections. + +1. Create a Route, enable the WebSocket proxy and the `limit-conn` plugin. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/ws", + "enable_websocket": true, + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +The above route enables the WebSocket proxy on `/ws`, and limits the number of concurrent WebSocket connections to 1. More than 1 concurrent WebSocket connection will return `503` to reject the request. + +2. Initiate a WebSocket request, and the connection is established successfully. + +```shell +curl --include \ + --no-buffer \ + --header "Connection: Upgrade" \ + --header "Upgrade: websocket" \ + --header "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==" \ + --header "Sec-WebSocket-Version: 13" \ + --http1.1 \ + http://127.0.0.1:9080/ws +``` + +```shell +HTTP/1.1 101 Switching Protocols +``` + +3. Initiate the WebSocket request again in another terminal, the request will be rejected. + +```shell +HTTP/1.1 503 Service Temporarily Unavailable +··· 503 Service Temporarily Unavailable @@ -125,8 +188,9 @@ curl -i http://127.0.0.1:9080/index.html?sleep=20 To disable the `limit-conn` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
-```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index 20dbfbc97dda..b3be40ea76d5 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -2,10 +2,9 @@ title: limit-count keywords: - APISIX - - Plugin + - API Gateway - Limit Count - - limit-count -description: This document contains information about the Apache APISIX limit-count Plugin. +description: This document contains information about the Apache APISIX limit-count Plugin, you can use it to limit the number of requests to your service by a given count per time. --- - You can also create a group to share the same counter across multiple Routes: -```bash -curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/services/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -130,34 +130,37 @@ curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f0343 Now every Route which belongs to group `services_1#1640140620` (or the service with ID `1`) will share the same counter. 
-```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "service_id": "1", "uri": "/hello" }' ``` -```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "service_id": "1", "uri": "/hello2" }' ``` -```bash +```shell curl -i http://127.0.0.1:9080/hello ``` -```bash +```shell HTTP/1.1 200 ... ``` You can also share the same limit counter for all your requests by setting the `key_type` to `constant`: -```bash -curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/services/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -184,8 +187,9 @@ For cluster-level traffic limiting, you can use a Redis server. 
The counter will The example below shows how you can use the `redis` policy: -```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -213,8 +217,9 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 Similarly you can also configure the `redis-cluster` policy: -```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -245,11 +250,11 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 The above configuration limits to 2 requests in 60 seconds. The first two requests will work and the response headers will contain the headers `X-RateLimit-Limit` and `X-RateLimit-Remaining`: -```bash +```shell curl -i http://127.0.0.1:9080/index.html ``` -```bash +```shell HTTP/1.1 200 OK Content-Type: text/html Content-Length: 13175 @@ -261,25 +266,17 @@ Server: APISIX web server When you visit for a third time in the 60 seconds, you will receive a response with 503 code: -```bash +```shell HTTP/1.1 503 Service Temporarily Unavailable Content-Type: text/html Content-Length: 194 Connection: keep-alive Server: APISIX web server - - -503 Service Temporarily Unavailable - -

503 Service Temporarily Unavailable

-
openresty
- - ``` You can also set a custom response by configuring the `rejected_msg` attribute: -```bash +```shell HTTP/1.1 503 Service Temporarily Unavailable Content-Type: text/html Content-Length: 194 @@ -293,8 +290,9 @@ Server: APISIX web server To disable the `limit-count` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. -```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/plugins/limit-req.md b/docs/en/latest/plugins/limit-req.md index 5760aa5577fb..1911a202786c 100644 --- a/docs/en/latest/plugins/limit-req.md +++ b/docs/en/latest/plugins/limit-req.md @@ -2,10 +2,10 @@ title: limit-req keywords: - APISIX - - Plugin + - API Gateway - Limit Request - limit-req -description: This document contains information about the Apache APISIX limit-req Plugin. +description: The limit-req Plugin limits the number of requests to your service using the leaky bucket algorithm. --- + +## Description + +The `openfunction` Plugin is used to integrate APISIX with [CNCF OpenFunction](https://openfunction.dev/) serverless platform. + +This Plugin can be configured on a Route and requests will be sent to the configured OpenFunction API endpoint as the upstream. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| --------------------------- | ------- | -------- | ------- | ------------ | ---------------------------------------------------------------------------------------------------------- | +| function_uri | string | True | | | function uri. For example, `https://localhost:30858/default/function-sample`. 
| +| ssl_verify | boolean | False | true | | When set to `true` verifies the SSL certificate. | +| authorization | object | False | | | Authorization credentials to access functions of OpenFunction. | +| authorization.service_token | string | False | | | The token format is 'xx:xx' which supports basic auth for function entry points. | +| timeout | integer | False | 3000 ms | [100, ...] ms| OpenFunction action and HTTP call timeout in ms. | +| keepalive | boolean | False | true | | When set to `true` keeps the connection alive for reuse. | +| keepalive_timeout | integer | False | 60000 ms| [1000,...] ms| Time is ms for connection to remain idle without closing. | +| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. | + +:::note + +The `timeout` attribute sets the time taken by the OpenFunction to execute, and the timeout for the HTTP client in APISIX. OpenFunction calls may take time to pull the runtime image and start the container. So, if the value is set too small, it may cause a large number of requests to fail. + +::: + +## Prerequisites + +Before configuring the plugin, you need to have OpenFunction running. +Installation of OpenFunction requires a certain version Kubernetes cluster. +For details, please refer to [Installation](https://openfunction.dev/docs/getting-started/installation/). + +### Create and Push a Function + +You can then create a function following the [sample](https://github.com/OpenFunction/samples) + +You'll need to push your function container image to a container registry like Docker Hub or Quay.io when building a function. To do that, you'll need to generate a secret for your container registry first. 
+ +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER=${your_registry_user} REGISTRY_PASSWORD=${your_registry_password} +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## Enable the Plugin + +You can now configure the Plugin on a specific Route and point to this running OpenFunction service: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## Example usage + +Once you have configured the plugin, you can send a request to the Route and it will invoke the configured function: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +This will give back the response from the function: + +``` +hello, test! +``` + +### Configure Path Transforming + +The `OpenFunction` Plugin also supports transforming the URL path while proxying requests to the OpenFunction API endpoints. Extensions to the base request path get appended to the `function_uri` specified in the Plugin configuration. + +:::info IMPORTANT + +The `uri` configured on a Route must end with `*` for this feature to work properly. APISIX Routes are matched strictly and the `*` implies that any subpath to this URI would be matched to the same Route. 
+ +::: + +The example below configures this feature: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +Now, any requests to the path `hello/123` will invoke the OpenFunction, and the added path is forwarded: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## Disable Plugin + +To disable the `openfunction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md index 5b33e5d53ad0..56f20e75b777 100644 --- a/docs/en/latest/plugins/openid-connect.md +++ b/docs/en/latest/plugins/openid-connect.md @@ -2,10 +2,10 @@ title: openid-connect keywords: - APISIX - - Plugin + - API Gateway - OpenID Connect - - openid-connect -description: This document contains information about the Apache APISIX openid-connect Plugin. + - OIDC +description: OpenID Connect allows the client to obtain user information from the identity providers, such as Keycloak, Ory Hydra, Okta, Auth0, etc. API Gateway APISIX supports to integrate with the above identity providers to protect your APIs. --- + +## Description + +The `tencent-cloud-cls` Plugin uses [TencentCloud CLS](https://cloud.tencent.com/document/product/614)API to forward APISIX logs to your topic. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ----------------- | ------- | -------- |---------| ------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| cls_host | string | Yes | | | CLS API host, please refer to [Uploading Structured Logs](https://www.tencentcloud.com/document/api/614/16873). | +| cls_topic | string | Yes | | | topic id of CLS. | +| secret_id | string | Yes | | | SecretId of your API key. | +| secret_key | string | Yes | | | SecretKey of your API key. | +| sample_ratio | number | No | 1 | [0.00001, 1] | How often to sample the requests. Setting to `1` will sample all requests. | +| include_req_body | boolean | No | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to NGINX's limitations. | +| include_resp_body | boolean | No | false | [false, true] | When set to `true` includes the response body in the log. | +| global_tag | object | No | | | kv pairs in JSON, sent with each log. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `tencent-cloud-cls` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tencent-cloud-cls \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enabling the Plugin + +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "ap-guangzhou.cls.tencentyun.com", + "cls_topic": "${your CLS topic name}", + 
"global_tag": { + "module": "cls-logger", + "server_name": "YourApiGateWay" + }, + "include_req_body": true, + "include_resp_body": true, + "secret_id": "${your secret id}", + "secret_key": "${your secret key}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your cls topic: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Disable Plugin + +To disable this Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/traffic-split.md b/docs/en/latest/plugins/traffic-split.md index 89ba7e296bd3..b5df772e8cc6 100644 --- a/docs/en/latest/plugins/traffic-split.md +++ b/docs/en/latest/plugins/traffic-split.md @@ -2,11 +2,13 @@ title: traffic-split keywords: - APISIX - - Plugin + - API Gateway - Traffic Split - - traffic-split -description: This document contains information about the Apache APISIX traffic-split Plugin. + - Blue-green Deployment + - Canary Deployment +description: This document contains information about the Apache APISIX traffic-split Plugin, you can use it to dynamically direct portions of traffic to various Upstream services. --- + + +## Description + +The `workflow` plugin is used to introduce [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) to provide complex traffic control features. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------------- | ------------- | -------- | ------- | ------------ | ------------------------------------------------------------ | +| rules.case | array[array] | True | | | List of variables to match for filtering requests for conditional traffic split. It is in the format `{variable operator value}`. For example, `{"arg_name", "==", "json"}`. The variables here are consistent with NGINX internal variables. For details on supported operators, you can refer to [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). | +| rules.actions | array[object] | True | | | The action to be performed when the case matches successfully. Currently, only one element is supported in actions. The first child element of the actions' only element can be `return` or `limit-count`. | + +### `actions` Attributes + +#### return + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------------- | -------- | ------- | ------------ | ---------------------------------------------------------- | +| actions[1].return | string | False | | | Return directly to the client. | +| actions[1].[2].code | integer | False | | | HTTP status code returned to the client. | + +#### limit-count + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------------- | -------- | ------- | ------------ | ---------------------------------------------------------------- | +| actions[1].limit-count | string | False | | | Execute the functions of the `limit-count` plugin. | +| actions[1].[2] | object | False | | | Configuration of `limit-count` plugin, `group` is not supported. | + +:::note + +In `rules`, match `case` in order according to the index of the `rules`, and execute `actions` directly if `case` match. 
+ +::: + +## Enabling the Plugin + +You can configure the `workflow` plugin on a Route as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "plugins":{ + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/hello/rejected"] + ], + "actions":[ + [ + "return", + {"code": 403} + ] + ] + }, + { + "case":[ + ["uri", "==", "/hello/v2/appid"] + ], + "actions":[ + [ + "limit-count", + { + "count":2, + "time_window":60, + "rejected_code":429 + } + ] + ] + } + ] + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` + +Here, the `workflow` Plugin is enabled on the Route. If the request matches the `case` in the `rules`, the `actions` will be executed. + +**Example 1: If the requested uri is `/hello/rejected`, the status code `403` is returned to the client** + +```shell +curl http://127.0.0.1:9080/hello/rejected -i +HTTP/1.1 403 Forbidden +...... + +{"error_msg":"rejected by workflow"} +``` + +**Example 2: if the request uri is `/hello/v2/appid`, the `workflow` plugin would execute the `limit-count` plugin** + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 429 Too Many Requests +``` + +**Example 3: if the request can not match any `case` in the `rules`, the `workflow` plugin would do nothing** + +```shell +curl http://127.0.0.1:9080/hello/fake -i +HTTP/1.1 200 OK +``` + +## Disable Plugin + +To disable the `workflow` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/zipkin.md b/docs/en/latest/plugins/zipkin.md index 0e9909e1d91e..94c13f5e3e79 100644 --- a/docs/en/latest/plugins/zipkin.md +++ b/docs/en/latest/plugins/zipkin.md @@ -111,7 +111,7 @@ func main(){ The example below enables the Plugin on a specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -178,7 +178,7 @@ docker run -d --name jaeger \ Similar to configuring for Zipkin, create a Route and enable the Plugin: ``` -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -221,7 +221,7 @@ You can access the Jaeger UI to view the traces in endpoint [http://127.0.0.1:16 To disable the `zipkin` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/pubsub.md b/docs/en/latest/pubsub.md index d03ba03ac67c..2db871cecb0f 100644 --- a/docs/en/latest/pubsub.md +++ b/docs/en/latest/pubsub.md @@ -115,7 +115,7 @@ The plugins list [config-default.yaml](https://github.com/apache/apisix/blob/mas After this is done, create a route like the one below to connect to this messaging system via APISIX using the WebSocket. ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ${api-key}' \ -H 'Content-Type: application/json' \ -d '{ diff --git a/docs/en/latest/pubsub/kafka.md b/docs/en/latest/pubsub/kafka.md index b25936641d0f..2e0cc98c8887 100644 --- a/docs/en/latest/pubsub/kafka.md +++ b/docs/en/latest/pubsub/kafka.md @@ -74,7 +74,7 @@ Possible response body: When an error occurs, `ErrorResp` will be returned, whic Create a route, set the upstream `scheme` field to `kafka`, and configure `nodes` to be the address of the Kafka broker. ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ' \ -H 'Content-Type: application/json' \ -d '{ @@ -98,7 +98,7 @@ After configuring the route, you can use this feature. Simply turn on the `kafka-proxy` plugin on the created route and enable the Kafka TLS handshake and SASL authentication through the configuration, which can be found in the [plugin documentation](../../../en/latest/plugins/kafka-proxy.md). 
```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ' \ -H 'Content-Type: application/json' \ -d '{ diff --git a/docs/en/latest/router-radixtree.md b/docs/en/latest/router-radixtree.md index e2d0de65b6ff..7af4bcf8e1d5 100644 --- a/docs/en/latest/router-radixtree.md +++ b/docs/en/latest/router-radixtree.md @@ -82,7 +82,7 @@ Note: In the matching rules, the `priority` field takes precedence over other ru Create two routes with different `priority` values ​​(the larger the value, the higher the priority). ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -96,7 +96,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -123,7 +123,7 @@ All requests only hit the route of port `1980`. 
Here is an example of setting host matching rules: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -137,7 +137,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -197,7 +197,7 @@ Please take a look at [radixtree-new](https://github.com/api7/lua-resty-radixtre here is an simple example: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "vars": [ @@ -225,7 +225,7 @@ APISIX supports filtering route by POST form attributes with `Content-Type` = `a We can define the following route: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/_post", @@ -273,7 +273,7 @@ query getRepo { We can filter such route out with: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/graphql", diff --git a/docs/en/latest/stand-alone.md b/docs/en/latest/stand-alone.md index 736bd39601a3..c08ce9632f13 100644 --- 
a/docs/en/latest/stand-alone.md +++ b/docs/en/latest/stand-alone.md @@ -34,14 +34,15 @@ The routing rules in the `conf/apisix.yaml` file are loaded into memory immediat Since the current Admin API is based on the etcd configuration center solution, enable Admin API is not allowed when the Stand-alone mode is enabled. -To enable Stand-alone mode, we can set `apisix.config_center` to `yaml` and disable Admin API in file `conf/config.yaml`. +The Stand-alone mode can only be enabled when set the role of APISIX as data plane. We can set `deployment.role` to `data_plane` and `deployment.role_data_plane.config_provider` to `yaml`. Refer to the example below: ```yaml -apisix: - enable_admin: false - config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml ``` ### How to configure rules @@ -284,9 +285,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/docs/en/latest/stream-proxy.md b/docs/en/latest/stream-proxy.md index 34797ec98c2a..0ab87b8df4ef 100644 --- a/docs/en/latest/stream-proxy.md +++ b/docs/en/latest/stream-proxy.md @@ -59,7 +59,7 @@ apisix: Here is a mini example: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "remote_addr": "127.0.0.1", "upstream": { @@ -86,7 +86,7 @@ And we can add more options to match a route. 
Currently stream route configurati Here is an example: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.1", "server_port": 2000, @@ -129,7 +129,7 @@ Let's take another real world example: 3. Now we are going to create a stream route with server filtering: ```shell - curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.10", "server_port": 9101, @@ -187,7 +187,7 @@ mTLS is also supported, see [Protect Route](./mtls.md#protect-route) for how to Third, we need to configure a stream route to match and proxy it to the upstream: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -201,7 +201,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 When the connection is TLS over TCP, we can use the SNI to match a route, like: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "sni": "a.test.com", "upstream": { @@ -220,7 +220,7 @@ In this case, a connection handshaked with SNI `a.test.com` will be proxied to ` APISIX also supports proxying to TLS over TCP upstream. 
```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "scheme": "tls", diff --git a/docs/en/latest/terminology/consumer.md b/docs/en/latest/terminology/consumer.md index 331cc6cc9c41..3cd665fbdfc6 100644 --- a/docs/en/latest/terminology/consumer.md +++ b/docs/en/latest/terminology/consumer.md @@ -60,7 +60,7 @@ The example below shows how you can enable a Plugin for a specific Consumer. ```shell # Create a Consumer, specify the authentication plugin key-auth, and enable the specific plugin limit-count -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -77,7 +77,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335 }' # Create a Router, set routing rules and enable plugin configuration -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -109,7 +109,7 @@ We can use the [consumer-restriction](../plugins/consumer-restriction.md) Plugin ```shell # Add Jack to the blacklist -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {}, diff --git a/docs/en/latest/architecture-design/plugin-config.md b/docs/en/latest/terminology/plugin-config.md similarity index 84% rename from docs/en/latest/architecture-design/plugin-config.md 
rename to docs/en/latest/terminology/plugin-config.md index 075562f414bc..a544312adc3a 100644 --- a/docs/en/latest/architecture-design/plugin-config.md +++ b/docs/en/latest/terminology/plugin-config.md @@ -1,5 +1,10 @@ --- title: Plugin Config +keywords: + - API gateway + - Apache APISIX + - Plugin Config +description: Plugin Config in Apache APISIX. --- + +This article will guide you through APISIX's upstream, routing, and service concepts and introduce how to publish your services through APISIX. + +## Concept introduction + +### Upstream + +[Upstream](../terminology/upstream.md) is a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules. + +The role of the Upstream is to load balance the service nodes according to the configuration rules, and Upstream information can be directly configured to the Route or Service. + +When multiple routes or services refer to the same upstream, you can create an upstream object and use the upstream ID in the Route or Service to reference the upstream to reduce maintenance pressure. + +### Route + +[Routes](../terminology/route.md) match the client's request based on defined rules, load and execute the corresponding plugins, and forwards the request to the specified Upstream. + +### Service + +A [Service](../terminology/service.md) is an abstraction of an API (which can also be understood as a set of Route abstractions). It usually corresponds to an upstream service abstraction. + +## Prerequisites + +Please make sure you have [installed Apache APISIX](../installation-guide.md) before doing the following. + +## Expose your service + +1. Create an Upstream. + +Create an Upstream service containing `httpbin.org` that you can use for testing. This is a return service that will return the parameters we passed in the request. 
+ +``` +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } +}' +``` + +In this command, we specify the Admin API Key of Apache APISIX as `edd1c9f034335f136f87ad84b625c8f1`, use `roundrobin` as the load balancing mechanism, and set `httpbin.org:80` as the upstream service. To bind this upstream to a route, `upstream_id` needs to be set to `1` here. Here you can specify multiple upstreams under `nodes` to achieve load balancing. + +For more information, please refer to [Upstream](../terminology/upstream.md). + +2. Create a Route. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream_id": "1" +}' +``` + +:::note + +Adding an `upstream` object to your route can achieve the above effect. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +::: + +3. Test + +After creating the Route, you can test the Service with the following command: + +``` +curl -i -X GET "http://127.0.0.1:9080/get?foo1=bar1&foo2=bar2" -H "Host: httpbin.org" +``` + +APISIX will forward the request to `http://httpbin.org:80/anything/foo?arg=10`. + +## More Tutorials + +You can refer to [Protect API](./protect-api.md) to protect your API. + +You can also use APISIX's [Plugin](../terminology/plugin.md) to achieve more functions. 
diff --git a/docs/en/latest/tutorials/observe-your-api.md b/docs/en/latest/tutorials/observe-your-api.md new file mode 100644 index 000000000000..2e4b94529107 --- /dev/null +++ b/docs/en/latest/tutorials/observe-your-api.md @@ -0,0 +1,258 @@ +--- +title: Observe APIs +keywords: + - API gateway + - Apache APISIX + - Observability + - Monitor + - Plugins +description: Apache APISIX Observability Plugins and take a look at how to set up these plugins. +--- + + + +In this guide, we can leverage the power of some [Apache APISIX](https://apisix.apache.org/) Observability Plugins and take a look at how to set up these plugins, how to use them to understand API behavior, and later solve problems that impact our users. + +## API Observability + +Nowadays **API Observability** is already a part of every API development as it addresses many problems related to API consistency, reliability, and the ability to quickly iterate on new API features. When you design for full-stack observability, you get everything you need to find issues and catch breaking changes. + +API observability can help every team in your organization: + +- Sales and growth teams to monitor your API usage, free trials, observe expansion opportunities and ensure that API serves the correct data. + +- Engineering teams to monitor and troubleshoot API issues. + +- Product teams to understand API usage and business value. + +- Security teams to detect and protect from API threats. + +![API observability in every team](https://static.apiseven.com/2022/09/14/6321ceff5548e.jpg) + +## A central point for observation + +We know that **an API gateway** offers a central control point for incoming traffic to a variety of destinations but it can also be a central point for observation as well since it is uniquely qualified to know about all the traffic moving between clients and our service networks. + +The core of observability breaks down into _three key areas_: structured logs, metrics, and traces. 
Let’s break down each pillar of API observability and learn how with Apache APISIX Plugins we can simplify these tasks and provides a solution that you can use to better understand API usage. + +![Observability of three key areas](https://static.apiseven.com/2022/09/14/6321cf14c555a.jpg) + +## Prerequisites + +Before enabling our plugins we need to install Apache APISIX, create a route, an upstream, and map the route to the upstream. You can simply follow [getting started guide](https://apisix.apache.org/docs/apisix/getting-started) provided on the website. + +## Logs + +**Logs** are also easy to instrument and trivial steps of API observability, they can be used to inspect API calls in real-time for debugging, auditing, and recording time-stamped events that happened over time. There are several logger plugins Apache APISIX provides such as: + +- [http-logger](https://apisix.apache.org/docs/apisix/plugins/http-logger/) + +- [skywalking-logger](https://apisix.apache.org/docs/apisix/plugins/skywalking-logger/) + +- [tcp-logger](https://apisix.apache.org/docs/apisix/plugins/tcp-logger) + +- [kafka-logger](https://apisix.apache.org/docs/apisix/plugins/kafka-logger) + +- [rocketmq-logger](https://apisix.apache.org/docs/apisix/plugins/rocketmq-logger) + +- [udp-logger](https://apisix.apache.org/docs/apisix/plugins/udp-logger) + +- [clickhouse-logger](https://apisix.apache.org/docs/apisix/plugins/clickhouse-logger) + +- [error-logger](https://apisix.apache.org/docs/apisix/plugins/error-log-logger) + +- [google-cloud-logging](https://apisix.apache.org/docs/apisix/plugins/google-cloud-logging) + +And you can see the [full list](../plugins/http-logger.md) on the official website of Apache APISIX. Now for demo purposes, let's choose a simple but mostly used _http-logger_ plugin that is capable of sending API Log data requests to HTTP/HTTPS servers or sends as JSON objects to Monitoring tools. We can assume that a route and an upstream are created. 
You can learn how to set up them in the **[Getting started with Apache APISIX](https://youtu.be/dUOjJkb61so)** video tutorial. Also, you can find all command-line examples on the GitHub page [apisix-observability-plugins](https://boburmirzo.github.io/apisix-observability-plugins/) + +You can generate a mock HTTP server at [mockbin.com](https://mockbin.org/) to record and view the logs. Note that we also bind the route to an upstream (You can refer to this documentation to learn about more [core concepts of Apache APISIX](https://apisix.apache.org/docs/apisix/architecture-design/apisix)). + +The following is an example of how to enable the http-logger for a specific route. + +```shell + +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" + } + }, + "upstream_id": "1", + "uri": "/get" +}' + +``` + +:::note + +To `http-logger` plugin settings, your can just put your mock server URI address like below: + +```json +{ + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" +} +``` + +::: + +Once we get a successful response from APISIX server, we can send a request to this _get_ endpoint to generate logs. + +```shell + +curl -i http://127.0.0.1:9080/get + +``` + +Then if you click and navigate to the following our [mock server link](http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61/log) some recent logs are sent and we can see them: + +![http-logger-plugin-test-screenshot](https://static.apiseven.com/2022/09/14/6321d1d83eb7a.png) + +## Metrics + +**Metrics** are a numeric representation of data measured over intervals of time. You can also aggregate this data into daily or weekly frequency and run queries against a distributed system like [Elasticsearch](https://www.elastic.co/). Or sometimes based on metrics you trigger alerts to take any action later. 
Once API metrics are collected, you can track them with metrics tracking tools such as [Prometheus](https://prometheus.io/). + +Apache APISIX API Gateway also offers [prometheus-plugin](https://apisix.apache.org/docs/apisix/plugins/prometheus/) to fetch your API metrics and expose them in Prometheus. Behind the scene, Apache APISIX downloads the Grafana dashboard meta, imports it to [Grafana](https://grafana.com/), and fetches real-time metrics from the Prometheus plugin. + +Let’s enable prometheus-plugin for our route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream_id": "1" +}' +``` + +We fetch the metric data from the specified URL `/apisix/prometheus/metrics`. + +```shell +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +You will get a response with Prometheus metrics something like below: + +```text +HTTP/1.1 200 OK +Server: openresty +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive + +# HELP apisix_batch_process_entries batch process remaining entries +# TYPE apisix_batch_process_entries gauge +apisix_batch_process_entries{name="http logger",route_id="1",server_addr="172.19.0.8"} 0 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 17819 +apisix_etcd_modify_indexes{key="global_rules"} 17832 +apisix_etcd_modify_indexes{key="max_modify_index"} 20028 +apisix_etcd_modify_indexes{key="prev_index"} 18963 +apisix_etcd_modify_indexes{key="protos"} 0 +apisix_etcd_modify_indexes{key="routes"} 20028 +... 
+``` + +And we can also check the status of our endpoint at the Prometheus dashboard by pointing to this URL `http://localhost:9090/targets` + +![plugin-orchestration-configure-rule-screenshot](https://static.apiseven.com/2022/09/14/6321d30b32024.png) + +As you can see, Apache APISIX exposed metrics endpoint is upon and running. + +Now you can query metrics for `apisix_http_status` to see what HTTP requests are handled by API Gateway and what was the outcome. + +![prometheus-plugin-dashboard-query-http-status-screenshot](https://static.apiseven.com/2022/09/14/6321d30aed3b2.png) + +In addition to this, you can view the Grafana dashboard running in your local instance. Go to `http://localhost:3000/` + +![prometheus-plugin-grafana-dashboard-screenshot](https://static.apiseven.com/2022/09/14/6321d30bba97c.png) + +You can also check two other plugins for metrics: + +- [Node status Plugin](../plugins/node-status.md) + +- [Datadog Plugin](../plugins/datadog.md) + +## Tracing + +The third is **tracing** or distributed tracing allows you to understand the life of a request as it traverses your service network and allows you to answer questions like what service has this request touched and how much latency was introduced. Traces enable you to further explore which logs to look at for a particular session or related set of API calls. + +[Zipkin](https://zipkin.io/) an open-source distributed tracing system. [APISIX plugin](https://apisix.apache.org/docs/apisix/plugins/zipkin) is supported to collect tracing and report to Zipkin Collector based on [Zipkin API specification](https://zipkin.io/pages/instrumenting.html). 
+ +Here’s an example to enable the `zipkin` plugin on the specified route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": [ + "GET" + ], + "uri": "/get", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1 + } + }, + "upstream_id": "1" +}' +``` + +We can test our example by simply running the following curl command: + +```shell +curl -i http://127.0.0.1:9080/get +``` + +As you can see, there are some additional trace identifiers (like traceId, spanId, parentId) were appended to the headers: + +```text +"X-B3-Parentspanid": "61bd3f4046a800e7", +"X-B3-Sampled": "1", +"X-B3-Spanid": "855cd5465957f414", +"X-B3-Traceid": "e18985df47dab632d62083fd96626692", +``` + +Then you can use a browser to access `http://127.0.0.1:9411/zipkin`, see traces on the Web UI of Zipkin. + +> Note that you need to run the Zipkin instance in order to install Zipkin Web UI. For example, by using docker you can simply run it: +>`docker run -d -p 9411:9411 openzipkin/zipkin` + +![Zipkin plugin output 1](https://static.apiseven.com/2022/09/14/6321dc27f3d33.png) + +![Zipkin plugin output 2](https://static.apiseven.com/2022/09/14/6321dc284049c.png) + +As you noticed, the recent traces were exposed in the above pictures. + +You can also check two other plugins for tracing: + +- [Skywalking-plugin](../plugins/skywalking.md) + +- [Opentelemetry-plugin](../plugins/opentelemetry.md) + +## Summary + +As we learned, API Observability is a sort of framework for managing your applications in an API world and Apache APISIX API Gateway plugins can help when observing modern API-driven applications by integrating to several observability platforms. So, you can make your development work focused on core business features instead of building a custom integration for observability tools. 
diff --git a/docs/en/latest/tutorials/protect-api.md b/docs/en/latest/tutorials/protect-api.md new file mode 100644 index 000000000000..dd0d67e15299 --- /dev/null +++ b/docs/en/latest/tutorials/protect-api.md @@ -0,0 +1,124 @@ +--- +title: Protect API +keywords: + - API Gateway + - Apache APISIX + - Rate Limit + - Protect API +description: This article describes how to secure your API with the rate limiting plugin for API Gateway Apache APISIX. +--- + + + +This article describes secure your API with the rate limiting plugin for API Gateway Apache APISIX. + +## Concept introduction + +### Plugin + +This represents the configuration of the plugins that are executed during the HTTP request/response lifecycle. A [Plugin](./terminology/plugin.md) configuration can be bound directly to a Route, a Service, a Consumer or a Plugin Config. + +:::note + +If [Route](./terminology/route.md), [Service](./terminology/service.md), [Plugin Config](./terminology/plugin-config.md) or Consumer are all bound to the same for plugins, only one plugin configuration will take effect. The priority of plugin configurations is: Consumer > Route > Plugin Config > Service. At the same time, there are 6 stages involved in the plugin execution process, namely `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter` and `log`. + +::: + +## Preconditions + +Before following this tutorial, ensure you have [exposed the service](./expose-api.md). + +## Protect your API + +We can use rate limits to limit our API services to ensure the stable operation of API services and avoid system crashes caused by some sudden traffic. We can restrict as follows: + +1. Limit the request rate; +2. Limit the number of requests per unit time; +3. Delay request; +4. Reject client requests; +5. Limit the rate of response data. 
+ +APISIX provides several plugins for limiting current and speed, including [limit-conn](./plugins/limit-conn.md), [limit-count](./plugins/limit-count.md), [limit- req](./plugins/limit-req.md) and other plugins. + +- The `limit-conn` Plugin limits the number of concurrent requests to your services. +- The `limit-req` Plugin limits the number of requests to your service using the leaky bucket algorithm. +- The `limit-count` Plugin limits the number of requests to your service by a given count per time. + +Next, we will use the `limit-count` plugin as an example to show you how to protect your API with a rate limit plugin: + +1. Create a Route. + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream_id": "1" + } +}' +``` + +In the above configuration, a Route with ID `1` is created using the upstream made in [Expose Service](./expose-api.md), and the `limit-count` plugin is enabled. The plugin only allows the client to access the upstream service `2` times within `60` seconds. If more than two times, the `503` error code will be returned. + +2. Test + +```shell +curl http://127.0.0.1:9080/index.html +``` + +After using the above command to access three times in a row, the following error will appear: + +``` + +503 Service Temporarily Unavailable + +

503 Service Temporarily Unavailable

+
openresty
+ + +``` + +If the above result is returned, the `limit-count` plugin has taken effect and protected your API. + +## More Traffic plugins + +In addition to providing plugins for limiting current and speed, APISIX also offers many other plugins to meet the needs of actual scenarios: + +- [proxy-cache](./plugins/proxy-cache.md): This plugin provides the ability to cache backend response data. It can be used with other plugins. The plugin supports both disk and memory-based caching. Currently, the data to be cached can be specified according to the response code and request mode, and more complex caching strategies can also be configured through the no_cache and cache_bypass attributes. +- [request-validation](./plugins/request-validation.md): This plugin is used to validate requests forwarded to upstream services in advance. +- [proxy-mirror](./plugins/proxy-mirror.md): This plugin provides the ability to mirror client requests. Traffic mirroring is copying the real online traffic to the mirroring service, so that the online traffic or request content can be analyzed in detail without affecting the online service. +- [api-breaker](./plugins/api-breaker.md): This plugin implements an API circuit breaker to help us protect upstream business services. +- [traffic-split](./plugins/traffic-split.md): You can use this plugin to gradually guide the percentage of traffic between upstreams to achieve blue-green release and grayscale release. +- [request-id](./plugins/request-id.md): The plugin adds a `unique` ID to each request proxy through APISIX for tracking API requests. +- [proxy-control](./plugins/proxy-control.md): This plugin can dynamically control the behavior of NGINX proxy. +- [client-control](./plugins/client-control.md): This plugin can dynamically control how NGINX handles client requests by setting an upper limit on the client request body size. 
+ +## More Tutorials + +You can refer to the [Observe API](./observe-your-api.md) document to monitor APISIX, collect logs, and track. diff --git a/docs/en/latest/wasm.md b/docs/en/latest/wasm.md index 506207303a8f..e261bc3a3858 100644 --- a/docs/en/latest/wasm.md +++ b/docs/en/latest/wasm.md @@ -73,7 +73,7 @@ That's all. Now you can use the wasm plugin as a regular plugin. For example, enable this plugin on the specified route: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { diff --git a/docs/en/latest/xrpc/redis.md b/docs/en/latest/xrpc/redis.md index 26c4added22b..415e27da6ac2 100644 --- a/docs/en/latest/xrpc/redis.md +++ b/docs/en/latest/xrpc/redis.md @@ -86,7 +86,7 @@ Assumed the APISIX is proxying TCP on port `9101`, and the Redis is listening on Let's create a Stream Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' {     "upstream": {         "type": "none", diff --git a/docs/zh/latest/CHANGELOG.md b/docs/zh/latest/CHANGELOG.md index 08e3a82db38c..a7907fd2dc5a 100644 --- a/docs/zh/latest/CHANGELOG.md +++ b/docs/zh/latest/CHANGELOG.md @@ -23,8 +23,11 @@ title: CHANGELOG ## Table of Contents +- [3.0.0-beta](#300-beta) +- [2.15.0](#2150) - [2.14.1](#2141) - [2.14.0](#2140) +- [2.13.3](#2133) - [2.13.2](#2132) - [2.13.1](#2131) - [2.13.0](#2130) @@ -59,6 +62,204 @@ title: CHANGELOG - [0.7.0](#070) - [0.6.0](#060) +## 3.0.0-beta + +这里我们使用 `2.99.0` 作为源代码中的版本号,而不是代码名称 +`3.0.0-beta`,有两个原因。 + +1. 避免在一些程序试图比较版本时出现意外的错误,因为 `3.0.0-beta` 包含 `3.0.0` 并且比它长。 +2. 
一些软件包系统可能不允许在版本号后面有一个后缀。 + +### Change + +#### 移动 config_center、etcd 和 Admin API 的配置到 deployment 下面 + +我们调整了下静态配置文件里面的配置,所以你需要同步更新下 config.yaml 里面的配置了: + +- `config_center` 功能改由 `deployment` 下面的 `config_provider` 实现: [#7901](https://github.com/apache/apisix/pull/7901) +- `etcd` 字段整体搬迁到 `deployment` 下面: [#7860](https://github.com/apache/apisix/pull/7860) +- 以下的 Admin API 配置移动到 `deployment` 下面的 `admin` 字段:[#7823](https://github.com/apache/apisix/pull/7823) + - admin_key + - enable_admin_cors + - allow_admin + - admin_listen + - https_admin + - admin_api_mtls + - admin_api_version + +具体可以参考最新的 config-default.yaml。 + +#### 移除多个已废弃的配置 + +借着 3.0 新版本的机会,我们把许多之前标记为 deprecated 的配置清理出去。 + +在静态配置中,我们移除了以下若干字段: + +- 移除 `apisix.ssl` 中的 `enable_http2` 和 `listen_port`:[#7717](https://github.com/apache/apisix/pull/7717) +- 移除 `apisix.port_admin`: [#7716](https://github.com/apache/apisix/pull/7716) +- 移除 `etcd.health_check_retry`: [#7676](https://github.com/apache/apisix/pull/7676) +- 移除 `nginx_config.http.lua_shared_dicts`: [#7677](https://github.com/apache/apisix/pull/7677) +- 移除 `nginx_config.http.real_ip_header`: [#7696](https://github.com/apache/apisix/pull/7696) + +在动态配置中,我们做了以下调整: + +- 将插件配置的 `disable` 移到 `_meta` 下面:[#7707](https://github.com/apache/apisix/pull/7707) +- 从 Route 里面移除了 `service_protocol`:[#7701](https://github.com/apache/apisix/pull/7701) + +此外还有具体插件级别上的改动: + +- authz-keycloak 中移除了 `audience` 字段: [#7683](https://github.com/apache/apisix/pull/7683) +- mqtt-proxy 中移除了 `upstream` 字段:[#7694](https://github.com/apache/apisix/pull/7694) +- error-log-logger 中把 tcp 相关配置放到 `tcp` 字段下面:[#7700](https://github.com/apache/apisix/pull/7700) +- syslog 中移除了 `max_retry_times` 和 `retry_interval` 字段: [#7699](https://github.com/apache/apisix/pull/7699) +- proxy-rewrite 中移除了 `scheme` 字段: [#7695](https://github.com/apache/apisix/pull/7695) + +#### 新的 Admin API 响应格式 + +我们在以下若干个 PR 中调整了 Admin API 的响应格式: + +- [#7630](https://github.com/apache/apisix/pull/7630) +- 
[#7622](https://github.com/apache/apisix/pull/7622) + +新的响应格式展示如下: + +返回单个配置: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +返回多个配置: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +#### 其他 + +- Admin API 的端口改为 9180:[#7806](https://github.com/apache/apisix/pull/7806) +- 我们只支持 OpenResty 1.19.3.2 及以上的版本:[#7625](https://github.com/apache/apisix/pull/7625) +- 调整了 Plugin Config 对象的优先级,同名插件配置的优先级由 Consumer > Plugin Config > Route > Service 变成 Consumer > Route > Plugin Config > Service: [#7614](https://github.com/apache/apisix/pull/7614) + +### Core + +- 集成 grpc-client-nginx-module 到 APISIX: [#7917](https://github.com/apache/apisix/pull/7917) +- k8s 服务发现支持配置多个集群:[#7895](https://github.com/apache/apisix/pull/7895) + +### Plugin + +- 支持在 opentelemetry 插件里注入指定前缀的 header:[#7822](https://github.com/apache/apisix/pull/7822) +- 新增 openfunction 插件:[#7634](https://github.com/apache/apisix/pull/7634) +- 新增 elasticsearch-logger 插件:[#7643](https://github.com/apache/apisix/pull/7643) +- response-rewrite 插件支持增加响应体:[#7794](https://github.com/apache/apisix/pull/7794) +- log-rorate 支持指定最大大小来切割日志:[#7749](https://github.com/apache/apisix/pull/7749) +- 新增 workflow 插件: + - [#7760](https://github.com/apache/apisix/pull/7760) + - [#7771](https://github.com/apache/apisix/pull/7771) +- 新增 Tencent Cloud Log Service 插件:[#7593](https://github.com/apache/apisix/pull/7593) +- jwt-auth 支持 ES256 算法: [#7627](https://github.com/apache/apisix/pull/7627) +- ldap-auth 内部实现,由 lualdap 换成 lua-resty-ldap:[#7590](https://github.com/apache/apisix/pull/7590) +- prometheus 插件内的 http request metrics 支持通过变量来设置额外的 
labels:[#7549](https://github.com/apache/apisix/pull/7549) +- clickhouse-logger 插件支持指定多个 clickhouse endpoints: [#7517](https://github.com/apache/apisix/pull/7517) + +### Bugfix + +- gRPC 代理设置 :authority 请求头为配置的上游 Host: [#7939](https://github.com/apache/apisix/pull/7939) +- response-rewrite 写入空 body 时有可能导致 AIPSIX 无法响应该请求:[#7836](https://github.com/apache/apisix/pull/7836) +- 修复同时使用 Plugin Config 和 Consumer,有一定概率发生插件配置没有更新的问题:[#7965](https://github.com/apache/apisix/pull/7965) +- 日志切割时,只 reopen 一次日志文件:[#7869](https://github.com/apache/apisix/pull/7869) +- 默认不应开启被动健康检查: [#7850](https://github.com/apache/apisix/pull/7850) +- zipkin 插件即使不进行 sample,也要向上游传递 trace IDs: [#7833](https://github.com/apache/apisix/pull/7833) +- 将 opentelemetry 的 span kind 更正为 server: [#7830](https://github.com/apache/apisix/pull/7830) +- limit-count 插件中,同样配置的不同路由不应该共享同一个计数器:[#7750](https://github.com/apache/apisix/pull/7750) +- 修复偶发的移除 clean_handler 时抛异常的问题: [#7648](https://github.com/apache/apisix/pull/7648) +- 允许配置上游节点时直接使用 IPv6 字面量: [#7594](https://github.com/apache/apisix/pull/7594) +- wolf-rbac 插件调整对错误的响应方式: + - [#7561](https://github.com/apache/apisix/pull/7561) + - [#7497](https://github.com/apache/apisix/pull/7497) +- 当代理到上游之前发生 500 错误时,代理到上游之后运行的插件不应被跳过 [#7703](https://github.com/apache/apisix/pull/7703) +- 当 consumer 上绑定了多个插件且该插件定义了 rewrite 方法时,避免抛出异常 [#7531](https://github.com/apache/apisix/pull/7531) +- 升级 lua-resty-etcd 到 1.8.3。该版本修复了若干问题。 [#7565](https://github.com/apache/apisix/pull/7565) + +## 2.15.0 + +### Change + +- grpc 状态码 OUT_OF_RANGE 如今会在 grpc-transcode 插件中作为 http 状态码 400: [#7419](https://github.com/apache/apisix/pull/7419) +- 重命名 `etcd.health_check_retry` 配置项为 `startup_retry`。 [#7304](https://github.com/apache/apisix/pull/7304) +- 移除 `upstream.enable_websocket`。该配置已于 2020 年标记成已过时。 [#7222](https://github.com/apache/apisix/pull/7222) + +### Core + +- 支持动态启用插件 [#7453](https://github.com/apache/apisix/pull/7453) +- 支持动态指定插件执行顺序 
[#7273](https://github.com/apache/apisix/pull/7273) +- 支持 Upstream 对象从 SSL 对象中引用证书 [#7221](https://github.com/apache/apisix/pull/7221) +- 允许在插件中使用自定义错误 [#7128](https://github.com/apache/apisix/pull/7128) +- xRPC Redis 代理增加 metrics: [#7183](https://github.com/apache/apisix/pull/7183) +- 引入 deployment role 概念来简化 APISIX 的部署: + - [#7405](https://github.com/apache/apisix/pull/7405) + - [#7417](https://github.com/apache/apisix/pull/7417) + - [#7392](https://github.com/apache/apisix/pull/7392) + - [#7365](https://github.com/apache/apisix/pull/7365) + - [#7249](https://github.com/apache/apisix/pull/7249) + +### Plugin + +- prometheus 指标中提供 ngx.shared.dict 统计信息 [#7412](https://github.com/apache/apisix/pull/7412) +- 允许在 proxy-rewrite 插件中使用客户端发过来的原始 URL [#7401](https://github.com/apache/apisix/pull/7401) +- openid-connect 插件支持 PKCE: [#7370](https://github.com/apache/apisix/pull/7370) +- sls-logger 插件支持自定义日志格式 [#7328](https://github.com/apache/apisix/pull/7328) +- kafka-logger 插件支持更多的 Kafka 客户端配置 [#7266](https://github.com/apache/apisix/pull/7266) +- openid-connect 插件支持暴露 refresh token [#7220](https://github.com/apache/apisix/pull/7220) +- 移植 prometheus 插件到 stream 子系统 [#7174](https://github.com/apache/apisix/pull/7174) + +### Bugfix + +- Kubernetes 服务发现在重试时应当清除上一次尝试时遗留的状态 [#7506](https://github.com/apache/apisix/pull/7506) +- redirect 插件禁止同时启用冲突的 http_to_https 和 append_query_string 配置 [#7433](https://github.com/apache/apisix/pull/7433) +- 默认配置下,http-logger 不再发送空 Authorization 头 [#7444](https://github.com/apache/apisix/pull/7444) +- 修复 limit-count 插件不能同时配置 group 和 disable 的问题 [#7384](https://github.com/apache/apisix/pull/7384) +- 让 request-id 插件优先执行,这样 tracing 插件可以用到 request id [#7281](https://github.com/apache/apisix/pull/7281) +- 更正 grpc-transcode 插件中对 repeated Message 的处理。 [#7231](https://github.com/apache/apisix/pull/7231) +- 允许 proxy-cache 插件 cache key 出现缺少的值。 [#7168](https://github.com/apache/apisix/pull/7168) +- 减少 chash 负载均衡节点权重过大时额外的内存消耗。 
[#7103](https://github.com/apache/apisix/pull/7103) +- proxy-cache 插件 method 不匹配时不应该返回缓存结果。 [#7111](https://github.com/apache/apisix/pull/7111) +- 上游 keepalive 应考虑 TLS 参数: + - [#7054](https://github.com/apache/apisix/pull/7054) + - [#7466](https://github.com/apache/apisix/pull/7466) +- 重定向插件在将 HTTP 重定向到 HTTPS 时设置了正确的端口。 + - [#7065](https://github.com/apache/apisix/pull/7065) + ## 2.14.1 ### Bugfix @@ -121,6 +322,10 @@ title: CHANGELOG - [#6686](https://github.com/apache/apisix/pull/6686) - Admin API 拒绝未知的 stream 插件。[#6813](https://github.com/apache/apisix/pull/6813) +## 2.13.3 + +**这是一个 LTS 维护版本,您可以在 `release/2.13` 分支中看到 CHANGELOG。** + ## 2.13.2 **这是一个 LTS 维护版本,您可以在 `release/2.13` 分支中看到 CHANGELOG。** diff --git a/docs/zh/latest/FAQ.md b/docs/zh/latest/FAQ.md index d285f58d75ab..0f4356f133c4 100644 --- a/docs/zh/latest/FAQ.md +++ b/docs/zh/latest/FAQ.md @@ -121,7 +121,7 @@ make deps ENV_LUAROCKS_SERVER=https://luarocks.cn 1. 创建一个[Route](terminology/route.md)并配置 `vars` 字段: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -134,7 +134,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 } }' -curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -161,7 +161,7 @@ Apache APISIX 提供了几种不同的方法来实现: 1. 
在 [redirect](plugins/redirect.md) 插件中将 `http_to_https` 设置为 `true`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -176,7 +176,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 2. 结合高级路由规则 `vars` 和 `redirect` 插件一起使用: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -199,7 +199,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 3. 使用 `serverless` 插件: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { @@ -270,15 +270,16 @@ nginx_config: - 9082 ``` - 处理 HTTPS 请求也类似,修改 `conf/config.yaml` 中 HTTPS 端口监听的参数 `ssl.listen_port`,示例: + 处理 HTTPS 请求也类似,修改 `conf/config.yaml` 中 HTTPS 端口监听的参数 `ssl.listen`,示例: ``` apisix: ssl: - listen_port: - - 9443 - - 9444 - - 9445 + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 ``` 2. 重启或者重新加载 APISIX。 @@ -367,8 +368,11 @@ make: *** [deps] Error 1 1. 
为 Apache APISIX 代理和 Admin API 配置不同的端口,或者禁用 Admin API。 ```yaml -apisix: - port_admin: 9180 # use a separate port +deployment: + admin: + admin_listen: # use a separate port + ip: 127.0.0.1 + port: 9180 ``` 2、添加 APISIX Dashboard 的代理路由: @@ -398,7 +402,7 @@ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 你可以在 Route 中使用 `vars` 字段来匹配正则表达式: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/*", "vars": [ @@ -434,7 +438,7 @@ HTTP/1.1 404 Not Found 这是支持的,下面是一个 `FQDN` 为 `httpbin.default.svc.cluster.local`(一个 Kubernetes Service)的示例: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/ip", "upstream": { @@ -454,23 +458,23 @@ curl http://127.0.0.1:9080/ip -i ## Admin API 的 `X-API-KEY` 指的是什么?是否可以修改? 
-Admin API 的 `X-API-KEY` 指的是 `./conf/config.yaml` 文件中的 `apisix.admin_key.key`,默认值是 `edd1c9f034335f136f87ad84b625c8f1`。它是 Admin API 的访问 token。
+Admin API 的 `X-API-KEY` 指的是 `./conf/config.yaml` 文件中的 `deployment.admin.admin_key.key`,默认值是 `edd1c9f034335f136f87ad84b625c8f1`。它是 Admin API 的访问 token。

 默认情况下,它被设置为 `edd1c9f034335f136f87ad84b625c8f1`,也可以通过修改 `./conf/config.yaml` 中的参数来修改,如下示例:

 ```yaml
-apisix:
-  admin_key
-    -
-      name: "admin"
-      key: newkey
-      role: admin
+deployment:
+  admin:
+    admin_key:
+      - name: "admin"
+        key: newkey
+        role: admin
 ```

 然后访问 Admin API:

 ```shell
-curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d '
+curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d '
 {
     "uris":[ "/*" ],
     "name":"admin-token-test",
@@ -499,9 +503,10 @@ Apache APISIX 默认只允许 `127.0.0.0/24` 的 IP 段范围访问 `Admin API`

 如果你想允许所有的 IP 访问,只需在 `./conf/config.yaml` 配置文件中添加如下的配置,然后重启或重新加载 APISIX 就可以让所有 IP 访问 `Admin API`。

 ```yaml
-apisix:
-  allow_admin:
-    - 0.0.0.0/0
+deployment:
+  admin:
+    allow_admin:
+      - 0.0.0.0/0
 ```

 **注意**:你可以在非生产环境中使用此方法,以允许所有客户端从任何地方访问 Apache APISIX 实例,但是在生产环境中该设置并不安全。在生产环境中,请仅授权特定的 IP 地址或地址范围访问 Apache APISIX 实例。
@@ -533,7 +538,7 @@ acme.sh --renew --domain demo.domain

 在转发至上游之前移除请求路径中的前缀,比如说从 `/foo/get` 改成 `/get`,可以通过 `[proxy-rewrite](plugins/proxy-rewrite.md)` 插件来实现:

 ```shell
-curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
 {
     "uri": "/foo/*",
     "plugins": {
@@ -592,6 +597,88 @@ apisix:
 | 对绑定到 Plugin 的配置实例的所有实体生效。 | 对绑定到 `plugin-config` 的路由生效。 |
 | 对绑定到 Plugin 的配置实例的所有实体生效。 | 对绑定到 `plugin-config` 的路由生效。 |

+## 部署了 Apache APISIX 之后,如何检测 APISIX 数据平面的存活情况(如何探活)?
+ +可以创建一个名为 `health-info` 的路由,并开启 [fault-injection](https://apisix.apache.org/zh/docs/apisix/plugins/fault-injection/) 插件(其中 YOUR-TOKEN 是用户自己的 token;127.0.0.1 是控制平面的 IP 地址,可以自行修改): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/health-info \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "fine" + } + } + }, + "uri": "/status" +}' +``` + +验证方式: + +访问 Apache APISIX 数据平面的 `/status` 来探测 APISIX,如果 response code 是 200 就代表 APISIX 存活。 + +:::note + +这个方式只是探测 APISIX 数据平面是否存活,并不代表 APISIX 的路由和其他功能是正常的,这些需要更多路由级别的探测。 + +::: + +## APISIX 与 [etcd](https://etcd.io/) 相关的延迟较高的问题有哪些,如何修复? + +etcd 作为 APISIX 的数据存储组件,它的稳定性关乎 APISIX 的稳定性。在实际场景中,如果 APISIX 使用证书通过 HTTPS 的方式连接 etcd,可能会出现以下 2 种数据查询或写入延迟较高的问题: + +1. 通过接口操作 APISIX Admin API 进行数据的查询或写入,延迟较高。 +2. 在监控系统中,Prometheus 抓取 APISIX 数据面 Metrics 接口超时。 + +这些延迟问题,严重影响了 APISIX 的服务稳定性,而之所以会出现这类问题,主要是因为 etcd 对外提供了 2 种操作方式:HTTP(HTTPS)、gRPC。而 APISIX 是基于 HTTP(HTTPS)协议来操作 etcd 的。 + +在这个场景中,etcd 存在一个关于 HTTP/2 的 BUG:如果通过 HTTPS 操作 etcd(HTTP 不受影响),HTTP/2 的连接数上限为 Golang 默认的 `250` 个。 + +所以,当 APISIX 数据面节点数较多时,一旦所有 APISIX 节点与 etcd 连接数超过这个上限,则 APISIX 的接口响应会非常的慢。 + +Golang 中,默认的 HTTP/2 上限为 `250`,代码如下: + +```go +package http2 + +import ... + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 +) + +``` + +目前,etcd 官方主要维护了 `3.4` 和 `3.5` 这两个主要版本。在 `3.4` 系列中,近期发布的 `3.4.20` 版本已修复了这个问题。至于 `3.5` 版本,其实,官方很早之前就在筹备发布 `3.5.5` 版本了,但截止目前(2022.09.13)仍尚未发布。所以,如果你使用的是 etcd 的版本小于 `3.5.5`,可以参考以下几种方式解决这个问题: + +1. 将 APISIX 与 etcd 的通讯方式由 HTTPS 改为 HTTP。 +2. 将 etcd 版本回退到 `3.4.20`。 +3. 
将 etcd 源码克隆下来,直接编译 `release-3.5` 分支(此分支已修复,只是尚未发布新版本而已)。 + +重新编译 etcd 的方式如下: + +```shell +git checkout release-3.5 +make GOOS=linux GOARCH=amd64 +``` + +编译的二进制在 `bin` 目录下,将其替换掉你服务器环境的 etcd 二进制后,然后重启 etcd 即可。 + +更多信息,请参考: + +- [when etcd node have many http long polling connections, it may cause etcd to respond slowly to http requests.](https://github.com/etcd-io/etcd/issues/14185) +- [bug: when apisix starts for a while, its communication with etcd starts to time out](https://github.com/apache/apisix/issues/7078) +- [the prometheus metrics API is tool slow](https://github.com/apache/apisix/issues/7353) +- [Support configuring `MaxConcurrentStreams` for http2](https://github.com/etcd-io/etcd/pull/14169) + ## 如果在使用 APISIX 过程中遇到问题,我可以在哪里寻求更多帮助? - [Apache APISIX Slack Channel](/docs/general/join/#加入-slack-频道):加入后请选择 channel-apisix 频道,即可通过此频道进行 APISIX 相关问题的提问。 diff --git a/docs/zh/latest/README.md b/docs/zh/latest/README.md index 04cb2ce5e1de..d8062b77bcde 100644 --- a/docs/zh/latest/README.md +++ b/docs/zh/latest/README.md @@ -40,7 +40,7 @@ Apache APISIX 的技术架构如下图所示: - 邮件列表 - 发送任意内容到 dev-subscribe@apisix.apache.org 后,根据回复以订阅邮件列表。 - QQ 群 - 781365357 -- Slack - [查看加入方式](https://apisix.apache.org/docs/general/join/#join-the-slack-channel) +- Slack - [查看加入方式](https://apisix.apache.org/zh/docs/general/join/#join-the-slack-channel) - ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - 使用标签 `#ApacheAPISIX` 关注我们并与我们互动。 - [哔哩哔哩](https://space.bilibili.com/551921247) - **新手任务列表** @@ -84,7 +84,7 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 - **全动态能力** - [热更新和热插件](terminology/plugin.md):无需重启服务,就可以持续更新配置和插件。 - - [代理请求重写](plugins/proxy-rewrite.md):支持重写请求上游的`host`、`uri`、`schema`、`enable_websocket`、`headers`信息。 + - [代理请求重写](plugins/proxy-rewrite.md):支持重写请求上游的`host`、`uri`、`schema`、`method`、`headers`信息。 - [输出内容重写](plugins/response-rewrite.md):支持自定义修改返回内容的 `status code`、`body`、`headers`。 - [Serverless](plugins/serverless.md):在 APISIX 的每一个阶段,你都可以添加并调用自己编写的函数。 - 
动态负载均衡:动态支持有权重的 round-robin 负载平衡。 @@ -143,7 +143,7 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 - 高性能:在单核上 QPS 可以达到 18k,同时延迟只有 0.2 毫秒。 - [故障注入](plugins/fault-injection.md) - [REST Admin API](admin-api.md):使用 REST Admin API 来控制 Apache APISIX,默认只允许 127.0.0.1 访问,你可以修改 `conf/config.yaml` 中的 `allow_admin` 字段,指定允许调用 Admin API 的 IP 列表。同时需要注意的是,Admin API 使用 key auth 来校验调用者身份,**在部署前需要修改 `conf/config.yaml` 中的 `admin_key` 字段,来保证安全。** - - 外部日志记录器:将访问日志导出到外部日志管理工具。([HTTP Logger](plugins/http-logger.md)、[TCP Logger](plugins/tcp-logger.md)、[Kafka Logger](plugins/kafka-logger.md)、[UDP Logger](plugins/udp-logger.md)、[RocketMQ Logger](plugins/rocketmq-logger.md)、[SkyWalking Logger](plugins/skywalking-logger.md)、[Alibaba Cloud Logging(SLS)](plugins/sls-logger.md)、[Google Cloud Logging](plugins/google-cloud-logging.md)、[Splunk HEC Logging](plugins/splunk-hec-logging.md)、[File Logger](plugins/file-logger.md)) + - 外部日志记录器:将访问日志导出到外部日志管理工具。([HTTP Logger](plugins/http-logger.md)、[TCP Logger](plugins/tcp-logger.md)、[Kafka Logger](plugins/kafka-logger.md)、[UDP Logger](plugins/udp-logger.md)、[RocketMQ Logger](plugins/rocketmq-logger.md)、[SkyWalking Logger](plugins/skywalking-logger.md)、[Alibaba Cloud Logging(SLS)](plugins/sls-logger.md)、[Google Cloud Logging](plugins/google-cloud-logging.md)、[Splunk HEC Logging](plugins/splunk-hec-logging.md)、[File Logger](plugins/file-logger.md)、[Elasticsearch Logger](plugins/elasticsearch-logger.md)、[TencentCloud CLS](plugins/tencent-cloud-cls.md)) - [Helm charts](https://github.com/apache/apisix-helm-chart) - **高度可扩展** @@ -167,11 +167,11 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 1. 安装 - 请参考[APISIX 安装指南](./installation-guide.md)。 + 请参考[APISIX 安装指南](https://apisix.apache.org/zh/docs/apisix/installation-guide/)。 2. 
入门指南 - 入门指南是学习 APISIX 基础知识的好方法。按照 [入门指南](getting-started.md)的步骤即可。 + 入门指南是学习 APISIX 基础知识的好方法。按照 [入门指南](https://apisix.apache.org/zh/docs/apisix/getting-started/)的步骤即可。 更进一步,你可以跟着文档来尝试更多的[插件](plugins)。 @@ -184,7 +184,7 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 可以参考[插件开发指南](plugin-develop.md),以及示例插件 `example-plugin` 的代码实现。 阅读[插件概念](terminology/plugin.md) 会帮助你学到更多关于插件的知识。 -更多文档请参考 [Apache APISIX 文档站](https://apisix.apache.org/docs/apisix/getting-started/)。 +更多文档请参考 [Apache APISIX 文档站](https://apisix.apache.org/zh/docs/apisix/getting-started/)。 ## 性能测试 @@ -221,6 +221,8 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 - [腾讯云:为什么选择 Apache APISIX 来实现 k8s ingress controller?](https://www.upyun.com/opentalk/448.html) - [思必驰:为什么我们重新写了一个 k8s ingress controller?](https://mp.weixin.qq.com/s/bmm2ibk2V7-XYneLo9XAPQ) +更多用户案例,请查看 [Case Studies](https://apisix.apache.org/zh/blog/tags/case-studies/)。 + ## APISIX 的用户有哪些? 有很多公司和组织把 APISIX 用于学习、研究、生产环境和商业产品中,包括: diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md index e5dc14202727..b5a2ff19c3e8 100644 --- a/docs/zh/latest/admin-api.md +++ b/docs/zh/latest/admin-api.md @@ -25,9 +25,122 @@ title: Admin API Admin API 是为 Apache APISIX 服务的一组 API,我们可以将参数传递给 Admin API 以控制 APISIX 节点。更好地了解其工作原理,请参阅 [Architecture Design](./architecture-design/apisix.md) 中的文档。 -启动 Apache APISIX 时,默认情况下 Admin API 将监听 `9080` 端口(HTTPS 的 `9443` 端口)。您可以通过修改 [conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml) 文件来改变默认监听的端口。 +启动 Apache APISIX 时,默认情况下 Admin API 将监听 `9180` 端口。您可以通过修改 [conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml) 文件来改变默认监听的端口。 -在下面出现的 `X-API-KEY` 指的是 `conf/config.yaml` 文件中的 `apisix.admin_key.key`,它是 Admin API 的访问 token。 +在下面出现的 `X-API-KEY` 指的是 `conf/config.yaml` 文件中的 `deployment.admin.admin_key.key`,它是 Admin API 的访问 token。 + +## V3 + +Admin API 在 V3 版本中做了一些不向下兼容的调整,以及支持更多特性。 + +### 支持新的响应体格式 + +1. 移除响应体中的 `action` 字段; +2. 
调整获取资源列表时的响应体结构,新的响应体结构示例如下:
+
+返回单个资源:
+
+```json
+{
+  "modifiedIndex": 2685183,
+  "value": {
+    "id": "1",
+    ...
+  },
+  "key": "/apisix/routes/1",
+  "createdIndex": 2684956
+}
+```
+
+返回多个资源:
+
+```json
+{
+  "list": [
+    {
+      "modifiedIndex": 2685183,
+      "value": {
+        "id": "1",
+        ...
+      },
+      "key": "/apisix/routes/1",
+      "createdIndex": 2684956
+    },
+    {
+      "modifiedIndex": 2685163,
+      "value": {
+        "id": "2",
+        ...
+      },
+      "key": "/apisix/routes/2",
+      "createdIndex": 2685163
+    }
+  ],
+  "total": 2
+}
+```
+
+### 支持分页查询
+
+获取资源列表时支持分页查询,分页参数包括:
+
+| 参数 | 默认值 | 范围 | 说明 |
+| --------- | ------ | -------- | ------------ |
+| page | 1 | [1, ...] | 页数 |
+| page_size | | [10, 500]| 每页资源数量 |
+
+示例如下:
+
+```shell
+$ curl "http://127.0.0.1:9180/apisix/admin/routes?page=1&page_size=10" \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d '
+{
+  "total": 1,
+  "list": [
+    {
+      ...
+    }
+  ]
+}
+```
+
+目前支持分页查询的资源如下:
+
+- Consumer
+- Global Rules
+- Plugin Config
+- Proto
+- Route
+- Service
+- SSL
+- Stream Route
+- Upstream
+
+### 支持过滤资源
+
+获取资源列表时支持根据 `name`, `label`, `uri` 过滤资源。
+
+| 参数 | 说明 |
+| ----- | --------------------------------------------------------------------------------------------------- |
+| name | 根据资源的 `name` 属性进行查询,如果资源本身没有 `name` 属性则不会出现在查询结果中。 |
+| label | 根据资源的 `label` 属性进行查询,如果资源本身没有 `label` 属性则不会出现在查询结果中。 |
+| uri | 仅在 Route 资源上支持。如果 Route 的 `uri` 等于查询的 uri 或 `uris` 包含查询的 uri,则该 Route 资源出现在查询结果中。 |
+
+当启用了多个过滤参数时,对不同过滤参数的查询结果取交集。
+下述示例将返回一个路由列表,该路由列表中的所有路由满足以下条件:路由的 `name` 包含字符串 "test";`uri` 包含字符串 "foo";对路由的 `label` 没有限制,因为查询的 label 是空字符串。
+
+```shell
+$ curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d '
+{
+  "total": 1,
+  "list": [
+    {
+      ...
+ } + ] +} +``` ## Route @@ -35,7 +148,7 @@ Admin API 是为 Apache APISIX 服务的一组 API,我们可以将参数传递 *说明*:Route 字面意思就是路由,通过定义一些规则来匹配客户端的请求,然后根据匹配结果加载并执行相应的插件,并把请求转发给到指定 Upstream。 -注意:在启用 `Admin API` 时,它会占用前缀为 `/apisix/admin` 的 API。因此,为了避免您设计 API 与 `/apisix/admin` 冲突,建议为 Admin API 使用其他端口,您可以在 `conf/config.yaml` 中通过 `port_admin` 进行自定义 Admin API 端口。 +注意:在启用 `Admin API` 时,它会占用前缀为 `/apisix/admin` 的 API。因此,为了避免您设计 API 与 `/apisix/admin` 冲突,建议为 Admin API 使用其他端口,您可以在 `conf/config.yaml` 中通过 `admin_listen` 进行自定义 Admin API 端口。 ### 请求方法 @@ -118,7 +231,7 @@ route 对象 json 配置内容: ```shell # 创建一个路由 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "hosts": ["foo.com", "*.bar.com"], @@ -138,7 +251,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT ... # 创建一个有效期为 60 秒的路由,过期后自动删除 -$ curl http://127.0.0.1:9080/apisix/admin/routes/2?ttl=60 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl 'http://127.0.0.1:9180/apisix/admin/routes/2?ttl=60' -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/aa/index.html", "upstream": { @@ -155,7 +268,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT # 给路由增加一个 upstream node -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -174,7 +287,7 @@ HTTP/1.1 200 OK # 给路由更新一个 upstream node 的权重 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -193,7 +306,7 @@ HTTP/1.1 200 OK # 给路由删除一个 upstream node -$ curl 
http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -211,7 +324,7 @@ HTTP/1.1 200 OK # 替换路由的 methods -- 数组 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ "methods": ["GET", "POST"] }' HTTP/1.1 200 OK @@ -222,7 +335,7 @@ HTTP/1.1 200 OK # 替换路由的 upstream nodes -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/upstream/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/upstream/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -236,7 +349,7 @@ HTTP/1.1 200 OK # 替换路由的 methods -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/methods -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '["POST", "DELETE", "PATCH"]' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/methods -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '["POST", "DELETE", "PATCH"]' HTTP/1.1 200 OK ... 
@@ -245,7 +358,7 @@ HTTP/1.1 200 OK # 禁用路由 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 0 }' @@ -259,7 +372,7 @@ HTTP/1.1 200 OK # 启用路由 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 1 }' @@ -333,7 +446,7 @@ service 对象 json 配置内容: ```shell # 创建一个 Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "plugins": { "limit-count": { @@ -359,7 +472,7 @@ HTTP/1.1 201 Created # 给 Service 增加一个 upstream node -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -378,7 +491,7 @@ HTTP/1.1 200 OK # 给 Service 更新一个 upstream node 的权重 -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -397,7 +510,7 @@ HTTP/1.1 200 OK # 给 Service 删除一个 upstream node -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -415,7 +528,7 @@ HTTP/1.1 200 OK # 替换 
Service 的 upstream nodes -$ curl http://127.0.0.1:9080/apisix/admin/services/201/upstream/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201/upstream/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -477,7 +590,7 @@ consumer 对象 json 配置内容: ```shell # 创建 Consumer ,指定认证插件 key-auth ,并开启特定插件 limit-count -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "username": "jack", "plugins": { @@ -496,7 +609,7 @@ HTTP/1.1 200 OK Date: Thu, 26 Dec 2019 08:17:49 GMT ... -{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63},"action":"set"} +{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63}} ``` 从 `v2.2` 版本之后,同一个 consumer 可以绑定多个认证插件。 @@ -532,7 +645,7 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 | 名字 | 可选项 | 类型 | 说明 | 示例 | | 
-------------- | ---------------------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | | type | 必需 | 枚举 | 负载均衡算法 | | | -| nodes | 必需,不能和 `service_name` 一起用 | Node | 哈希表或数组。当它是哈希表时,内部元素的 key 是上游机器地址列表,格式为`地址 +(可选的)端口`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80`等。value 则是节点的权重。当它是数组时,数组中每个元素都是一个哈希表,其中包含 `host`、`weight` 以及可选的 `port`、`priority`。`nodes` 可以为空,这通常用作占位符。客户端命中这样的上游会返回 502。 | `192.168.1.100:80` | +| nodes | 必需,不能和 `service_name` 一起用 | Node | 哈希表或数组。当它是哈希表时,内部元素的 key 是上游机器地址列表,格式为`地址 +(可选的)端口`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80`等。对于哈希表的情况,如果 key 是 IPv6 地址加端口,则必须用中括号将 IPv6 地址括起来。value 则是节点的权重。当它是数组时,数组中每个元素都是一个哈希表,其中包含 `host`、`weight` 以及可选的 `port`、`priority`。`nodes` 可以为空,这通常用作占位符。客户端命中这样的上游会返回 502。 | `192.168.1.100:80`, `[::1]:80` | | service_name | 必需,不能和 `nodes` 一起用 | string | 服务发现时使用的服务名,见[集成服务发现注册中心](./discovery.md) | `a-bootiful-client` | | discovery_type | 必需,如果设置了 `service_name` | string | 服务发现类型,见 [集成服务发现注册中心](./discovery.md) | `eureka` | | key | 条件必需 | 匹配类型 | 该选项只有类型是 `chash` 才有效。根据 `key` 来查找对应的 node `id`,相同的 `key` 在同一个对象中,永远返回相同 id,目前支持的 Nginx 内置变量有 `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`,其中 `arg_***` 是来自 URL 的请求参数,[Nginx 变量列表](http://nginx.org/en/docs/varindex.html) | | @@ -614,7 +727,7 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 ```shell # 创建一个 upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "type":"roundrobin", "nodes":{ @@ -626,7 +739,7 @@ HTTP/1.1 201 Created # 给 Upstream 增加一个 node -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 1 @@ -643,7 +756,7 @@ HTTP/1.1 200 OK # 给 Upstream 更新一个 node 的权重 -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 10 @@ -660,7 +773,7 @@ HTTP/1.1 200 OK # 给 Upstream 删除一个 node -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1980": null @@ -676,7 +789,7 @@ HTTP/1.1 200 OK # 替换 Upstream 的 nodes -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100/nodes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -695,7 +808,7 @@ HTTP/1.1 200 OK 1、创建 route 并配置 upstream 的 scheme 为 `https`。 ```shell -$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get", "upstream": { @@ -774,7 +887,7 @@ $ curl http://127.0.0.1:9080/get ## SSL -*地址*:/apisix/admin/ssl/{id} +*地址*:/apisix/admin/ssls/{id} *说明*:SSL. 
@@ -782,11 +895,11 @@ $ curl http://127.0.0.1:9080/get | 名字 | 请求 uri | 请求 body | 说明 | | ------ | ---------------------- | --------- | ------------------------------- | -| GET | /apisix/admin/ssl | 无 | 获取资源列表 | -| GET | /apisix/admin/ssl/{id} | 无 | 获取资源 | -| PUT | /apisix/admin/ssl/{id} | {...} | 根据 id 创建资源 | -| POST | /apisix/admin/ssl | {...} | 创建资源,id 由后台服务自动生成 | -| DELETE | /apisix/admin/ssl/{id} | 无 | 删除资源 | +| GET | /apisix/admin/ssls | 无 | 获取资源列表 | +| GET | /apisix/admin/ssls/{id} | 无 | 获取资源 | +| PUT | /apisix/admin/ssls/{id} | {...} | 根据 id 创建资源 | +| POST | /apisix/admin/ssls | {...} | 创建资源,id 由后台服务自动生成 | +| DELETE | /apisix/admin/ssls/{id} | 无 | 删除资源 | ### body 请求参数 @@ -897,7 +1010,7 @@ ssl 对象 json 配置内容: 例子: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "skey": "val", "ikey": 1 @@ -930,11 +1043,11 @@ Content-Type: text/plain 例子: ```shell -$ curl "http://127.0.0.1:9080/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ["zipkin","request-id",...] 
-$ curl "http://127.0.0.1:9080/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -{"properties":{"disable":{"type":"boolean"}},"additionalProperties":false,"type":"object"} +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +{"$comment":"this is a mark for our injected plugin schema","properties":{"header":{"default":"apikey","type":"string"},"hide_credentials":{"default":false,"type":"boolean"},"_meta":{"properties":{"filter":{"type":"array","description":"filter determines whether the plugin needs to be executed at runtime"},"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"type":"integer","description":"priority of plugins by customized order"}},"type":"object"},"query":{"default":"apikey","type":"string"}},"type":"object"} ``` *地址*:/apisix/admin/plugins?all=true diff --git a/docs/zh/latest/apisix-variable.md b/docs/zh/latest/apisix-variable.md new file mode 100644 index 000000000000..194d0d56732a --- /dev/null +++ b/docs/zh/latest/apisix-variable.md @@ -0,0 +1,51 @@ +--- +title: APISIX 变量 +keywords: + - Apache APISIX + - API 网关 + - APISIX variable +description: 本文介绍了 Apache APISIX 支持的变量。 +--- + + + +## 描述 + +APISIX 除了支持 [NGINX 变量](http://nginx.org/en/docs/varindex.html)外,自身也提供了一些变量。 + +## 变量列表 + +| 变量名称 | 来源 | 描述 | 示例 | +|---------------------|----------- |--------------------------------------------------------------------------------- | ---------------- | +| balancer_ip | core | 上游服务器的 IP 地址。 | 192.168.1.2 | +| balancer_port | core | 上游服务器的端口。 | 80 | +| consumer_name | core | 消费者的名称。 | | +| graphql_name | core | GraphQL 的 [operation name](https://graphql.org/learn/queries/#operation-name)。 | HeroComparison | +| graphql_operation | core | GraphQL 的操作类型。 | mutation | +| graphql_root_fields | core | GraphQL 最高级别的字段。 | ["hero"] | +| mqtt_client_id | mqtt-proxy | MQTT 协议中的客户端 ID。 | | +| route_id | core | APISIX 路由的 ID。 
| | +| route_name | core | APISIX 路由的名称。 | | +| service_id | core | APISIX 服务的 ID。 | | +| service_name | core | APISIX 服务的名称。 | | +| redis_cmd_line | Redis | Redis 命令的内容。 | | +| rpc_time | xRPC | 在 RPC 请求级别所花费的时间。 | | + +当然,除上述变量外,你也可以创建自定义[变量](./plugin-develop.md#register-custom-variable)。 diff --git a/docs/zh/latest/architecture-design/apisix.md b/docs/zh/latest/architecture-design/apisix.md index cd2e57bc3689..dfe866eb788c 100644 --- a/docs/zh/latest/architecture-design/apisix.md +++ b/docs/zh/latest/architecture-design/apisix.md @@ -25,38 +25,21 @@ title: APISIX ![软件架构](../../../assets/images/flow-software-architecture.png) -## 插件加载流程 - -![插件加载流程](../../../assets/images/flow-load-plugin.png) - -## 插件内部结构 - -![插件内部结构](../../../assets/images/flow-plugin-internal.png) - -## 配置 APISIX +Apache APISIX 是一个动态、实时、高性能的云原生 API 网关。它构建于 NGINX + ngx_lua 的技术基础之上,充分利用了 LuaJIT 所提供的强大性能。 [为什么 Apache APISIX 选择 NGINX+Lua 技术栈?](https://apisix.apache.org/zh/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/)。 -通过修改本地 `conf/config.yaml` 文件,或者在启动 APISIX 时使用 `-c` 或 `--config` 添加文件路径参数 `apisix start -c `,完成对 APISIX 服务本身的基本配置。 +APISIX 主要分为两个部分: -比如修改 APISIX 默认监听端口为 8000,其他配置保持默认,在 `config.yaml` 中只需这样配置: +1. APISIX 核心:包括 Lua 插件、多语言插件运行时(Plugin Runner)、Wasm 插件运行时等; +2. 
功能丰富的各种内置插件:包括可观测性、安全、流量控制等。 -```yaml -apisix: - node_listen: 8000 # APISIX listening port -``` +APISIX 在其核心中,提供了路由匹配、负载均衡、服务发现、API 管理等重要功能,以及配置管理等基础性模块。除此之外,APISIX 插件运行时也包含其中,提供原生 Lua 插件的运行框架和多语言插件的运行框架,以及实验性的 Wasm 插件运行时等。APISIX 多语言插件运行时提供多种开发语言的支持,比如 Golang、Java、Python、JS 等。 -比如指定 APISIX 默认监听端口为 8000,并且设置 etcd 地址为 `http://foo:2379`, -其他配置保持默认。在 `config.yaml` 中只需这样配置: +APISIX 目前也内置了各类插件,覆盖了 API 网关的各种领域,如认证鉴权、安全、可观测性、流量管理、多协议接入等。当前 APISIX 内置的插件使用原生 Lua 实现,关于各个插件的介绍与使用方式,可以查看相关[插件文档](https://apisix.apache.org/docs/apisix/plugins/batch-requests)。 -```yaml -apisix: - node_listen: 8000 # APISIX listening port +## 插件加载流程 -etcd: - host: "http://foo:2379" # etcd address -``` +![插件加载流程](../../../assets/images/flow-load-plugin.png) -其他默认配置,可以在 `conf/config-default.yaml` 文件中看到,该文件是与 APISIX 源码强绑定, -**永远不要**手工修改 `conf/config-default.yaml` 文件。如果需要自定义任何配置,都应在 `config.yaml` 文件中完成。 +## 插件内部结构 -_注意_ 不要手工修改 APISIX 自身的 `conf/nginx.conf` 文件,当服务每次启动时,`apisix` -会根据 `config.yaml` 配置自动生成新的 `conf/nginx.conf` 并自动启动服务。 +![插件内部结构](../../../assets/images/flow-plugin-internal.png) diff --git a/docs/zh/latest/batch-processor.md b/docs/zh/latest/batch-processor.md index df6d5972e576..27c2a267abe1 100644 --- a/docs/zh/latest/batch-processor.md +++ b/docs/zh/latest/batch-processor.md @@ -80,7 +80,7 @@ end 举个例子: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "http-logger": { diff --git a/docs/zh/latest/benchmark.md b/docs/zh/latest/benchmark.md index d9bc3fc9b8c2..c7daeffdc6b7 100644 --- a/docs/zh/latest/benchmark.md +++ b/docs/zh/latest/benchmark.md @@ -52,7 +52,7 @@ title: 压力测试 如果你需要在本地服务器上运行基准测试,你需要同时运行另一个 NGINX 实例来监听 80 端口: ```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 
'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", @@ -96,7 +96,7 @@ wrk -d 60 --latency http://127.0.0.1:9080/hello 如果你需要在本地服务器上运行基准测试,你需要同时运行另一个 NGINX 实例来监听 80 端口: ```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", diff --git a/docs/zh/latest/building-apisix.md b/docs/zh/latest/building-apisix.md index 4d2aadba2fea..ec704a871cea 100644 --- a/docs/zh/latest/building-apisix.md +++ b/docs/zh/latest/building-apisix.md @@ -53,7 +53,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend 然后,创建一个目录并设置环境变量 `APISIX_VERSION`: ```shell -APISIX_VERSION='2.14.1' +APISIX_VERSION='2.99.0' mkdir apisix-${APISIX_VERSION} ``` diff --git a/docs/zh/latest/certificate.md b/docs/zh/latest/certificate.md index e6c433e2fc9c..310f53890dd3 100644 --- a/docs/zh/latest/certificate.md +++ b/docs/zh/latest/certificate.md @@ -52,7 +52,7 @@ with open(sys.argv[2]) as f: key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], @@ -68,7 +68,7 @@ print(resp.text) ./ssl.py t.crt t.key test.com # 创建 Router 对象 -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["test.com"], @@ -113,7 +113,7 @@ curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -vvv ```shell ./ssl.py t.crt t.key '*.test.com' -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X 
PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["*.test.com"], @@ -168,3 +168,153 @@ curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/hello -v * `keys`:PEM 格式的 SSL 证书私钥列表 `APISIX` 会将相同下标的证书和私钥配对使用,因此 `certs` 和 `keys` 列表的长度必须一致。 + +### 设置多个 CA 证书 + +APISIX 目前支持在多处设置 CA 证书,比如 [保护 Admin API](./mtls.md#保护-admin-api),[保护 ETCD](./mtls.md#保护-etcd),以及 [部署模式](../../en/latest/architecture-design/deployment-role.md) 等。 + +在这些地方,使用 `ssl_trusted_certificate` 或 `trusted_ca_cert` 来配置 CA 证书,但是这些配置最终将转化为 OpenResty 的 [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) 指令。 + +如果你需要在不同的地方指定不同的 CA 证书,你可以将这些 CA 证书制作成一个 CA bundle 文件,在需要用到 CA 证书的地方将配置指向这个文件。这样可以避免生成的 `lua_ssl_trusted_certificate` 存在多处并且互相覆盖的问题。 + +下面用一个完整的例子来展示如何在 APISIX 设置多个 CA 证书。 + +假设让 client 与 APISIX Admin API,APISIX 与 ETCD 之间都使用 mTLS 协议进行通信,目前有两张 CA 证书,分别是 `foo_ca.crt` 和 `bar_ca.crt`,用这两张 CA 证书各自签发 client 与 server 证书对,`foo_ca.crt` 及其签发的证书对用于保护 Admin API,`bar_ca.crt` 及其签发的证书对用于保护 ETCD。 + +下表详细列出这个示例所涉及到的配置及其作用: + +| 配置 | 类型 | 用途 | +| ------------- | ------- | ----------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA 证书 | 签发客户端与 APISIX Admin API 进行 mTLS 通信所需的次级证书。 | +| foo_client.crt | 证书 | 由 `foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 时证明自身身份的证书。 | +| foo_client.key | 密钥文件 | 由 `foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 所需的密钥文件。 | +| foo_server.crt | 证书 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `admin_api_mtls.admin_ssl_cert` 配置项。 | +| foo_server.key | 密钥文件 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `admin_api_mtls.admin_ssl_cert_key` 配置项。 | +| admin.apisix.dev | 域名 | 签发 `foo_server.crt` 证书时使用的 Common Name,客户端通过该域名访问 APISIX Admin API | +| bar_ca.crt | CA 证书 | 签发 APISIX 与 ETCD 进行 mTLS 通信所需的次级证书。 | +| bar_etcd.crt | 证书 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--cert-file` 选项。 | +| 
bar_etcd.key | 密钥文件 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--key-file` 选项。 | +| bar_apisix.crt | 证书 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.cert` 配置项。 | +| bar_apisix.key | 密钥文件 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.key` 配置项。 | +| etcd.cluster.dev | 域名 | 签发 `bar_etcd.crt` 证书时使用的 Common Name,APISIX 与 ETCD 进行 mTLS 通信时,使用该域名作为 SNI。对应 `etcd.tls.sni` 配置项。| +| apisix.ca-bundle | CA bundle | 由 `foo_ca.crt` 与 `bar_ca.crt` 合并而成,替代 `foo_ca.crt` 与 `bar_ca.crt`。 | + +1. 制作 CA bundle 文件 + +``` +cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle +``` + +2. 启动 ETCD 集群,并开启客户端验证 + +先编写 `goreman` 配置,命名为 `Procfile-single-enable-mtls`,内容如下: + +```text +# 运行 `go get github.com/mattn/goreman` 安装 goreman,用 goreman 执行以下命令: +etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd3: etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 
--initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +``` + +使用 `goreman` 来启动 ETCD 集群: + +```shell +goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 & +``` + +3. 更新 `config.yaml` + +```yaml +deployment: + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin + admin_listen: + ip: 127.0.0.1 + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_ca_cert: /path/to/apisix.ca-bundle + admin_ssl_cert: /path/to/foo_server.crt + admin_ssl_cert_key: /path/to/foo_server.key + +apisix: + ssl: + ssl_trusted_certificate: /path/to/apisix.ca-bundle + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + - "https://127.0.0.1:22379" + - "https://127.0.0.1:32379" + tls: + cert: /path/to/bar_apisix.crt + key: /path/to/bar_apisix.key + sni: etcd.cluster.dev +``` + +4. 
测试 Admin API + +启动 APISIX,如果 APISIX 启动成功,`logs/error.log` 中没有异常输出,表示 APISIX 与 ETCD 之间进行 mTLS 通信正常。 + +用 curl 模拟客户端,与 APISIX Admin API 进行 mTLS 通信,并创建一条路由: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果输出以下 SSL 握手过程,表示 curl 与 APISIX Admin API 之间 mTLS 通信成功: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. 
验证 APISIX 代理 + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +…… +``` + +APISIX 将请求代理到了上游 `httpbin.org` 的 `/get` 路径,并返回了 `HTTP/1.1 200 OK`。整个过程使用 CA bundle 替代 CA 证书是正常可用的。 diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json index 0832c0429924..da96eb5da7bc 100644 --- a/docs/zh/latest/config.json +++ b/docs/zh/latest/config.json @@ -1,13 +1,25 @@ { - "version": "2.14.1", + "version": "2.99.0", "sidebar": [ + { + "type": "doc", + "id": "getting-started" + }, + { + "type": "doc", + "id": "installation-guide" + }, + { + "type": "doc", + "id": "architecture-design/apisix" + }, { "type": "category", - "label": "Architecture Design", + "label": "Tutorials", "items": [ - "architecture-design/apisix", - "architecture-design/plugin-config", - "architecture-design/debug-mode" + "tutorials/expose-api", + "tutorials/protect-api", + "tutorials/observe-your-api" ] }, { @@ -18,6 +30,7 @@ "terminology/consumer", "terminology/global-rule", "terminology/plugin", + "terminology/plugin-config", "terminology/route", "terminology/router", "terminology/script", @@ -25,14 +38,6 @@ "terminology/upstream" ] }, - { - "type": "doc", - "id": "getting-started" - }, - { - "type": "doc", - "id": "installation-guide" - }, { "type": "category", "label": "Plugins", @@ -92,7 +97,8 @@ "plugins/ua-restriction", "plugins/referer-restriction", "plugins/consumer-restriction", - "plugins/csrf" + "plugins/csrf", + "plugins/public-api" ] }, { @@ -151,7 +157,10 @@ "plugins/sls-logger", "plugins/google-cloud-logging", "plugins/splunk-hec-logging", - "plugins/file-logger" + "plugins/file-logger", + "plugins/loggly", + "plugins/elasticsearch-logger", + "plugins/tencent-cloud-cls" ] } ] @@ -163,12 +172,14 @@ "plugins/serverless", "plugins/azure-functions", 
"plugins/openwhisk", - "plugins/aws-lambda" + "plugins/aws-lambda", + "plugins/workflow", + "plugins/openfunction" ] }, { "type": "category", - "label": "其它", + "label": "Other protocols", "items": [ "plugins/dubbo-proxy", "plugins/mqtt-proxy" @@ -197,6 +208,22 @@ { "type": "doc", "id": "building-apisix" + }, + { + "type": "doc", + "id": "external-plugin" + }, + { + "type": "doc", + "id": "CODE_STYLE" + }, + { + "type": "doc", + "id": "plugin-develop" + }, + { + "type": "doc", + "id": "debug-mode" } ] }, @@ -220,10 +247,6 @@ "discovery/kubernetes" ] }, - { - "type": "doc", - "id": "external-plugin" - }, { "type": "doc", "id": "health-check" @@ -254,23 +277,19 @@ }, { "type": "doc", - "id": "batch-processor" - }, - { - "type": "doc", - "id": "benchmark" + "id": "apisix-variable" }, { "type": "doc", - "id": "install-dependencies" + "id": "batch-processor" }, { "type": "doc", - "id": "plugin-develop" + "id": "benchmark" }, { "type": "doc", - "id": "CODE_STYLE" + "id": "install-dependencies" }, { "type": "doc", diff --git a/docs/zh/latest/control-api.md b/docs/zh/latest/control-api.md index 89792c430fec..541034b0c395 100644 --- a/docs/zh/latest/control-api.md +++ b/docs/zh/latest/control-api.md @@ -208,3 +208,40 @@ APISIX 中一些插件添加了自己的 control API。如果你对他们感兴 在 http 子系统中触发一次全量 GC 注意,当你启用 stream proxy 时,APISIX 将为 stream 子系统运行另一个 Lua 虚拟机。它不会触发这个 Lua 虚拟机中的全量 GC。 + +### GET /v1/plugin_metadatas + +引入自 3.0.0 版本 + +打印所有插件的元数据: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +引入自 3.0.0 版本 + +打印指定插件的元数据: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` diff --git a/docs/zh/latest/debug-function.md b/docs/zh/latest/debug-function.md index 5ed2dc74993f..4f2f164dd039 100644 --- a/docs/zh/latest/debug-function.md +++ 
b/docs/zh/latest/debug-function.md @@ -34,7 +34,7 @@ title: 调试功能 示例 1:`502` 响应状态码来源于 `Upstream` (IP 地址不可用) ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "upstream": { @@ -75,7 +75,7 @@ $ curl http://127.0.0.1:9080/hello -v 示例 2:`502` 响应状态码来源于 `APISIX` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -109,7 +109,7 @@ Fault Injection! 示例 3:`Upstream` 具有多节点,并且所有节点不可用 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "nodes": { "127.0.0.3:1": 1, @@ -122,7 +122,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034 ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream_id": "1" diff --git a/docs/zh/latest/architecture-design/debug-mode.md b/docs/zh/latest/debug-mode.md similarity index 100% rename from docs/zh/latest/architecture-design/debug-mode.md rename to docs/zh/latest/debug-mode.md diff --git a/docs/zh/latest/discovery.md b/docs/zh/latest/discovery.md index 87687caf5674..189945a0d1d7 100644 --- a/docs/zh/latest/discovery.md +++ b/docs/zh/latest/discovery.md @@ -49,7 +49,7 @@ APISIX 要扩展注册中心其实是件非常容易的事情,其基本步骤 首先,在 `apisix/discovery` 下创建 `eureka` 目录; -其次,在 `apisix/discovery/eureka` 目录中添加 
[`init.lua`](../../../apisix/discovery/eureka/init.lua); +其次,在 `apisix/discovery/eureka` 目录中添加 [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua); 然后在 `init.lua` 实现用于初始化的 `init_worker` 函数以及用于获取服务实例节点列表的 `nodes` 函数即可: @@ -189,7 +189,7 @@ discovery: APISIX 是通过 `upstream.discovery_type` 选择使用的服务发现,`upstream.service_name` 与注册中心的服务名进行关联。下面是将 URL 为 "/user/\*" 的请求路由到注册中心名为 "USER-SERVICE" 的服务上例子: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/user/*", "upstream": { @@ -206,13 +206,13 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` 因为上游的接口 URL 可能会有冲突,通常会在网关通过前缀来进行区分: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/a/*", "plugins": { @@ -227,7 +227,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } }' -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/b/*", "plugins": { diff --git a/docs/zh/latest/discovery/dns.md b/docs/zh/latest/discovery/dns.md index 
47debd65dd3a..e2a56bc9913e 100644 --- a/docs/zh/latest/discovery/dns.md +++ b/docs/zh/latest/discovery/dns.md @@ -61,9 +61,22 @@ discovery: 注意所有来自 `test.consul.service` 的 IP 都有相同的权重。 -解析的记录将根据它们的 TTL 来进行缓存。 -对于记录不在缓存中的服务,我们将按照 `SRV -> A -> AAAA -> CNAME` 的顺序进行查询。 -刷新缓存记录时,我们将从上次成功的类型开始尝试。 +解析的记录将根据它们的 TTL 来进行缓存。对于记录不在缓存中的服务,我们将默认按照 `SRV -> A -> AAAA -> CNAME` 的顺序进行查询,刷新缓存记录时,我们将从上次成功的类型开始尝试。也可以通过修改配置文件来自定义 DNS 的解析顺序。 + +```yaml +# 添加到 config.yaml +discovery: + dns: + servers: + - "127.0.0.1:8600" # 使用 DNS 服务器的真实地址 + order: # DNS 解析的顺序 + - last # "last" 表示从上次成功的类型开始 + - SRV + - A + - AAAA + - CNAME + +``` 如果你想指定 upstream 服务器的端口,可以把以下内容添加到 `service_name`: diff --git a/docs/zh/latest/discovery/kubernetes.md b/docs/zh/latest/discovery/kubernetes.md index c08672cce90a..17342882082a 100644 --- a/docs/zh/latest/discovery/kubernetes.md +++ b/docs/zh/latest/discovery/kubernetes.md @@ -1,5 +1,12 @@ --- title: Kubernetes +keywords: + - Kubernetes + - Apache APISIX + - 服务发现 + - 集群 + - API 网关 +description: 本文将介绍如何在 Apache APISIX 中基于 Kubernetes 进行服务发现以及相关问题汇总。 --- + +## 描述 + +`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储。 + +启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elasticsearch 中。更多信息,请参考 [Batch-Processor](../batch-processor.md)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ | +| endpoint_addr | string | 是 | | Elasticsearch API。 | +| field | array | 是 | | Elasticsearch `field`配置信息。 | +| field.index | string | 是 | | Elasticsearch `[_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field)`。 | +| field.type | string | 否 | Elasticsearch 默认值 | Elasticsearch `[_type 
field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field)` | +| auth | array | 否 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 配置信息 | +| auth.username | string | 是 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 用户名。 | +| auth.password | string | 是 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 密码。 | +| ssl_verify | boolean | 否 | true | 当设置为 `true` 时则启用 SSL 验证。更多信息请参考 [lua-nginx-module](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake)。 | +| timeout | integer | 否 | 10 | 发送给 Elasticsearch 请求超时时间。 | + +本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +## 启用插件 + +你可以通过如下命令在指定路由上启用 `elasticsearch-logger` 插件: + +### 完整配置示例 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services", + "type":"collector" + }, + "auth":{ + "username":"elastic", + "password":"123456" + }, + "ssl_verify":false, + "timeout": 60, + "retry_delay":1, + "buffer_duration":60, + "max_retry_count":0, + "batch_max_size":1000, + "inactive_timeout":5, + "name":"elasticsearch-logger" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +### 最小化配置示例 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services" + } + } + }, + 
"upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +## 测试插件 + +向配置 `elasticsearch-logger` 插件的路由发送请求 + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +现在,你可以从 Elasticsearch 获取相关日志。 + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "M1qAxYIBRmRqWkmH4Wya", + "_score": 1, + "_source": { + "apisix_latency": 0, + "route_id": "1", + "server": { + "version": "2.15.0", + "hostname": "apisix" + }, + "request": { + "size": 102, + "uri": "/elasticsearch.do?q=hello", + "querystring": { + "q": "hello" + }, + "headers": { + "user-agent": "curl/7.29.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/elasticsearch.do?q=hello", + "method": "GET" + }, + "service_id": "", + "latency": 0, + "upstream": "127.0.0.1:1980", + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "start_time": 1661170929107, + "response": { + "size": 192, + "headers": { + "date": "Mon, 22 Aug 2022 12:22:09 GMT", + "server": "APISIX/2.15.0", + "content-type": "text/plain; charset=utf-8", + "connection": "close", + "transfer-encoding": "chunked" + }, + "status": 200 + } + } + } + ] + } +} +``` + +## 插件元数据设置 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------- | ------ | ------ | ------------------------------------------------------------ | ------ | ------------------------------------------------------------ | +| log_format | object | 可选 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。请注意,**该设置是全局生效的**,因此在指定 log_format 后,将对所有绑定 elasticsearch-logger 的 Route 或 Service 生效。 | + +### 设置日志格式示例 + +```shell 
+curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +在日志收集处,将得到类似下面的日志: + +```json +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +向配置 `elasticsearch-logger` 插件的路由发送请求 + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +现在,你可以从 Elasticsearch 获取相关日志。 + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1, + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "NVqExYIBRmRqWkmH4WwG", + "_score": 1, + "_source": { + "@timestamp": "2022-08-22T20:26:31+08:00", + "client_ip": "127.0.0.1", + "host": "127.0.0.1", + "route_id": "1" + } + } + ] + } +} +``` + +### 禁用插件元数据 + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +``` + +## 禁用插件 + +当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{}, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` diff --git a/docs/zh/latest/plugins/error-log-logger.md b/docs/zh/latest/plugins/error-log-logger.md index 2c35057a9bab..02c717e6f2c1 100644 --- a/docs/zh/latest/plugins/error-log-logger.md +++ b/docs/zh/latest/plugins/error-log-logger.md @@ -1,5 +1,11 @@ --- title: error-log-logger +keywords: + - APISIX + - API 网关 + - 错误日志 + - Plugin +description: API 网关 Apache 
APISIX error-log-logger 插件用于将 APISIX 的错误日志推送到 TCP、Apache SkyWalking 或 ClickHouse 服务器。 --- + +## 描述 + +`loggly` 插件可用于将日志转发到 [SolarWinds Loggly](https://www.solarwinds.com/loggly) 进行分析和存储。 + +当启用插件时,APISIX 会将请求上下文信息序列化为符合 [Loggly Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm?cshid=loggly_streaming-syslog-without-using-files) 的数据格式,即具有 [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424) 兼容标头的 Syslog。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------------------------|---------------|----------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| customer_token | string | 是 | | 将日志发送到 Loggly 时使用的唯一标识符,以确保将日志发送到正确的组织帐户。 | +| severity | string (enum) | 否 | INFO | Syslog 日志事件的严重性级别。 包括:`DEBUG`、`INFO`、`NOTICE`、`WARNING`、`ERR`、`CRIT`、`ALERT` 和 `EMERG`。 | +| severity_map | object | 否 | nil | 一种将上游 HTTP 响应代码映射到 Syslog 中的方法。 `key-value`,其中 `key` 是 HTTP 响应代码,`value`是 Syslog 严重级别。例如`{"410": "CRIT"}`。 | +| tags | array | 否 | | 元数据将包含在任何事件日志中,以帮助进行分段和过滤。 | +| include_req_body | boolean | 否 | false | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 无法将它记录下来。 | +| include_resp_body | boolean | 否 | false | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志或数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +如果要生成用户令牌,请在 Loggly 系统中的 `/loggly.com/tokens` 设置,或者在系统中单击 `Logs > Source setup > Customer tokens`。 + +## 插件元数据设置 + +你还可以通过插件元数据配置插件。详细配置如下: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | 
+|------------|---------|-------|----------------------|--------------------------------|---------------------------------------------------------------------| +| host | string | 否 | "logs-01.loggly.com" | | 发送日志的主机的端点。 | +| port | integer | 否 | 514 | | 要连接的 Loggly 端口。 仅用于 `syslog` 协议。 | +| timeout | integer | 否 | 5000 | | 发送数据请求超时时间(以毫秒为单位)。 | +| protocol | string | 否 | "syslog" | [ "syslog", "http", "https" ] | 将日志发送到 Loggly 的协议。 | +| log_format | object | 否 | nil | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +APISIX 支持 [Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm)、[HTTP/S](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/http-bulk-endpoint.htm)(批量端点)协议将日志事件发送到 Loggly。**默认情况下 `protocol` 的值为 `syslog`**。该协议允许你通过一些细粒度的控制(基于上游 HTTP 响应代码的日志严重性映射)发送符合 RFC5424 的系统日志事件。但是 HTTP/S 批量端点非常适合以更快的传输速度发送更大量的日志事件。 + +:::note 注意 + +Syslog 协议允许你发送符合 RFC5424 的 syslog 事件并进行细粒度控制。但是在以快速传输速度发送大量日志时,使用 HTTP/S 批量端点会更好。你可以通过以下方式更新元数据以更新使用的协议: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loggly \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "protocol": "http" +}' +``` + +::: + +## 启用插件 + +以下示例展示了如何在指定路由上启用该插件: + +**完整配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2", + "tags":["apisix", "testroute"], + "severity":"info", + "severity_map":{ + "503": "err", + "410": "alert" + }, + "buffer_duration":60, + "max_retry_count":0, + "retry_delay":1, + "inactive_timeout":2, + "batch_max_size":10 + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +**最小化配置** + +```shell +curl 
http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +发出请求后,你就可以在 Loggly 仪表盘上查看相关日志: + +![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png) + +## 禁用插件 + +当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/mocking.md b/docs/zh/latest/plugins/mocking.md index 13ca95342fb4..ec4568837b1a 100644 --- a/docs/zh/latest/plugins/mocking.md +++ b/docs/zh/latest/plugins/mocking.md @@ -126,7 +126,7 @@ JSON Schema 在其字段中支持以下类型: 你可以通过如下命令在指定路由上启用 `mocking` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], @@ -229,7 +229,7 @@ Server: APISIX/2.10.0 当你需要禁用 `mocking` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/zh/latest/plugins/mqtt-proxy.md b/docs/zh/latest/plugins/mqtt-proxy.md index bd97286ef8e4..2eb8eb2208a4 100644 --- a/docs/zh/latest/plugins/mqtt-proxy.md +++ b/docs/zh/latest/plugins/mqtt-proxy.md @@ -1,5 +1,11 @@ --- title: mqtt-proxy +keywords: + - APISIX + - API 网关 + - Plugin + - MQTT Proxy +description: 本文档介绍了 Apache APISIX mqtt-proxy 
插件的信息,通过 `mqtt-proxy` 插件可以使用 MQTT 的 `client_id` 进行动态负载平衡。 --- + +## 描述 + +`openfunction` 插件用于将开源的分布式无服务器平台 [CNCF OpenFunction](https://openfunction.dev/) 作为动态上游集成至 APISIX。 + +启用 `openfunction` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 OpenFunction 的 function 发起一个新的请求,然后 `openfunction` 插件会将响应信息返回至客户端。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------------------------- | ------- | ------ | ------- | ------------ | ------------------------------------------------------------ | +| function_uri | string | 是 | | | OpenFunction function uri,例如 `https://localhost:30858/default/function-sample`。 | +| ssl_verify | boolean | 否 | true | | 当设置为 `true` 时执行 SSL 验证。 | +| authorization | object | 否 | | | 访问 OpenFunction 的函数的授权凭证。| +| authorization.service_token | string | 否 | | | OpenFunction service token,其格式为 `xxx:xxx`,支持函数入口的 basic auth 认证方式。 | +| timeout | integer | 否 | 3000 ms | [100,...] ms | OpenFunction action 和 HTTP 调用超时时间,以毫秒为单位。 | +| keepalive | boolean | 否 | true | | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | +| keepalive_timeout | integer | 否 | 60000 ms| [1000,...] ms| 当连接空闲时,保持该连接处于活动状态的时间,以毫秒为单位。 | +| keepalive_pool | integer | 否 | 5 | [1,...] 
| 连接断开之前,可接收的最大请求数。 | + +:::note 注意 + +`timeout` 字段规定了 OpenFunction function 的最大执行时间,以及 APISIX 中 HTTP 客户端的请求超时时间。 + +因为 OpenFunction function 调用可能会耗费很长时间来拉取容器镜像和启动容器,如果 `timeout` 字段的值设置太小,可能会导致大量请求失败。 + +::: + +## 前提条件 + +在使用 `openfunction` 插件之前,你需要通过以下命令运行 OpenFunction。详情参考 [OpenFunction 安装指南](https://openfunction.dev/docs/getting-started/installation/) 。 + +请确保当前环境中已经安装对应版本的 Kubernetes 集群。 + +### 创建并推送函数 + +你可以参考 [OpenFunction 官方示例](https://github.com/OpenFunction/samples) 创建函数。构建函数时,你需要使用以下命令为容器仓库生成一个密钥,才可以将函数容器镜像推送到容器仓库 ( 例如 Docker Hub 或 Quay.io)。 + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## 测试插件 + +使用 `curl` 命令测试: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST -d'test' +``` + +正常返回结果: + +``` +hello, test! 
+``` + +### 配置路径转发 + +`OpenFunction` 插件还支持 URL 路径转发,同时将请求代理到上游的 OpenFunction API 端点。基本请求路径的扩展(如路由 `/hello/*` 中 `*` 的部分)会被添加到插件配置中指定的 `function_uri`。 + +:::info 重要 + +路由上配置的 `uri` 必须以 `*` 结尾,此功能才能正常工作。APISIX 路由是严格匹配的,`*` 表示此 URI 的任何子路径都将匹配到同一路由。 + +::: + +下面的示例配置了此功能: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +现在,对路径 `hello/123` 的任何请求都将调用 OpenFunction 插件设置的对应的函数,并转发添加的路径: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## 禁用插件 + +当你需要禁用 `openfunction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/openid-connect.md b/docs/zh/latest/plugins/openid-connect.md index 16a8f9bfe47d..d8b69fd63e0b 100644 --- a/docs/zh/latest/plugins/openid-connect.md +++ b/docs/zh/latest/plugins/openid-connect.md @@ -2,10 +2,10 @@ title: openid-connect keywords: - APISIX - - Plugin + - API Gateway - OpenID Connect - - openid-connect -description: 本文介绍了关于 Apache APISIX `openid-connect` 插件的基本信息及使用方法。 + - OIDC +description: OpenID Connect(OIDC)是基于 OAuth 2.0 的身份认证协议,APISIX 可以与支持该协议的身份认证服务对接,如 Okta、Keycloak、Ory Hydra、Authing 等,实现对客户端请求的身份认证。 --- + +## 描述 + +`public-api` 插件可用于通过创建路由的方式暴露用户自定义的 API。 + +你可以通过在路由中添加 `public-api` 插件,来保护**自定义插件为了实现特定功能**而暴露的 API。例如,你可以使用 [`jwt-auth`](./jwt-auth.md) 插件创建一个公共 API 端点 `/apisix/plugin/jwt/sign` 用于 JWT 认证。 + +:::note 注意 + +默认情况下,在自定义插件中添加的公共 API 不对外暴露的,你需要手动配置一个路由并启用 `public-api` 插件。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|------|--------|----------|---------|------------------------------------------------------------| +| uri | string | 否 | "" | 公共 API 的 URI。在设置路由时,使用此属性来配置初始的公共 API URI。 | + +## 启用插件 + +`public-api` 插件需要与授权插件一起配合使用,以下示例分别用到了 [`jwt-auth`](./jwt-auth.md) 插件和 [`key-auth`](./key-auth.md) 插件。 + +### 基本用法 + +首先,你需要启用并配置 `jwt-auth` 插件,详细使用方法请参考 [`jwt-auth`](./jwt-auth.md) 插件文档。 + +然后,使用以下命令在指定路由上启用并配置 `public-api` 插件: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/apisix/plugin/jwt/sign", + "plugins": { + "public-api": {} + } +}' +``` + +**测试插件** + +向配置的 URI 发出访问请求,如果返回一个包含 JWT Token 的响应,则代表插件生效: + +```shell +curl 'http://127.0.0.1:9080/apisix/plugin/jwt/sign?key=user-key' +``` + +```shell +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTk0Mjg1MzIsImtleSI6InVzZXIta2V5In0.NhrWrO-da4kXezxTLdgFBX2rJA2dF1qESs8IgmwhNd0 +``` + +### 使用自定义 URI + +首先,你需要启用并配置 `jwt-auth` 插件,详细使用方法请参考 [`jwt-auth`](./jwt-auth.md) 插件文档。 + +然后,你可以使用一个自定义的 URI 来暴露 API: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r2' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/gen_token", + "plugins": { + "public-api": { + "uri": "/apisix/plugin/jwt/sign" + } + } +}' +``` + +**测试插件** + +向自定义的 URI 发出访问请求,如果返回一个包含 JWT Token 的响应,则代表插件生效: + +```shell +curl 'http://127.0.0.1:9080/gen_token?key=user-key' +``` + +```shell +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTk0Mjg1NjIsImtleSI6InVzZXIta2V5In0.UVkXWbyGb8ajBNtxs0iAaFb2jTEWIlqTR125xr1ZMLc +``` + +### 确保 Route 安全 + +你可以配合使用 `key-auth` 插件来添加认证,从而确保路由的安全: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r2' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/gen_token", + "plugins": { + "public-api": { + "uri": "/apisix/plugin/jwt/sign" + }, + "key-auth": { 
+ "key": "test-apikey" + } + } +}' +``` + +**测试插件** + +通过上述命令启用插件并添加认证后,只有经过认证的请求才能访问。 + +发出访问请求并指定 `apikey`,如果返回 `200` HTTP 状态码,则说明请求被允许: + +```shell +curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' \ + -H "apikey: test-apikey" +``` + +```shell +HTTP/1.1 200 OK +``` + +发出访问请求,如果返回 `401` HTTP 状态码,则说明请求被阻止,插件生效: + +```shell +curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' +``` + +```shell +HTTP/1.1 401 Unauthorized +``` + +## 禁用插件 + +当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/real-ip.md b/docs/zh/latest/plugins/real-ip.md index 3804cdcdac11..0eed3534c86a 100644 --- a/docs/zh/latest/plugins/real-ip.md +++ b/docs/zh/latest/plugins/real-ip.md @@ -58,7 +58,7 @@ description: 本文介绍了关于 Apache APISIX `real-ip` 插件的基本信息 以下示例展示了如何在指定路由中启用 `real-ip` 插件: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", @@ -102,7 +102,7 @@ remote-port: 9080 当你需要禁用 `real-ip` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", diff --git a/docs/zh/latest/plugins/redirect.md b/docs/zh/latest/plugins/redirect.md index c40726a16887..61e0862e0776 100644 --- a/docs/zh/latest/plugins/redirect.md +++ b/docs/zh/latest/plugins/redirect.md @@ -34,20 +34,20 @@ description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信 | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | 
|---------------------|---------------|-----|-------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 状态码的 HTTPS。 | +| http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 状态码的 HTTPS,原 URI 的查询字符串也将包含在 Location 头中。 | | uri | string | 否 | | | 要重定向到的 URI,可以包含 NGINX 变量。例如:`/test/index.htm`, `$uri/index.html`,`${uri}/index.html`。如果你引入了一个不存在的变量,它不会报错,而是将其视为一个空变量。 | | regex_uri | array[string] | 否 | | | 将来自客户端的 URL 与正则表达式匹配并重定向。当匹配成功后使用模板替换发送重定向到客户端,如果未匹配成功会将客户端请求的 URI 转发至上游。 和 `regex_uri` 不可以同时存在。例如:["^/iresty/(.)/(.)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 URI 正则表达式,第二个元素代表匹配成功后发送重定向到客户端的 URI 模板。 | | ret_code | integer | 否 | 302 | [200, ...] | HTTP 响应码 | -| encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986)的编码格式进行编码。 | +| encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986) 的编码格式进行编码。 | | append_query_string | boolean | 否 | false | [true,false] | 当设置为 `true` 时,将原始请求中的查询字符串添加到 `Location` Header。如果已配置 `uri` 或 `regex_uri` 已经包含查询字符串,则请求中的查询字符串将附加一个`&`。如果你已经处理过查询字符串(例如,使用 NGINX 变量 `$request_uri`),请不要再使用该参数以避免重复。 | :::note -`http_to_https`、`uri` 和 `regex_uri` 只能配置其中一个属性。 - +* `http_to_https`、`uri` 和 `regex_uri` 只能配置其中一个属性。 +* `http_to_https`、和 `append_query_string` 只能配置其中一个属性。 * 当开启 `http_to_https` 时,重定向 URL 中的端口将按如下顺序选取一个值(按优先级从高到低排列) * 从配置文件(`conf/config.yaml`)中读取 `plugin_attr.redirect.https_port`。 - * 如果 `apisix.ssl` 处于开启状态,先读取 `apisix.ssl.listen_port`,如果没有,再读取 `apisix.ssl.listen` 并从中随机选一个 `port`。 + * 如果 `apisix.ssl` 处于开启状态,读取 `apisix.ssl.listen` 并从中随机选一个 `port`。 * 使用 443 作为默认 
`https port`。 ::: @@ -57,7 +57,7 @@ description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信 以下示例展示了如何在指定路由中启用 `redirect` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test/index.html", @@ -79,7 +79,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 你也可以在新的 URI 中使用 NGINX 内置的任意变量: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test", @@ -121,7 +121,7 @@ Location: /test/default.html 以下示例展示了如何将 HTTP 重定向到 HTTPS: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", @@ -151,7 +151,7 @@ Location: https://127.0.0.1:9443/hello 当你需要禁用 `redirect` 插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test/index.html", diff --git a/docs/zh/latest/plugins/referer-restriction.md b/docs/zh/latest/plugins/referer-restriction.md index cdf6d4605dc5..430f8c112775 100644 --- a/docs/zh/latest/plugins/referer-restriction.md +++ b/docs/zh/latest/plugins/referer-restriction.md @@ -1,5 +1,10 @@ --- title: referer-restriction +keywords: + - APISIX + - API 网关 + - Referer restriction +description: 本文介绍了 Apache APISIX referer-restriction 插件的使用方法,通过该插件可以将 referer 请求头中的域名加入黑名单或者白名单来限制其对服务或路由的访问。 --- + +## 描述 + +`tencent-cloud-cls` 插件可用于将 APISIX 日志使用[腾讯云日志服务](https://cloud.tencent.com/document/product/614) API 推送到您的日志主题。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------- | ------- | ------ |-------| ------------ 
|------------------------------------------------------------------------------| +| cls_host | string | 是 | | | CLS API 域名,参考[使用 API 上传日志](https://cloud.tencent.com/document/api/614/16873)。| +| cls_topic | string | 是 | | | CLS 日志主题 id。 | +| secret_id | string | 是 | | | 云 API 密钥的 id。 | +| secret_key | string | 是 | | | 云 API 密钥的 key。 | +| sample_ratio | number | 否 | 1 | [0.00001, 1] | 采样的比例。设置为 `1` 时,将对所有请求进行采样。 | +| include_req_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含请求体。 | +| include_resp_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含响应体。 | +| global_tag | object | 否 | | | kv 形式的 JSON 数据,可以写入每一条日志,便于在 CLS 中检索。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 重要 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `tencent-cloud-cls` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tencent-cloud-cls \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ 
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "ap-guangzhou.cls.tencentyun.com", + "cls_topic": "${your CLS topic name}", + "global_tag": { + "module": "cls-logger", + "server_name": "YourApiGateWay" + }, + "include_req_body": true, + "include_resp_body": true, + "secret_id": "${your secret id}", + "secret_key": "${your secret key}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +hello, world +``` + +## 禁用插件 + +当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/traffic-split.md b/docs/zh/latest/plugins/traffic-split.md index c9f81c8546b8..97e87fd47c49 100644 --- a/docs/zh/latest/plugins/traffic-split.md +++ b/docs/zh/latest/plugins/traffic-split.md @@ -1,5 +1,12 @@ --- title: traffic-split +keywords: + - APISIX + - API 网关 + - Traffic Split + - 灰度发布 + - 蓝绿发布 +description: 本文介绍了 Apache APISIX traffic-split 插件的相关操作,你可以使用此插件动态地将部分流量引导至各种上游服务。 --- + +## 描述 + +`workflow` 插件引入 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 来提供复杂的流量控制功能。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| rules.case | array[array] | 是 | | | 由一个或多个{var, operator, val}元素组成的列表,类似这样:{{var, operator, val}, {var, operator, val}, ...}}。例如:{"arg_name", "==", "json"},表示当前请求参数 
name 是 json。这里的 var 与 NGINX 内部自身变量命名保持一致,所以也可以使用 request_uri、host 等;对于 operator 部分,目前已支持的运算符有 ==、~=、~~、>、<、in、has 和 ! 。关于操作符的具体用法请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 `operator-list` 部分。 | +| rules.actions | array[object] | 是 | | | 当 `case` 成功匹配时要执行的 `actions`。目前,`actions` 中只支持一个元素。`actions` 的唯一元素的第一个子元素可以是 `return` 或 `limit-count`。 | + +### `actions` 属性 + +#### return + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | ----| ------------- | +| actions[1].return | string | 否 | | | 直接返回到客户端。 | +| actions[1].[2].code | integer | 否 | | | 返回给客户端的 HTTP 状态码。 | + +#### limit-count + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | ----| ------------- | +| actions[1].limit-count | string | 否 | | | 执行 `limit-count` 插件的功能。 | +| actions[1].[2] | object | 否 | | | `limit-count` 插件的配置。 | + +:::note + +在 `rules` 中,按照 `rules` 的数组下标顺序依次匹配 `case`,如果 `case` 匹配成功,则直接执行对应的 `actions`。 + +::: + +## 启用插件 + +以下示例展示了如何在路由中启用 `workflow` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "plugins":{ + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/hello/rejected"] + ], + "actions":[ + [ + "return", + {"code": 403} + ] + ] + }, + { + "case":[ + ["uri", "==", "/hello/v2/appid"] + ], + "actions":[ + [ + "limit-count", + { + "count":2, + "time_window":60, + "rejected_code":429 + } + ] + ] + } + ] + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` + +如上,我们启用了 `workflow` 插件,如果请求与 `rules` 中的 `case` 匹配,则会执行对应的 `actions`。 + +**示例 1: 如果请求的 uri 是 `/hello/rejected`,则返回给客户端状态码 `403`** + +```shell +curl http://127.0.0.1:9080/hello/rejected -i +HTTP/1.1 403 Forbidden +...... 
+ +{"error_msg":"rejected by workflow"} +``` + +**示例 2: 如果请求的 uri 是 `/hello/v2/appid`,则执行 `limit-count` 插件,限制请求的数量为 2,时间窗口为 60 秒,如果超过限制数量,则返回给客户端状态码 `429`** + +```shell +curl http://127.0.0.1:0080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:0080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:0080/hello/v2/appid -i +HTTP/1.1 429 Too Many Requests +``` + +**示例 3: 如果请求不能被任何 `case` 匹配,则 `workflow` 不会执行任何操作** + +```shell +curl http://127.0.0.1:0080/hello/fake -i +HTTP/1.1 200 OK +``` + +## Disable Plugin + +当你需要禁用 `workflow` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/zipkin.md b/docs/zh/latest/plugins/zipkin.md index 88b2752f0596..e8f5814b9ad0 100644 --- a/docs/zh/latest/plugins/zipkin.md +++ b/docs/zh/latest/plugins/zipkin.md @@ -111,7 +111,7 @@ func main(){ 以下示例展示了如何在指定路由中启用 `zipkin` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -178,7 +178,7 @@ docker run -d --name jaeger \ 通过以下命令创建路由并启用插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -221,7 +221,7 @@ HTTP/1.1 200 OK 当你需要禁用 `zipkin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl 
http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/zh/latest/router-radixtree.md b/docs/zh/latest/router-radixtree.md index 63886f4e4b82..9291ec22d628 100644 --- a/docs/zh/latest/router-radixtree.md +++ b/docs/zh/latest/router-radixtree.md @@ -83,7 +83,7 @@ title: 路由 RadixTree 创建两条 `priority` 值不同的路由(值越大,优先级越高)。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -97,7 +97,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -124,7 +124,7 @@ curl http://127.0.0.1:1980/hello 以下是设置主机匹配规则的示例: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -138,7 +138,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -197,7 +197,7 @@ apisix: 具体参数及使用方式请查看 [radixtree#new](https://github.com/api7/lua-resty-radixtree#new) 文档,下面是一个简单的示例: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' 
+$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "vars": [ @@ -226,7 +226,7 @@ APISIX 支持通过 POST 表单属性过滤路由,其中需要您使用 `Conte 我们可以定义这样的路由: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST"], "uri": "/_post", @@ -274,7 +274,7 @@ query getRepo { 我们可以用以下方法过滤掉这样的路由: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/graphql", diff --git a/docs/zh/latest/stand-alone.md b/docs/zh/latest/stand-alone.md index 07f7425afad7..9abbe20d6d70 100644 --- a/docs/zh/latest/stand-alone.md +++ b/docs/zh/latest/stand-alone.md @@ -35,14 +35,15 @@ APISIX 节点服务启动后会立刻加载 `conf/apisix.yaml` 文件中的路 由于目前 Admin API 都是基于 etcd 配置中心解决方案,当开启 Stand-alone 模式后, Admin API 将不再被允许使用。 -通过设置 `conf/config.yaml` 中的 `apisix.config_center` 选项为 `yaml` ,并禁用 Admin API 即可启用 Stand-alone 模式。 +只有当 APISIX 的角色设置为 data plane 时,才能开启 Stand-alone 模式。通过设置 `deployment.role` 为 `data_plane`,设置 `deployment.role_data_plane.config_provider` 为 `yaml` 即可启用 Stand-alone 模式。 参考下面示例: ```yaml -apisix: - enable_admin: false - config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml ``` ### 如何配置规则 @@ -282,9 +283,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/docs/zh/latest/stream-proxy.md b/docs/zh/latest/stream-proxy.md index f2b330ce5a9a..4fd5ef28de17 100644 --- a/docs/zh/latest/stream-proxy.md +++ b/docs/zh/latest/stream-proxy.md @@ -58,7 +58,7 @@ apisix: 简例如下: 
```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "remote_addr": "127.0.0.1", "upstream": { @@ -71,7 +71,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 ``` 例子中 APISIX 对客户端 IP 为 `127.0.0.1` 的请求代理转发到上游主机 `127.0.0.1:1995`。 -更多用例,请参照 [test case](../../../t/stream-node/sanity.t)。 +更多用例,请参照 [test case](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t)。 ## 更多 route 匹配选项 @@ -84,7 +84,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 例如 ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.1", "server_port": 2000, @@ -127,7 +127,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 3. 
现在我们将创建一个带有服务器过滤的 stream 路由: ```shell - curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.10", "server_port": 9101, @@ -184,7 +184,7 @@ mTLS 也是支持的,参考 [保护路由](./mtls.md#保护路由)。 然后,我们需要配置一个 route,匹配连接并代理到上游: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -198,7 +198,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 当连接为 TLS over TCP 时,我们可以通过 SNI 来匹配路由,比如: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "sni": "a.test.com", "upstream": { @@ -217,7 +217,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 APISIX 还支持代理到 TLS over TCP 上游。 ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "scheme": "tls", diff --git a/docs/zh/latest/terminology/consumer.md b/docs/zh/latest/terminology/consumer.md index fc9102983b35..02c15f5ae021 100644 --- a/docs/zh/latest/terminology/consumer.md +++ b/docs/zh/latest/terminology/consumer.md @@ -50,7 +50,7 @@ title: Consumer ```shell # 创建 Consumer ,指定认证插件 key-auth ,并开启特定插件 limit-count -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl 
http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -67,7 +67,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335 }' # 创建 Router,设置路由规则和启用插件配置 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -100,7 +100,7 @@ HTTP/1.1 503 Service Temporarily Unavailable ```shell # 设置黑名单,禁止 jack 访问该 API -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {}, diff --git a/docs/zh/latest/architecture-design/plugin-config.md b/docs/zh/latest/terminology/plugin-config.md similarity index 58% rename from docs/zh/latest/architecture-design/plugin-config.md rename to docs/zh/latest/terminology/plugin-config.md index d3bac196e36e..599b88078c8d 100644 --- a/docs/zh/latest/architecture-design/plugin-config.md +++ b/docs/zh/latest/terminology/plugin-config.md @@ -1,5 +1,11 @@ --- title: Plugin Config +keywords: + - API 网关 + - Apache APISIX + - 插件配置 + - Plugin Config +description: Plugin Config 对象,可以用于创建一组通用的插件配置,并在路由中使用这组配置。 --- -如果你想要复用一组通用的插件配置,你可以把它们提取成一个 Plugin config,并绑定到对应的路由上。 +## 描述 -举个例子,你可以这么做: +在很多情况下,我们在不同的路由中会使用相同的插件规则,此时就可以通过 `Plugin Config` 来设置这些规则。`plugins` 的配置可以通过 Admin API `/apisix/admin/plugin_configs` 进行单独配置,在路由中使用`plugin_config_id` 与之进行关联。插件配置属于一组通用插件配置的抽象。 + +## 配置步骤 + +你可以参考如下步骤将 Plugin Config 绑定在路由上。 + +1. 
创建 Plugin config。 ```shell -# 创建 Plugin config -$ curl http://127.0.0.1:9080/apisix/admin/plugin_configs/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { - "desc": "吾乃插件配置 1", + "desc": "enable limit-count plugin", "plugins": { "limit-count": { "count": 2, @@ -38,9 +49,13 @@ $ curl http://127.0.0.1:9080/apisix/admin/plugin_configs/1 -H 'X-API-KEY: edd1c9 } } }' +``` + +2. 创建路由并绑定 `Plugin Config 1`。 -# 绑定到路由上 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +``` +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uris": ["/index.html"], "plugin_config_id": 1, @@ -55,14 +70,19 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f 如果找不到对应的 Plugin config,该路由上的请求会报 503 错误。 -如果这个路由已经配置了 `plugins`,那么 Plugin config 里面的插件配置会合并进去。 -相同的插件会覆盖掉 `plugins` 原有的插件。 +## 注意事项 -举个例子: +如果路由中已经配置了 `plugins`,那么 Plugin Config 里面的插件配置将会与 `plugins` 合并。 -``` +相同的插件不会覆盖掉 `plugins` 原有的插件。 + +例如: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { - "desc": "吾乃插件配置 1", + "desc": "enable ip-restruction and limit-count plugin", "plugins": { "ip-restriction": { "whitelist": [ @@ -79,9 +99,11 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } ``` -加上 +在路由中引入 Plugin Config: -``` +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uris": ["/index.html"], "plugin_config_id": 1, @@ -94,7 +116,6 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f "plugins": { "proxy-rewrite": { "uri": "/test/add", - "scheme": "https", "host": "apisix.iresty.com" }, "limit-count": { @@ -107,9 
+128,11 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } ``` -等于 +最后实现的效果如下: -``` +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uris": ["/index.html"], "upstream": { @@ -127,13 +150,13 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f }, "proxy-rewrite": { "uri": "/test/add", - "scheme": "https", "host": "apisix.iresty.com" }, "limit-count": { - "count": 2, + "count": 20, "time_window": 60, - "rejected_code": 503 + "rejected_code": 503, + "key": "remote_addr" } } } diff --git a/docs/zh/latest/terminology/plugin.md b/docs/zh/latest/terminology/plugin.md index 86bed6442982..b832d394c1fc 100644 --- a/docs/zh/latest/terminology/plugin.md +++ b/docs/zh/latest/terminology/plugin.md @@ -23,13 +23,11 @@ title: Plugin `Plugin` 表示将在 `HTTP` 请求/响应生命周期期间执行的插件配置。 -`Plugin` 配置可直接绑定在 `Route` 上,也可以被绑定在 `Service` 或 `Consumer`上。而对于同一个插件的配置,只能有一份是有效的,配置选择优先级总是 `Consumer` > `Route` > `Service`。 +`Plugin` 配置可直接绑定在 `Route` 上,也可以被绑定在 `Service`、`Consumer` 或 `Plugin Config` 上。而对于同一个插件的配置,只能有一份是有效的,配置选择优先级总是 `Consumer` > `Route` > `Plugin Config` > `Service`。 在 `conf/config.yaml` 中,可以声明本地 APISIX 节点都支持哪些插件。这是个白名单机制,不在该白名单的插件配置,都将会被自动忽略。这个特性可用于临时关闭或打开特定插件,应对突发情况非常有效。 如果你想在现有插件的基础上新增插件,注意需要拷贝 `conf/config-default.yaml` 的插件节点内容到 `conf/config.yaml` 的插件节点中。 -插件的配置可以被直接绑定在指定 Route 中,也可以被绑定在 Service 中,不过 Route 中的插件配置优先级更高。 - 一个插件在一次请求中只会执行一次,即使被同时绑定到多个不同对象中(比如 Route 或 Service)。 插件运行先后顺序是根据插件自身的优先级来决定的,例如: @@ -68,7 +66,34 @@ local _M = { ## 插件通用配置 -一些通用的配置可以应用于插件配置。比如说。 +通过 `_meta` 配置项可以将一些通用的配置应用于插件,具体配置项如下: + +| 名称 | 类型 | 描述 | +|--------------|------|----------------| +| disable | boolean | 是否禁用该插件。 | +| error_response | string/object | 自定义错误响应。 | +| priority | integer | 自定义插件优先级。 | +| filter | array | 根据请求的参数,在运行时控制插件是否执行。此配置由一个或多个 {var, operator, val} 元素组成列表,类似:{{var, operator, val}, {var, operator, val}, ...}}。例如 `{"arg_version", "==", 
"v2"}`,表示当前请求参数 `version` 是 `v2`。这里的 `var` 与 NGINX 内部自身变量命名是保持一致。操作符的具体用法请看[lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 operator-list 部分。| + +### 禁用插件 + +通过 `disable` 配置,你可以新增一个处于禁用状态的插件,请求不会经过该插件。 + +```json +{ + "proxy-rewrite": { + "_meta": { + "disable": true + } + } +} +``` + +### 自定义错误响应 + +通过 `error_response` 配置,可以将任意插件的错误响应配置成一个固定的值,避免因为插件内置的错误响应信息而带来困扰。 + +如下配置表示将 `jwt-auth` 插件的错误响应自定义为 '{"message": "Missing credential in request"}'。 ```json { @@ -82,18 +107,9 @@ local _M = { } ``` -上面的配置意味着将 jwt-auth 插件的错误响应自定义为 '{"message": "Missing credential in request"}'。 - -### 在 `_meta` 下的插件通用配置 - -| 名称 | 类型 | 描述 | -|--------------|------|----------------| -| error_response | string/object | 自定义错误响应 | -| priority | integer | 自定义插件优先级 | - ### 自定义插件优先级 -所有插件都有默认优先级,但是可以自定义插件优先级来改变插件执行顺序。 +所有插件都有默认优先级,但你仍可以通过 `priority` 配置项来自定义插件优先级,从而改变插件执行顺序。 ```json { @@ -127,6 +143,106 @@ serverless-pre-function 的默认优先级是 10000,serverless-post-function - 自定义插件优先级只会影响插件实例绑定的主体,不会影响该插件的所有实例。比如上面的插件配置属于路由 A ,路由 B 上的插件 serverless-post-function 和 serverless-post-function 插件执行顺序不会受到影响,会使用默认优先级。 - 自定义插件优先级不适用于 consumer 上配置的插件的 rewrite 阶段。路由上配置的插件的 rewrite 阶段将会优先运行,然后才会运行 consumer 上除 auth 插件之外的其他插件的 rewrite 阶段。 +### 动态控制插件是否执行 + +默认情况下,在路由中指定的插件都会被执行。但是我们可以通过 `filter` 配置项为插件添加一个过滤器,通过过滤器的执行结果控制插件是否执行。 + +如下配置表示,只有当请求查询参数中 `version` 值为 `v2` 时,`proxy-rewrite` 插件才会执行。 + +```json +{ + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } +} +``` + +使用下述配置创建一条完整的路由: + +```json +{ + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +} +``` + +当请求中不带任何参数时,`proxy-rewrite` 插件不会执行,请求将被转发到上游的 `/get`: + +```shell +curl -v /dev/null http://127.0.0.1:9080/get -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... 
+< Server: APISIX/2.15.0 +< +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6eec-46c97e8a5d95141e621e07fe", + "X-Forwarded-Host": "httpbin.org" + }, + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/get" +} +``` + +当请求中携带参数 `version=v2` 时,`proxy-rewrite` 插件执行,请求将被转发到上游的 `/anything`: + +```shell +curl -v /dev/null http://127.0.0.1:9080/get?version=v2 -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... +< Server: APISIX/2.15.0 +< +{ + "args": { + "version": "v2" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6f02-24a613b57b6587a076ef18b4", + "X-Forwarded-Host": "httpbin.org" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/anything?version=v2" +} +``` + ## 热加载 APISIX 的插件是热加载的,不管你是新增、删除还是修改插件,都不需要重启服务。 @@ -134,7 +250,7 @@ APISIX 的插件是热加载的,不管你是新增、删除还是修改插件 只需要通过 admin API 发送一个 HTTP 请求即可: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT ``` 注意:如果你已经在路由规则里配置了某个插件(比如在 `route` 的 `plugins` 字段里面添加了它),然后禁用了该插件,在执行路由规则的时候会跳过这个插件。 diff --git a/docs/zh/latest/terminology/route.md b/docs/zh/latest/terminology/route.md index 161fc0bb92f0..53ac72f11cd4 100644 --- a/docs/zh/latest/terminology/route.md +++ b/docs/zh/latest/terminology/route.md @@ -35,7 +35,7 @@ Route 中主要包含三部分内容:匹配规则(比如 uri、host、remote 下面创建的 Route 示例,是把 URL 为 "/index.html" 的请求代理到地址为 "127.0.0.1:1980" 的 Upstream 服务: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' 
-X PUT -i -d ' { "uri": "/index.html", "upstream": { @@ -53,7 +53,7 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` 当我们接收到成功应答,表示该 Route 已成功创建。 diff --git a/docs/zh/latest/terminology/service.md b/docs/zh/latest/terminology/service.md index e5c15086d978..6f34fee9155b 100644 --- a/docs/zh/latest/terminology/service.md +++ b/docs/zh/latest/terminology/service.md @@ -32,7 +32,7 @@ title: Service ```shell # create new Service -$ curl http://127.0.0.1:9080/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -51,14 +51,14 @@ $ curl http://127.0.0.1:9080/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034 }' # create new Route and reference the service by id `200` -curl http://127.0.0.1:9080/apisix/admin/routes/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", "service_id": "200" }' -curl http://127.0.0.1:9080/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/foo/index.html", @@ -69,7 +69,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f 当然我们也可以为 Route 指定不同的插件参数或上游,比如下面这个 Route 设置了不同的限流参数,其他部分(比如上游)则继续使用 Service 
中的配置参数。 ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/bar/index.html", "id": "102", diff --git a/docs/zh/latest/terminology/upstream.md b/docs/zh/latest/terminology/upstream.md index 6c2a47ad9c56..7905bab639e6 100644 --- a/docs/zh/latest/terminology/upstream.md +++ b/docs/zh/latest/terminology/upstream.md @@ -36,7 +36,7 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 创建上游对象用例: ```shell -curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "type": "chash", "key": "remote_addr", @@ -50,7 +50,7 @@ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335 上游对象创建后,均可以被具体 `Route` 或 `Service` 引用,例如: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream_id": 1 @@ -60,7 +60,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 为了方便使用,也可以直接把上游地址直接绑到某个 `Route` 或 `Service` ,例如: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -83,7 +83,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 下面是一个配置了健康检查的示例: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -127,7 +127,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 创建一个 consumer 对象: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -141,7 +141,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f1 新建路由,打开 `key-auth` 插件认证,`upstream` 的 `hash_on` 类型为 `consumer`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -169,7 +169,7 @@ curl http://127.0.0.1:9080/server_port -H "apikey: auth-jack" 新建路由和 `Upstream`,`hash_on` 类型为 `cookie`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hash_on_cookie", "upstream": { @@ -195,7 +195,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 新建路由和 `Upstream`,`hash_on` 类型为 `header`,`key` 为 `content-type`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hash_on_header", "upstream": { diff --git a/docs/zh/latest/tutorials/expose-api.md b/docs/zh/latest/tutorials/expose-api.md new file mode 100644 index 000000000000..0f03f537477a --- /dev/null +++ b/docs/zh/latest/tutorials/expose-api.md @@ -0,0 +1,126 @@ +--- +title: 发布 API +keywords: + - API 网关 + - 
Apache APISIX + - 发布路由 + - 创建服务 +description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 +--- + + + +## 描述 + +本文将引导你了解 APISIX 的上游、路由以及服务的概念,并介绍如何通过 APISIX 发布你的 API。 + +## 概念介绍 + +### 上游 + +[Upstream](../terminology/upstream.md) 也称为上游,上游是对虚拟主机的抽象,即应用层服务或节点的抽象。 + +上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。当多个路由或服务引用同一个上游时,可以通过创建上游对象,在路由或服务中使用上游的 ID 方式引用上游,减轻维护压力。 + +### 路由 + +[Route](../terminology/route.md) 也称为路由,是 APISIX 中最基础和最核心的资源对象。 + +APISIX 可以通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的[插件](./terminology/plugin.md),最后把请求转发给到指定的上游服务。路由中主要包含三部分内容:匹配规则、插件配置和上游信息。 + +### 服务 + +[Service](../terminology/service.md) 也称为服务,是某类 API 的抽象(也可以理解为一组 Route 的抽象)。它通常与上游服务抽象是一一对应的,Route 与 Service 之间,通常是 N:1 的关系。 + +## 前提条件 + +在进行如下操作前,请确保你已经通过 Docker [启动 APISIX](../installation-guide.md)。 + +## 公开你的服务 + +1. 创建上游。 + +创建一个包含 `httpbin.org` 的上游服务,你可以使用它进行测试。这是一个返回服务,它将返回我们在请求中传递的参数。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } +}' +``` + +在该命令中,我们指定了 Apache APISIX 的 Admin API Key 为 `edd1c9f034335f136f87ad84b625c8f1`,并且使用 `roundrobin` 作为负载均衡机制,并设置了 `httpbin.org:80` 为上游服务。为了将该上游绑定到路由,此处需要把 `upstream_id` 设置为 `1`。此处你可以在 `nodes` 下指定多个上游,以达到负载均衡的效果。 + +如需了解更多信息,请参考[上游](../terminology/upstream.md)。 + +2. 创建路由。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream_id": "1" +}' +``` + +:::note 注意 + +创建上游非必须步骤,你可以通过在路由中,添加 `upstream` 对象,达到上述的效果。例如: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +::: + +3. 
测试路由。 + +在创建完成路由后,你可以通过以下命令测试路由是否正常: + +``` +curl -i -X GET "http://127.0.0.1:9080/get?foo1=bar1&foo2=bar2" -H "Host: httpbin.org" +``` + +该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 + +## 更多教程 + +你可以查看[保护 API](./protect-api.md) 来保护你的 API。 + +接下来,你可以通过 APISIX 的一些[插件](./plugins/batch-requests.md),实现更多功能。 diff --git a/docs/zh/latest/tutorials/observe-your-api.md b/docs/zh/latest/tutorials/observe-your-api.md new file mode 100644 index 000000000000..d65aca50cd58 --- /dev/null +++ b/docs/zh/latest/tutorials/observe-your-api.md @@ -0,0 +1,236 @@ +--- +title: 监控 API +keywords: + - API 网关 + - Apache APISIX + - 可观测性 + - 监控 + - 插件 +description: 本文介绍了 API 网关 Apache APISIX 可观察性插件并了解如何设置这些插件。 +--- + + + +APISIX 中提供了很多具有丰富功能的可观测性插件。你可以通过使用和设置这些插件,来了解 API 行为,进而使整个业务流程更加清晰。 + +## API 可观测性 + +**API 可观测性**已经成为 API 开发的一部分,因为它解决了与 API 一致性、可靠性和快速迭代 API 功能的相关问题。可观测性可分为三个关键部分:日志、指标、链路追踪,接下来让我们逐个了解它们。 + +![Observability of three key areas](https://static.apiseven.com/2022/09/14/6321cf14c555a.jpg) + +## 前提条件 + +在进行该教程之前,请确保你已经[公开服务](./expose-api.md)。 + +## 日志 + +在 APISIX 中,**日志**可分为访问日志和错误日志。访问日志主要记录了每个请求的上下文信息,错误日志则是 APISIX 运行打印的日志信息,包括 NGINX 和插件相关的信息。APISIX 的日志存储在 `./apisix/logs/` 目录下。当然你可以通过一些 APISIX 的日志插件,将 APISIX 的日志发送到指定的日志服务中,APISIX 提供了以下插件: + +- [http-logger](../plugins/http-logger.md) +- [skywalking-logger](../plugins/skywalking-logger.md) +- [tcp-logger](../plugins/tcp-logger.md) +- [kafka-logger](../plugins/kafka-logger.md) +- [rocketmq-logger](../plugins/rocketmq-logger.md) +- [udp-logger](../plugins/udp-logger.md) +- [clickhouse-logger](../plugins/clickhouse-logger.md) +- [error-logger](../plugins/error-log-logger.md) +- [google-cloud-logging](../plugins/google-cloud-logging.md) + +你可以在 APISIX [插件中心](../plugins/http-logger.md) 查看 APISIX 支持的所有日志插件。接下来我们将使用 `http-logger` 插件为你演示如何将 APISIX 的日志数据发送到 HTPP/HTTPS 服务器中。 + +:::note 注意 + +你可以使用 [mockbin.com](https://mockbin.org/) 生成一个模拟的 HTTP 服务器来存储和查看日志。 + +::: + +以下示例展示了在指定路由上启动 `http-logger` 的示例。 + +```shell + +curl 
http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" + } + }, + "upstream_id": "1", + "uri": "/get" +}' + +``` + +:::note 注意 + +你可以通过修改 `uri` 属性,将上述 `http-logger` 的服务器地址更换为你的服务器地址: + +```json +{ + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" +} +``` + +::: + +创建成功后,你可以通过以下命令向 `get` 端点发送请求以生成日志。 + +```shell +curl -i http://127.0.0.1:9080/get +``` + +请求成功后,你可以单击[模拟服务器链接](http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61/log)查看访问日志。 + +![http-logger-plugin-test-screenshot](https://static.apiseven.com/2022/09/14/6321d1d83eb7a.png) + +## 指标 + +**指标**是在⼀段时间内测量的数值。与⽇志不同,指标在默认情况下是结构化的,这使得查询和优化存储变得更加容易。而 APISIX 也提供了 [Prometheus](../plugins/prometheus.md) 的插件来获取你的 API 指标,并在 Prometheus 中暴露它们。通过使用 APISIX 提供的 Grafana 仪表板元数据,并从 Prometheus 中获取指标,更加方便地监控你的 API。 + +你可以通过以下命令启用 `prometheus` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream_id": "1" +}' +``` + +启用成功后,你可以通过 `/apisix/prometheus/metrics` 接口获取 APISIX 的指标。 + +```shell +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +返回结果如下: + +```text +HTTP/1.1 200 OK +Server: openresty +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive + +# HELP apisix_batch_process_entries batch process remaining entries +# TYPE apisix_batch_process_entries gauge +apisix_batch_process_entries{name="http logger",route_id="1",server_addr="172.19.0.8"} 0 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 17819 +apisix_etcd_modify_indexes{key="global_rules"} 17832 +apisix_etcd_modify_indexes{key="max_modify_index"} 20028 
+apisix_etcd_modify_indexes{key="prev_index"} 18963
+apisix_etcd_modify_indexes{key="protos"} 0
+apisix_etcd_modify_indexes{key="routes"} 20028
+...
+```
+
+你还可以通过 `http://localhost:9090/targets` 在 Prometheus 仪表板上查看端点的状态。
+
+![plugin-orchestration-configure-rule-screenshot](https://static.apiseven.com/2022/09/14/6321d30b32024.png)
+
+如上图，APISIX 公开的指标端点已启动并正在运行。
+
+现在，你可以查询 `apisix_http_status` 的指标，查看 APISIX 处理了哪些 HTTP 请求及其结果。
+
+![prometheus-plugin-dashboard-query-http-status-screenshot](https://static.apiseven.com/2022/09/14/6321d30aed3b2.png)
+
+除此之外，你还可以查看在本地实例中运行的 Grafana 仪表板。请访问 `http://localhost:3000/`。
+
+![prometheus-plugin-grafana-dashboard-screenshot](https://static.apiseven.com/2022/09/14/6321d30bba97c.png)
+
+目前，APISIX 还提供了其他两个关于指标的插件：
+
+- [Node status 插件](../plugins/node-status.md)
+- [Datadog 插件](../plugins/datadog.md)
+
+## 链路追踪
+
+**链路追踪**就是将一次请求还原成调用链路，并将该请求的调用情况使用拓扑的方式展现，比如展示各个微服务节点上的耗时，请求具体经过了哪些服务器以及每个服务节点的请求状态等内容。
+
+[Zipkin](https://zipkin.io/) 是一个开源的分布式追踪系统。APISIX 的 [zipkin 插件](../plugins/zipkin.md) 支持根据 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html) 收集链路信息并报告给 Zipkin Collector。
+
+:::tip 提示
+
+使用该插件前，请确保你已经有一个正在运行的 Zipkin 实例。你可以使用 Docker 快速启动一个 Zipkin 实例：
+
+```
+docker run -d -p 9411:9411 openzipkin/zipkin
+```
+
+:::
+
+你可以通过如下示例，在指定路由中启用 `zipkin` 插件：
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+  "methods": [
+    "GET"
+  ],
+  "uri": "/get",
+  "plugins": {
+    "zipkin": {
+      "endpoint": "http://127.0.0.1:9411/api/v2/spans",
+      "sample_ratio": 1
+    }
+  },
+  "upstream_id": "1"
+}'
+```
+
+你可以通过以下命令请求 APISIX：
+
+```shell
+curl -i http://127.0.0.1:9080/get
+```
+
+如下所示，返回结果中的 `header` 部分附加了一些额外的跟踪标识符（TraceId、SpanId 和 ParentId）：
+
+```text
+"X-B3-Parentspanid": "61bd3f4046a800e7",
+"X-B3-Sampled": "1",
+"X-B3-Spanid": "855cd5465957f414",
+"X-B3-Traceid": "e18985df47dab632d62083fd96626692",
+```
+ +你可以通过访问 `http://127.0.0.1:9411/zipkin`,在 Zinkin 的 Web UI 上看到请求链路。 + +![Zipkin plugin output 1](https://static.apiseven.com/2022/09/14/6321dc27f3d33.png) + +![Zipkin plugin output 2](https://static.apiseven.com/2022/09/14/6321dc284049c.png) + +你也可以通过另外两个插件进行链路追踪: + +- [Skywalking 插件](../plugins/skywalking.md) + +- [OpenTelemetry 插件](../plugins/opentelemetry.md) + +## 总结 + +API 可观测性是一种用于在 API 世界中管理应用程序的框架,APISIX 的插件可以通过集成到多个可观测性平台来帮助你监控 API,让你更专注于开发核心业务功能,无需为集成多个可观测性应用花费更多时间。 diff --git a/docs/zh/latest/tutorials/protect-api.md b/docs/zh/latest/tutorials/protect-api.md new file mode 100644 index 000000000000..6e259b7aebce --- /dev/null +++ b/docs/zh/latest/tutorials/protect-api.md @@ -0,0 +1,129 @@ +--- +title: 保护 API +keywords: + - API 网关 + - Apache APISIX + - 发布路由 + - 创建服务 +description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 +--- + + + +## 描述 + +本文将为你介绍使用限流限速和安全插件保护你的 API。 + +## 概念介绍 + +### 插件 + +[Plugin](./terminology/plugin.md) 也称之为插件,它是扩展 APISIX 应用层能力的关键机制,也是在使用 APISIX 时最常用的资源对象。插件主要是在 HTTP 请求或响应生命周期期间执行的、针对请求的个性化策略。插件可以与路由、服务或消费者绑定。 + +:::note 注意 + +如果 [路由](./terminology/route.md)、[服务](./terminology/service.md)、[插件配置](./terminology/plugin-config.md) 或消费者都绑定了相同的插件,则只有一份插件配置会生效,插件配置的优先级由高到低顺序是:消费者 > 路由 > 插件配置 > 服务。同时在插件执行过程中也会涉及 6 个阶段,分别是 `rewrite`、`access`、`before_proxy`、`header_filter`、`body_filter` 和 `log`。 + +::: + +## 前提条件 + +在进行该教程前,请确保你已经[公开服务](./expose-api.md)。 + +## 保护 API + +在很多时候,我们的 API 并不是处于一个非常安全的状态,它随时会收到不正常的访问,一旦访问流量突增,可能就会导致你的 API 发生故障,产生不必要的损失。因此你可以通过速率限制保护你的 API 服务,限制非正常的访问请求,保障 API 服务的稳定运行。对此,我们可以使用如下方式进行: + +1. 限制请求速率; +2. 限制单位时间内的请求数; +3. 延迟请求; +4. 拒绝客户端请求; +5. 限制响应数据的速率。 + +为了实现上述功能,APISIX 提供了多个限流限速的插件,包括 [limit-conn](./plugins/limit-conn.md)、[limit-count](./plugins/limit-count.md) 和 [limit-req](./plugins/limit-req.md)。 + +- `limit-conn` 插件主要用于限制客户端对服务的并发请求数。 +- `limit-req` 插件使用漏桶算法限制对用户服务的请求速率。 +- `limit-count` 插件主要用于在指定的时间范围内,限制每个客户端总请求个数。 + +接下来,我们将以 `limit-count` 插件为例,为你介绍如何通过限流限速插件保护你的 API。 + +1. 
创建路由。 + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream_id": "1" + } +}' + +``` + +以上配置中,使用了[公开服务](./expose-api.md)中创建的上游创建了一个 ID 为 `1` 的路由, ,并且启用了 `limit-count` 插件。该插件仅允许客户端在 60 秒内,访问上游服务 2 次,超过两次,则会返回 `503` 错误码。 + +2. 测试插件。 + +```shell + +curl http://127.0.0.1:9080/index.html + +``` + +使用上述命令连续访问三次后,则会出现如下错误。 + +``` + +503 Service Temporarily Unavailable + +

503 Service Temporarily Unavailable

+
openresty
+ + +``` + +返回上述结果,则表示 `limit-count` 插件已经配置成功。 + +## 流量控制插件 + +APISIX 除了提供限流限速的插件外,还提供了很多其他的关于 **traffic** 插件来满足实际场景的需求: + +- [proxy-cache](./plugins/proxy-cache.md):该插件提供缓存后端响应数据的能力,它可以和其他插件一起使用。该插件支持基于磁盘和内存的缓存。 +- [request-validation](./plugins/request-validation.md):该插件用于提前验证向上游服务转发的请求。 +- [proxy-mirror](./plugins/proxy-mirror.md):该插件提供了镜像客户端请求的能力。流量镜像是将线上真实流量拷贝到镜像服务中,以便在不影响线上服务的情况下,对线上流量或请求内容进行具体的分析。 +- [api-breaker](./plugins/api-breaker.md):该插件实现了 API 熔断功能,从而帮助我们保护上游业务服务。 +- [traffic-split](./plugins/traffic-split.md):该插件使用户可以逐步引导各个上游之间的流量百分比。,你可以使用该插件实现蓝绿发布,灰度发布。 +- [request-id](./plugins/request-id.md):该插件通过 APISIX 为每一个请求代理添加 `unique` ID 用于追踪 API 请求。 +- [proxy-control](./plugins/proxy-control.md):该插件能够动态地控制 NGINX 代理的相关行为。 +- [client-control](./plugins/client-control.md):该插件能够通过设置客户端请求体大小的上限来动态地控制 NGINX 处理客户端的请求。 + +## 更多操作 + +你可以参考[监控 API](./observe-your-api.md) 文档,对 APISIX 进行监控,日志采集,链路追踪等。 diff --git a/powered-by.md b/powered-by.md index 48b12a1a05b4..6b77a6e57704 100644 --- a/powered-by.md +++ b/powered-by.md @@ -97,6 +97,7 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. 数地科技 1. 微吼 1. 小鹏汽车 +1. Ideacreep diff --git a/rockspec/apisix-2.13.3-0.rockspec b/rockspec/apisix-2.13.3-0.rockspec new file mode 100644 index 000000000000..9814a44f13ea --- /dev/null +++ b/rockspec/apisix-2.13.3-0.rockspec @@ -0,0 +1,100 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +package = "apisix" +version = "2.13.3-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.13.3", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.8.3", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 2.24", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.1", + "lua-protobuf = 0.3.4", + "lua-resty-openidc = 1.7.2-1", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220127", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.07", + "lua-resty-logger-socket = 2.0-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.1", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 
0.4.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-2.15.0-0.rockspec b/rockspec/apisix-2.15.0-0.rockspec new file mode 100644 index 000000000000..31ab2e23f00a --- /dev/null +++ b/rockspec/apisix-2.15.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "2.15.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.15.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.8.2", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.3.4", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220527", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.1", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.5.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + 
LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} + diff --git a/rockspec/apisix-2.99.0-0.rockspec b/rockspec/apisix-2.99.0-0.rockspec new file mode 100644 index 000000000000..21fdb7f66402 --- /dev/null +++ b/rockspec/apisix-2.99.0-0.rockspec @@ -0,0 +1,104 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "2.99.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.99.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.9.0", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.1", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.3.4", + "grpc-client-nginx-module = 0.2.2", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220527", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.5.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + 
LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec index 88b4886a6a97..6f3150fa00cc 100644 --- a/rockspec/apisix-master-0.rockspec +++ b/rockspec/apisix-master-0.rockspec @@ -34,12 +34,12 @@ dependencies = { "lua-resty-ctxdump = 0.1-0", "lua-resty-dns-client = 6.0.2", "lua-resty-template = 2.0", - "lua-resty-etcd = 1.8.0", + "lua-resty-etcd = 1.9.0", "api7-lua-resty-http = 0.2.0", "lua-resty-balancer = 0.04", "lua-resty-ngxvar = 0.5.2", "lua-resty-jit-uuid = 0.0.7", - "lua-resty-healthcheck-api7 = 2.2.0", + "lua-resty-healthcheck-api7 = 2.2.1", "api7-lua-resty-jwt = 0.2.4", "lua-resty-hmac-ffi = 0.05", "lua-resty-cookie = 0.1.0", @@ -47,7 +47,8 @@ dependencies = { "opentracing-openresty = 0.1", "lua-resty-radixtree = 2.8.2", "lua-protobuf = 0.3.4", - "lua-resty-openidc = 1.7.2-1", + "grpc-client-nginx-module = 0.2.2", + "lua-resty-openidc = 1.7.5", "luafilesystem = 1.7.0-2", "api7-lua-tinyyaml = 0.4.2", "nginx-lua-prometheus = 0.20220527", @@ -60,14 +61,14 @@ dependencies = { "binaryheap = 0.4", "api7-dkjson = 0.1.1", "resty-redis-cluster = 1.02-4", - "lua-resty-expr = 1.3.1", + "lua-resty-expr = 1.3.2", "graphql = 0.0.2", "argparse = 0.7.1-1", "luasocket = 3.0rc1-2", "luasec = 0.9-1", "lua-resty-consul = 0.3-2", "penlight = 1.9.2-1", - "ext-plugin-proto = 0.5.0", + "ext-plugin-proto = 0.6.0", "casbin = 1.41.1", "api7-snowflake = 2.0-1", "inspect == 3.1.1", @@ -77,7 +78,8 @@ dependencies = { "net-url = 0.9-1", "xml2lua = 1.5-2", "nanoid = 0.1-1", - "lua-resty-mediador = 0.1.2-1" + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0" } 
build = { diff --git a/t/APISIX.pm b/t/APISIX.pm index 0143aa9d82ba..2af517abd8bb 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -33,6 +33,13 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); $ENV{TEST_NGINX_FAST_SHUTDOWN} ||= 1; +Test::Nginx::Socket::set_http_config_filter(sub { + my $config = shift; + my $snippet = `$apisix_home/t/bin/gen_snippet.lua conf_server`; + $config .= $snippet; + return $config; +}); + sub read_file($) { my $infile = shift; open my $in, "$apisix_home/$infile" @@ -90,13 +97,14 @@ my $ssl_ecc_crt = read_file("t/certs/apisix_ecc.crt"); my $ssl_ecc_key = read_file("t/certs/apisix_ecc.key"); my $test2_crt = read_file("t/certs/test2.crt"); my $test2_key = read_file("t/certs/test2.key"); +my $etcd_pem = read_file("t/certs/etcd.pem"); +my $etcd_key = read_file("t/certs/etcd.key"); $user_yaml_config = <<_EOC_; apisix: node_listen: 1984 stream_proxy: tcp: - 9100 - admin_key: null enable_resolv_search_opt: false _EOC_ @@ -104,9 +112,13 @@ my $etcd_enable_auth = $ENV{"ETCD_ENABLE_AUTH"} || "false"; if ($etcd_enable_auth eq "true") { $user_yaml_config .= <<_EOC_; -etcd: - user: root - password: 5tHkHhYkjr6cQY +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + user: root + password: 5tHkHhYkjr6cQY _EOC_ } @@ -177,6 +189,19 @@ my $grpc_location = <<_EOC_; apisix.grpc_access_phase() } +_EOC_ + +if ($version =~ m/\/apisix-nginx-module/) { + $grpc_location .= <<_EOC_; + grpc_set_header ":authority" \$upstream_host; +_EOC_ +} else { + $grpc_location .= <<_EOC_; + grpc_set_header "Host" \$upstream_host; +_EOC_ +} + +$grpc_location .= <<_EOC_; grpc_set_header Content-Type application/grpc; grpc_socket_keepalive on; grpc_pass \$upstream_scheme://apisix_backend; @@ -220,8 +245,11 @@ add_block_preprocessor(sub { $user_yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: 
yaml _EOC_ } @@ -241,6 +269,8 @@ _EOC_ if ($version =~ m/\/apisix-nginx-module/) { $main_config .= <<_EOC_; +thread_pool grpc-client-nginx-module threads=1; + lua { lua_shared_dict prometheus-metrics 15m; } @@ -463,6 +493,11 @@ _EOC_ $block->set_value("main_config", $main_config); + # The new directive is introduced here to modify the schema + # before apisix validate in require("apisix") + # Todo: merge extra_init_by_lua_start and extra_init_by_lua + my $extra_init_by_lua_start = $block->extra_init_by_lua_start // ""; + my $extra_init_by_lua = $block->extra_init_by_lua // ""; my $init_by_lua_block = $block->init_by_lua_block // <<_EOC_; if os.getenv("APISIX_ENABLE_LUACOV") == "1" then @@ -472,11 +507,19 @@ _EOC_ require "resty.core" + $extra_init_by_lua_start + apisix = require("apisix") local args = { dns_resolver = $dns_addrs_tbl_str, } apisix.http_init(args) + + -- set apisix_lua_home into constans module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "$apisix_home" + $extra_init_by_lua _EOC_ @@ -505,9 +548,12 @@ _EOC_ lua_shared_dict etcd-cluster-health-check 10m; # etcd health check lua_shared_dict ext-plugin 1m; lua_shared_dict kubernetes 1m; + lua_shared_dict kubernetes-first 1m; + lua_shared_dict kubernetes-second 1m; lua_shared_dict tars 1m; lua_shared_dict xds-config 1m; lua_shared_dict xds-config-version 1m; + lua_shared_dict cas_sessions 10m; proxy_ssl_name \$upstream_host; proxy_ssl_server_name on; @@ -561,17 +607,11 @@ _EOC_ require("apisix").http_init_worker() $extra_init_worker_by_lua } -_EOC_ - if ($version !~ m/\/1.17.8/) { - $http_config .= <<_EOC_; exit_worker_by_lua_block { require("apisix").http_exit_worker() } -_EOC_ - } - $http_config .= <<_EOC_; log_format main escape=default '\$remote_addr - \$remote_user [\$time_local] \$http_host "\$request" \$status \$body_bytes_sent \$request_time "\$http_referer" "\$http_user_agent" \$upstream_addr 
\$upstream_status \$upstream_response_time "\$upstream_scheme://\$upstream_host\$upstream_uri"'; # fake server, only for test @@ -604,6 +644,7 @@ _EOC_ more_clear_headers Date; } + # this configuration is needed as error_page is configured in http block location \@50x.html { set \$from_error_page 'true'; content_by_lua_block { @@ -824,6 +865,19 @@ _EOC_ my $yaml_config = $block->yaml_config // $user_yaml_config; + my $default_deployment = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +_EOC_ + + if ($yaml_config !~ m/deployment:/) { + $yaml_config = $default_deployment . $yaml_config; + } + if ($block->extra_yaml_config) { $yaml_config .= $block->extra_yaml_config; } @@ -850,6 +904,10 @@ $ssl_ecc_key $test2_crt >>> ../conf/cert/test2.key $test2_key +>>> ../conf/cert/etcd.pem +$etcd_pem +>>> ../conf/cert/etcd.key +$etcd_key $user_apisix_yaml _EOC_ diff --git a/t/admin/api.t b/t/admin/api.t index 98007aecdf42..43e5ac163c84 100644 --- a/t/admin/api.t +++ b/t/admin/api.t @@ -24,7 +24,7 @@ add_block_preprocessor(sub { my ($block) = @_; if (!$block->request) { - $block->set_value("request", "GET /t"); + $block->set_value("request", "GET /apisix/admin/routes"); } if (!$block->no_error_log && !$block->error_log) { @@ -37,37 +37,87 @@ run_tests; __DATA__ === TEST 1: Server header for admin API ---- config - location /t { - content_by_lua_block { - local http = require("resty.http") - local httpc = http.new() - uri = ngx.var.scheme .. "://" .. ngx.var.server_addr - .. ":" .. ngx.var.server_port .. 
"/apisix/admin/routes" - local res, err = httpc:request_uri(uri) - ngx.say(res.headers["Server"]) - } - } ---- response_body eval -qr/APISIX\// +--- response_headers_like +Server: APISIX/(.*) === TEST 2: Server header for admin API without token --- yaml_config +deployment: + admin: + admin_key: + - key: a + name: a + role: admin apisix: node_listen: 1984 enable_server_tokens: false ---- config - location /t { - content_by_lua_block { - local http = require("resty.http") - local httpc = http.new() - uri = ngx.var.scheme .. "://" .. ngx.var.server_addr - .. ":" .. ngx.var.server_port .. "/apisix/admin/routes" - local res, err = httpc:request_uri(uri) - ngx.say(res.headers["Server"]) - } - } ---- response_body -APISIX +--- error_code: 401 +--- response_headers +Server: APISIX + + + +=== TEST 3: Version header for admin API (without apikey) +--- yaml_config +deployment: + admin: + admin_key: + - key: a + name: a + role: admin +apisix: + admin_api_version: default +--- error_code: 401 +--- response_headers +! 
X-API-VERSION + + + +=== TEST 4: Version header for admin API (v2) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v2 +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v2 + + + +=== TEST 5: Version header for admin API (v3) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v3 + + + +=== TEST 6: CORS header for admin API +--- response_headers +Access-Control-Allow-Origin: * + + + +=== TEST 7: CORS header disabled for admin API +--- yaml_config +deployment: + admin: + admin_key: ~ + enable_admin_cors: false +--- response_headers +Access-Control-Allow-Origin: diff --git a/t/admin/consumers.t b/t/admin/consumers.t index 9ba284b3e494..8e6862f1a2f0 100644 --- a/t/admin/consumers.t +++ b/t/admin/consumers.t @@ -38,13 +38,11 @@ __DATA__ "desc": "new consumer" }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer" - } + "value": { + "username": "jack", + "desc": "new consumer" }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -86,18 +84,16 @@ passed } }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "plugins": { - "key-auth": { - "key": "auth-one" - } + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" } } }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -129,18 +125,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "plugins": { - "key-auth": { - "key": "auth-one" - } + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" } } }, - "action": "get" + "key": "/apisix/consumers/jack" 
}]] ) @@ -164,10 +158,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/jack', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.status = code ngx.say(body) @@ -189,11 +181,8 @@ passed local t = require("lib.test_admin").test local code = t('/apisix/admin/consumers/not_found', ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + nil + ) ngx.say("[delete] code: ", code) } } @@ -217,12 +206,9 @@ GET /t "id":"jack" }]], [[{ - "node": { - "value": { - "id": "jack" - } - }, - "action": "set" + "value": { + "id": "jack" + } }]] ) @@ -257,18 +243,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "labels": { - "build":"16", - "env":"production", - "version":"v2" - } + "value": { + "username": "jack", + "desc": "new consumer", + "labels": { + "build":"16", + "env":"production", + "version":"v2" } }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -353,15 +337,13 @@ GET /t "update_time": 1602893670 }]], [[{ - "node": { - "value": { - "username": "pony", - "desc": "new consumer", - "create_time": 1602883670, - "update_time": 1602893670 - } + "value": { + "username": "pony", + "desc": "new consumer", + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/consumers/pony" }]] ) @@ -385,10 +367,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/pony', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.status = code ngx.say(body) diff --git a/t/admin/consumers2.t b/t/admin/consumers2.t index 3c296b79339f..6e351d02be96 100644 --- a/t/admin/consumers2.t +++ b/t/admin/consumers2.t @@ -59,13 +59,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil 
ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/consumers/jack","value":{"username":"jack"}}} +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} @@ -87,18 +87,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/consumers/jack","value":{"username":"jack"}}} +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} @@ -124,7 +125,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/consumers/jack","node":{}} +{"deleted":"1","key":"/apisix/consumers/jack"} @@ -150,7 +151,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/consumers","nodes":[]}} +{"list":[],"total":0} diff --git a/t/admin/filter.t b/t/admin/filter.t new file mode 100644 index 000000000000..98844b186d9a --- /dev/null +++ b/t/admin/filter.t @@ -0,0 +1,813 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +worker_connections(1024); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +apisix: + node_listen: 1984 +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: bad page_size(page_size must be between 10 and 500) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/?page=1&page_size=2', + ngx.HTTP_GET + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body +page_size must be between 10 and 500 + + + +=== TEST 2: ignore bad page and would use default value 1 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=-1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: sort by createdIndex +# the smaller the createdIndex, the higher the ranking +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + for i = 1, #res.list - 1 do + assert(res.list[i].createdIndex < res.list[i + 1].createdIndex) + end + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: routes pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/routes/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/routes/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: services pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/services/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/services/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: only search name or labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "labels": {"]] .. i .. '":"' .. i .. 
[["} + }]] + ) + end + + ngx.sleep(0.5) + + local matched = {1, 10, 11} + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the name are 1, 10, 11 + assert(#res.list == 3) + + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + code, body, res = t('/apisix/admin/services/?label=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the label are 1, 10, 11 + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: services filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: routes filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: filter with pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/services/?name=1&page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + -- we do filtering first now, so it will first filter to 1, 10, 11, and then paginate + -- res will contain 1, 10, 11 instead of just 1, 10. + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: routes filter with uri +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: match labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello2", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match labels' keys + local code, body, res = t('/apisix/admin/routes/?label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + -- don't match labels' values + code, body, res = t('/apisix/admin/routes/?label=production', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: match uris +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/foo", "/bar"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=world', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: match uris & labels +# uris are same in different routes, filter by labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "build": "16" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match route 1 + local code, body, res = t('/apisix/admin/routes/?uri=world&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: match uri & labels +# uri is same in different routes, filter by labels +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=hello&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: filtered data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 11) + + local code, body, res = t('/apisix/admin/routes/?label=', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 0) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: pagination data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes?page=1&page_size=10', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 10) + + local code, body, res = t('/apisix/admin/routes?page=10&page_size=10', ngx.HTTP_GET) + res = 
json.decode(res) + assert(res.total == 11) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/admin/global-rules.t b/t/admin/global-rules.t index 422652a46c74..8f454c834fe3 100644 --- a/t/admin/global-rules.t +++ b/t/admin/global-rules.t @@ -45,20 +45,17 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "set" + "key": "/apisix/global_rules/1" }]] ) @@ -90,20 +87,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "get" + "key": "/apisix/global_rules/1" }]] ) @@ -129,27 +123,23 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "dir": true, - "nodes": [ + "total": 1, + "list": [ { "key": "/apisix/global_rules/1", "value": { - "plugins": { - "limit-count": { - "time_window": 60, - "policy": "local", - "count": 2, - "key": "remote_addr", - "rejected_code": 503 + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } } } - } } - ], - "key": "/apisix/global_rules" - }, - "action": "get" + ] }]] ) @@ -191,20 +181,17 @@ passed } }}]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - 
"key": "/apisix/global_rules/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/global_rules/1" }]] ) @@ -252,20 +239,17 @@ passed } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/global_rules/1" }]] ) @@ -295,12 +279,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -319,12 +299,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code) } } @@ -441,13 +417,13 @@ passed end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -479,13 +455,13 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} 
+{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -510,18 +486,19 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -550,7 +527,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/global_rules/1","node":{}} +{"deleted":"1","key":"/apisix/global_rules/1"} --- request GET /t --- no_error_log diff --git a/t/admin/global-rules2.t b/t/admin/global-rules2.t index 6ff033b52f43..345f67f62283 100644 --- a/t/admin/global-rules2.t +++ b/t/admin/global-rules2.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/global_rules","nodes":[]}} +{"list":[],"total":0} @@ -88,13 +88,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} 
+{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} @@ -116,11 +116,18 @@ __DATA__ end res = json.decode(res) - ngx.say(json.encode(res)) + assert(res.total == 1) + assert(#res.list == 1) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/global_rules/1") + assert(res.list[1].value ~= nil) + + ngx.say(message) } } --- response_body_like -{"action":"get","count":1,"node":\{"dir":true,"key":"/apisix/global_rules","nodes":.* +passed @@ -130,12 +137,8 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/health-check.t b/t/admin/health-check.t index d26e92428c9b..1b0a9ca57ce1 100644 --- a/t/admin/health-check.t +++ b/t/admin/health-check.t @@ -43,11 +43,8 @@ add_block_preprocessor(sub { "uri": "/index.html" }]]) exp_data = { - node = { - value = req_data, - key = "/apisix/routes/1", - }, - action = "set", + value = req_data, + key = "/apisix/routes/1", } _EOC_ @@ -87,9 +84,9 @@ __DATA__ } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks - local code, body = t('/apisix/admin/routes/1', + local code, body, res = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data, exp_data @@ -130,7 +127,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -318,7 +315,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0"] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body 
= t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -352,7 +349,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0", "Accept: */*"] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -386,7 +383,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0", 2233] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -422,7 +419,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -460,8 +457,8 @@ passed } } }]]) - exp_data.node.value.upstream.checks.active = req_data.upstream.checks.active - exp_data.node.value.upstream.checks.passive = { + exp_data.value.upstream.checks.active = req_data.upstream.checks.active + exp_data.value.upstream.checks.passive = { type = "http", healthy = { http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, @@ -512,7 +509,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/admin/plugin-configs.t b/t/admin/plugin-configs.t index 1f0da8a2a463..852631666eaf 100644 --- a/t/admin/plugin-configs.t +++ b/t/admin/plugin-configs.t @@ -57,20 +57,17 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "set" + "key": "/apisix/plugin_configs/1" 
}]] ) @@ -98,20 +95,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "get" + "key": "/apisix/plugin_configs/1" }]] ) @@ -133,27 +127,23 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "dir": true, - "nodes": [ + "total": 1, + "list": [ { "key": "/apisix/plugin_configs/1", "value": { - "plugins": { - "limit-count": { - "time_window": 60, - "policy": "local", - "count": 2, - "key": "remote_addr", - "rejected_code": 503 + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } } } - } } - ], - "key": "/apisix/plugin_configs" - }, - "action": "get" + ] }]] ) @@ -191,20 +181,17 @@ passed } }}]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/plugin_configs/1" }]] ) @@ -248,20 +235,17 @@ passed } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/plugin_configs/1" }]] ) @@ -332,24 +316,21 @@ passed "desc": "blah" }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - 
"rejected_code": 503, - "key": "remote_addr" - } - }, - "labels": { - "你好": "世界" - }, - "desc": "blah" + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" }, - "key": "/apisix/plugin_configs/1" + "desc": "blah" }, - "action": "set" + "key": "/apisix/plugin_configs/1" }]] ) @@ -377,24 +358,21 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } - }, - "labels": { - "你好": "世界" - }, - "desc": "blah" + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" }, - "key": "/apisix/plugin_configs/1" + "desc": "blah" }, - "action": "get" + "key": "/apisix/plugin_configs/1" }]] ) @@ -502,10 +480,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_configs/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.print(body) } } @@ -521,10 +497,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.say(body) } } @@ -540,10 +514,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_configs/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.say(body) } } diff --git a/t/admin/plugin-metadata.t b/t/admin/plugin-metadata.t index 5dccf2c0cd2d..5874e4e039c5 100644 --- a/t/admin/plugin-metadata.t +++ b/t/admin/plugin-metadata.t @@ -38,15 +38,13 @@ __DATA__ "ikey": 1 }]], [[{ - "node": { - "value": { - "skey": "val", - "ikey": 1 - } + "value": { + "skey": "val", + "ikey": 1 }, - "action": "set" + "key": 
"/apisix/plugin_metadata/example-plugin" }]] - ) + ) ngx.status = code ngx.say(body) @@ -73,15 +71,12 @@ passed "ikey": 2 }]], [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "set" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -94,15 +89,12 @@ passed "ikey": 2 }]], [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "set" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.say(code) ngx.say(body) @@ -128,15 +120,12 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "get" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -157,11 +146,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) ngx.status = code ngx.say(body) @@ -181,13 +166,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/plugin_metadata/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/plugin_metadata/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } } @@ -206,15 +185,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata', - ngx.HTTP_PUT, - [[{"k": "v"}]], + ngx.HTTP_PUT, + [[{"k": "v"}]], [[{ - "node": { - "value": "sdf" - }, - "action": "set" + "value": "sdf" }]] - ) + ) ngx.status = code ngx.print(body) @@ -236,15 +212,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata/test', - ngx.HTTP_PUT, - [[{"k": "v"}]], + ngx.HTTP_PUT, + [[{"k": "v"}]], [[{ 
- "node": { - "value": "sdf" - }, - "action": "set" + "value": "sdf" }]] - ) + ) ngx.status = code ngx.print(body) @@ -271,15 +244,12 @@ GET /t "skey": "val" }]], [[{ - "node": { - "value": { - "skey": "val", - "ikey": 1 - } - }, - "action": "set" + "value": { + "skey": "val", + "ikey": 1 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -302,12 +272,12 @@ qr/\{"error_msg":"invalid configuration: property \\"ikey\\" is required"\}/ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_PUT, + ngx.HTTP_PUT, [[{ "skey": "val", "ikey": 1 }]] - ) + ) if code >= 300 then ngx.status = code @@ -316,13 +286,11 @@ qr/\{"error_msg":"invalid configuration: property \\"ikey\\" is required"\}/ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}}} +{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}} --- request GET /t --- no_error_log @@ -336,9 +304,7 @@ GET /t content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -347,14 +313,17 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(res.count ~= nil) - res.count = nil + + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}}} 
+{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}} --- request GET /t --- no_error_log @@ -368,9 +337,7 @@ GET /t content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_DELETE - ) + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code @@ -383,7 +350,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/plugin_metadata/example-plugin","node":{}} +{"deleted":"1","key":"/apisix/plugin_metadata/example-plugin"} --- request GET /t --- no_error_log diff --git a/t/admin/plugin-metadata2.t b/t/admin/plugin-metadata2.t index 190b9ae20faf..8bce8182ec39 100644 --- a/t/admin/plugin-metadata2.t +++ b/t/admin/plugin-metadata2.t @@ -45,9 +45,7 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/plugin_metadata', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -60,4 +58,4 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/plugin_metadata","nodes":[]}} +{"list":[],"total":0} diff --git a/t/admin/plugins-reload.t b/t/admin/plugins-reload.t index e4841f1bf808..c301acf194d4 100644 --- a/t/admin/plugins-reload.t +++ b/t/admin/plugins-reload.t @@ -95,9 +95,14 @@ location /t { ngx.sleep(0.5) local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null apisix: node_listen: 1984 - admin_key: null plugins: - jwt-auth stream_plugins: @@ -133,7 +138,6 @@ filter(): [{"name":"jwt-auth"},{"name":"mqtt-proxy","stream":true}] --- yaml_config apisix: node_listen: 1984 - admin_key: null plugins: - example-plugin plugin_attr: @@ -145,9 +149,14 @@ location /t 
{ local core = require "apisix.core" ngx.sleep(0.1) local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null apisix: node_listen: 1984 - admin_key: null plugins: - example-plugin plugin_attr: @@ -165,9 +174,14 @@ plugin_attr: ngx.sleep(0.1) local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null apisix: node_listen: 1984 - admin_key: null plugins: - example-plugin plugin_attr: @@ -207,7 +221,6 @@ example-plugin get plugin attr val: 1 --- yaml_config apisix: node_listen: 1984 - admin_key: null plugins: - public-api - prometheus @@ -238,9 +251,14 @@ location /t { ngx.say(code) local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null apisix: node_listen: 1984 - admin_key: null plugins: - public-api - prometheus @@ -275,7 +293,6 @@ done --- yaml_config apisix: node_listen: 1984 - admin_key: null plugins: - skywalking plugin_attr: @@ -292,9 +309,14 @@ location /t { local t = require("lib.test_admin").test local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null apisix: node_listen: 1984 - admin_key: null plugins: - prometheus ]] diff --git a/t/admin/plugins.t b/t/admin/plugins.t index d7881249d40e..b13919138f08 100644 --- a/t/admin/plugins.t +++ b/t/admin/plugins.t @@ -78,6 +78,7 @@ csrf uri-blocker request-validation openid-connect +cas-auth authz-casbin authz-casdoor wolf-rbac @@ -93,6 +94,7 @@ authz-keycloak proxy-mirror proxy-cache proxy-rewrite +workflow api-breaker limit-conn limit-count @@ -108,6 +110,7 @@ grpc-web public-api prometheus datadog +elasticsearch-logger echo loggly http-logger @@ -122,10 +125,12 @@ syslog udp-logger file-logger clickhouse-logger +tencent-cloud-cls example-plugin aws-lambda azure-functions openwhisk +openfunction serverless-post-function ext-plugin-post-req ext-plugin-post-resp @@ 
-150,7 +155,7 @@ GET /apisix/admin/plugins ngx.HTTP_GET, nil, [[ - {"type":"object","required":["rate","burst","key"],"properties":{"rate":{"type":"number","exclusiveMinimum":0},"key_type":{"type":"string","enum":["var","var_combination"],"default":"var"},"burst":{"type":"number","minimum":0},"disable":{"type":"boolean"},"nodelay":{"type":"boolean","default":false},"key":{"type":"string"},"rejected_code":{"type":"integer","minimum":200,"maximum":599,"default":503},"rejected_msg":{"type":"string","minLength":1},"allow_degradation":{"type":"boolean","default":false}}} + {"type":"object","required":["rate","burst","key"],"properties":{"rate":{"type":"number","exclusiveMinimum":0},"key_type":{"type":"string","enum":["var","var_combination"],"default":"var"},"burst":{"type":"number","minimum":0},"nodelay":{"type":"boolean","default":false},"key":{"type":"string"},"rejected_code":{"type":"integer","minimum":200,"maximum":599,"default":503},"rejected_msg":{"type":"string","minLength":1},"allow_degradation":{"type":"boolean","default":false}}} ]] ) @@ -172,7 +177,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"type":"object"} +{"properties":{},"type":"object"} ]] ) @@ -191,7 +196,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"type":"object"} +{"properties":{},"type":"object"} ]] ) @@ -210,7 +215,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) @@ -265,7 +270,7 @@ plugins: } } --- response_body eval -qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin 
schema","properties":\{"_meta":\{"properties":\{"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\},"priority":\{"description":"priority of plugins by customized order","type":"integer"\}\},"type":"object"\},"disable":\{"type":"boolean"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/ +qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"properties":\{"disable":\{"type":"boolean"\},"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\},"filter":\{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"\},"priority":\{"description":"priority of plugins by customized order","type":"integer"\}\},"type":"object"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/ @@ -366,15 +371,12 @@ qr/\{"properties":\{"password":\{"type":"string"\},"username":\{"type":"string"\ } } --- response_body -{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"properties":{"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"description":"priority of plugins by customized 
order","type":"integer"}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"disable":{"type":"boolean"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1} +{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"properties":{"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"filter":{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"},"priority":{"description":"priority of plugins by customized order","type":"integer"}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1} === TEST 12: confirm the scope of plugin ---- yaml_config -apisix: - node_listen: 1984 - admin_key: null +--- extra_yaml_config plugins: - batch-requests - error-log-logger diff --git a/t/admin/proto.t b/t/admin/proto.t index 3a05a26df9a8..e560ffffcd20 100644 --- a/t/admin/proto.t +++ b/t/admin/proto.t @@ -43,7 +43,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', + local code, message = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; @@ -61,15 +61,10 @@ __DATA__ // Sends a greeting rpc SayHi (HelloRequest) returns (HelloResponse){} }" - }]], - [[ - { - "action": "set" - 
} - ]] - ) + }]] + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code ngx.say("[put proto] code: ", code, " message: ", message) return @@ -79,7 +74,7 @@ __DATA__ } } --- response_body -[put proto] code: 200 message: passed +[put proto] code: 201 message: passed @@ -88,13 +83,9 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) if code ~= 200 then ngx.status = code @@ -115,7 +106,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/2', + local code, message = t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; @@ -133,15 +124,10 @@ __DATA__ // Sends a greeting rpc SayHi (HelloRequest) returns (HelloResponse){} }" - }]], - [[ - { - "action": "set" - } - ]] - ) + }]] + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code ngx.say("[put proto] code: ", code, " message: ", message) return @@ -155,7 +141,9 @@ __DATA__ "methods": ["GET"], "plugins": { "grpc-transcode": { - "disable": false, + "_meta": { + "disable": false + }, "method": "SayHi", "proto_id": 2, "service": "proto.Hello" @@ -169,13 +157,10 @@ __DATA__ }, "uri": "/grpc/sayhi", "name": "hi-grpc" - }]], - [[{ - "action": "set" }]] - ) + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code ngx.say("[route refer proto] code: ", code, " message: ", message) return @@ -184,20 +169,16 @@ __DATA__ ngx.sleep(0.1) -- ensure reference is synced from etcd - code, message = t('/apisix/admin/proto/2', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/protos/2', + ngx.HTTP_DELETE + ) ngx.say("[delete proto] code: ", code) } } --- response_body -[put proto] code: 200 message: passed -[route refer proto] code: 
200 message: passed +[put proto] code: 201 message: passed +[route refer proto] code: 201 message: passed [delete proto] code: 400 @@ -207,7 +188,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', + local code, message = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; diff --git a/t/admin/protos.t b/t/admin/protos.t new file mode 100644 index 000000000000..320c6179ee7f --- /dev/null +++ b/t/admin/protos.t @@ -0,0 +1,77 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test /apisix/admin/protos/{id} +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 2; + } + // The greeting service definition. + service Hello { + // Sends a greeting + rpc SayHi (HelloRequest) returns (HelloResponse){} + }" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[put proto] code: ", code, " message: ", message) + return + end + + ngx.say("[put proto] code: ", code, " message: ", message) + } + } +--- response_body +[put proto] code: 201 message: passed diff --git a/t/admin/response_body_format.t b/t/admin/response_body_format.t new file mode 100644 index 000000000000..86f4e5d809e4 --- /dev/null +++ b/t/admin/response_body_format.t @@ -0,0 +1,255 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +apisix: + node_listen: 1984 +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: use v3 admin api, no action in response body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: response body format only have total and list (total is 1) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = 
t('/apisix/admin/routes', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + res = json.decode(res) + assert(res.total == 1) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: response body format only have total and list (total is 2) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/routes/2") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 4: response body format (test services) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 
1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/services/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, message, res = t('/apisix/admin/services', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/services/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/services/2") + ngx.say(message) + } + } +--- response_body +passed +passed +passed diff --git a/t/admin/routes-array-nodes.t b/t/admin/routes-array-nodes.t index c9b141883a28..c25b016df786 100644 --- a/t/admin/routes-array-nodes.t +++ b/t/admin/routes-array-nodes.t @@ -47,27 +47,24 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -88,30 +85,27 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = 
t('/apisix/admin/routes/1', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }, - "action": "get" + "key": "/apisix/routes/1" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/routes.t b/t/admin/routes.t index a16ccdbdfab4..1e7575fe7887 100644 --- a/t/admin/routes.t +++ b/t/admin/routes.t @@ -45,23 +45,20 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -87,23 +84,20 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "get" + "key": "/apisix/routes/1" }]] ) @@ -126,12 +120,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] 
code: ", code, " message: ", message) } } @@ -150,12 +140,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code = t('/apisix/admin/routes/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code) } } @@ -187,21 +173,18 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] ) @@ -212,8 +195,7 @@ GET /t end ngx.say("[push] code: ", code, " message: ", message) - - local id = string.sub(res.node.key, #"/apisix/routes/" + 1) + local id = string.sub(res.key, #"/apisix/routes/" + 1) local res = assert(etcd.get('/routes/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") @@ -221,12 +203,8 @@ GET /t assert(update_time ~= nil, "update_time is nil") code, message = t('/apisix/admin/routes/' .. 
id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -259,18 +237,15 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "set" + } }]] ) @@ -318,20 +293,17 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } - }, - "action": "set" + } }]] ) @@ -671,7 +643,9 @@ GET /t "time_window": 60, "rejected_code": 503, "key": "remote_addr", - "disable": true + "_meta": { + "disable": true + } } }, "uri": "/index.html" @@ -714,20 +688,17 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "host": "*.foo.com", - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "host": "*.foo.com", + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes2.t b/t/admin/routes2.t index 42cbc805b120..fce777957910 100644 --- a/t/admin/routes2.t +++ b/t/admin/routes2.t @@ -194,18 +194,18 @@ GET /t end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + res.key = nil + res.value.create_time = nil + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil 
ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"create","node":{"value":{"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/not_unwanted_data_post"}}} +{"value":{"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/not_unwanted_data_post"}} --- no_error_log [error] @@ -239,15 +239,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"set","node":{"key":"/apisix/routes/1","value":{"id":1,"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index.html"}}} +{"key":"/apisix/routes/1","value":{"id":1,"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index.html"}} --- no_error_log [error] @@ -280,15 +280,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}}} +{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} --- 
no_error_log [error] @@ -311,20 +311,21 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"get","node":{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}}} +{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} --- no_error_log [error] @@ -353,7 +354,7 @@ GET /t --- request GET /t --- response_body -{"action":"delete","deleted":"1","key":"/apisix/routes/1","node":{}} +{"deleted":"1","key":"/apisix/routes/1"} --- no_error_log [error] @@ -550,25 +551,22 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "您好": "世界" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "您好": "世界" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes3.t b/t/admin/routes3.t index 60e4d4134655..331f1b2d4573 100644 --- a/t/admin/routes3.t +++ b/t/admin/routes3.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body 
-{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/routes","nodes":[]}} +{"list":[],"total":0} @@ -82,20 +82,17 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "uri": "/index.html" + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "uri": "/index.html" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -128,20 +125,17 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.0/24", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.0/24", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "uri": "/index.html" + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "uri": "/index.html" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -235,13 +229,10 @@ passed "uri": "/patch_test" }]], [[{ - "node": { - "value": { - "uri": "/patch_test" - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/patch_test" }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -278,23 +269,20 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/patch_test", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.2:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/patch_test", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.2:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -318,13 +306,10 @@ passed "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"] }]], [[{ - "node": { - "value": { - "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"] - }, - "key": "/apisix/routes/1" + "value": { + "methods": ["GET", 
"DELETE", "PATCH", "POST", "PUT"] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -348,13 +333,10 @@ passed "methods": ["GET", "POST"] }]], [[{ - "node": { - "value": { - "methods": ["GET", "POST"] - }, - "key": "/apisix/routes/1" + "value": { + "methods": ["GET", "POST"] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -376,15 +358,12 @@ passed ngx.HTTP_PATCH, '["POST"]', [[{ - "node": { - "value": { - "methods": [ - "POST" - ] - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "POST" + ] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -406,13 +385,10 @@ passed ngx.HTTP_PATCH, '"/patch_uri_test"', [[{ - "node": { - "value": { - "uri": "/patch_uri_test" - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/patch_uri_test" }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -444,23 +420,20 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -492,10 +465,8 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "hosts": ["foo.com", "*.bar.com"] - } + "value": { + "hosts": ["foo.com", "*.bar.com"] } }]] ) @@ -559,10 +530,8 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"] - } + "value": { + "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"] } }]] ) @@ -595,10 +564,8 @@ passed "desc": "new route" }]=], [=[{ - "node": { - "value": { - "vars": [["arg_name", "==", "json"], ["arg_age", ">", 18]] - } + "value": { + "vars": [["arg_name", "==", 
"json"], ["arg_age", ">", 18]] } }]=] ) @@ -630,10 +597,8 @@ passed } }]=], [=[{ - "node": { - "value": { - "filter_func": "function(vars) return vars.arg_name == 'json' end" - } + "value": { + "filter_func": "function(vars) return vars.arg_name == 'json' end" } }]=] ) @@ -738,13 +703,10 @@ passed ngx.HTTP_PATCH, 'false', [[{ - "node": { - "value": { - "enable_websocket": false - }, - "key": "/apisix/routes/1" + "value": { + "enable_websocket": false }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -766,13 +728,10 @@ passed ngx.HTTP_PATCH, 'true', [[{ - "node": { - "value": { - "enable_websocket": true - }, - "key": "/apisix/routes/1" + "value": { + "enable_websocket": true }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes4.t b/t/admin/routes4.t index 158d6ad6bdc8..3c799be8be90 100644 --- a/t/admin/routes4.t +++ b/t/admin/routes4.t @@ -69,12 +69,10 @@ location /t { ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "uri": "/index.html" - }, - "key": "/apisix/routes/1" - } + "value": { + "uri": "/index.html" + }, + "key": "/apisix/routes/1" }]] ) @@ -119,7 +117,7 @@ location /t { }, "uri": "/index.html" }]], - [[{"action": "create"}]] + [[{}]] ) if code >= 300 then @@ -131,7 +129,7 @@ location /t { ngx.say("[push] succ: ", body) ngx.sleep(2.5) - local id = string.sub(res.node.key, #"/apisix/routes/" + 1) + local id = string.sub(res.key, #"/apisix/routes/" + 1) code, body = t('/apisix/admin/routes/' .. 
id, ngx.HTTP_GET) ngx.say("code: ", code) @@ -198,13 +196,10 @@ location /t { "uri": "/index.html" }]], [[{ - "node": { - "value": { - "priority": 0 - }, - "key": "/apisix/routes/1" + "value": { + "priority": 0 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -237,13 +232,10 @@ passed "priority": 1 }]], [[{ - "node": { - "value": { - "priority": 1 - }, - "key": "/apisix/routes/1" + "value": { + "priority": 1 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -389,13 +381,10 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "name": "test name" - }, - "key": "/apisix/routes/1" + "value": { + "name": "test name" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -664,27 +653,24 @@ failed to read request body: request size 1678025 is greater than the maximum si "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "build": "16", - "env": "production", - "version": "v2" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "build": "16", + "env": "production", + "version": "v2" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -710,27 +696,24 @@ passed } }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "env": "production", - "version": "v2", - "build": "17" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "env": "production", + "version": "v2", + "build": "17" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -788,21 +771,18 @@ passed 
"update_time": 1602893670 }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "create_time": 1602883670, - "update_time": 1602893670 + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -821,12 +801,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/services-array-nodes.t b/t/admin/services-array-nodes.t index 7ca2c6cb5f8a..2d0251377c72 100644 --- a/t/admin/services-array-nodes.t +++ b/t/admin/services-array-nodes.t @@ -32,8 +32,8 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": [{ "host": "127.0.0.1", @@ -45,23 +45,20 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - }, - "desc": "new service" + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -85,23 +82,20 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - }, - "desc": "new service" + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 
1 + }], + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/1" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/services-string-id.t b/t/admin/services-string-id.t index 15160c5a6c63..4c786b81d1b8 100644 --- a/t/admin/services-string-id.t +++ b/t/admin/services-string-id.t @@ -43,21 +43,18 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -78,24 +75,21 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -115,13 +109,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -139,13 +127,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = 
t('/apisix/admin/services/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -165,8 +147,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -175,19 +157,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -197,14 +176,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/services/" + 1) - code, message = t('/apisix/admin/services/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/services/" + 1) + code, message = t('/apisix/admin/services/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -225,8 +198,8 @@ GET /t local core = require("apisix.core") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -235,19 +208,17 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) if code ~= 200 then ngx.status = code @@ -274,8 +245,8 @@ GET /t local core = require("apisix.core") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "plugins": { "limit-count": { "count": 2, @@ -286,21 +257,19 @@ GET /t } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) if code ~= 200 then ngx.status = code @@ -326,8 +295,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/*invalid_id$', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "plugins": { "limit-count": { "count": 2, @@ -337,7 +306,7 @@ GET /t } } }]] - ) + ) ngx.exit(code) } @@ -356,12 +325,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "3", "plugins": {} }]] - ) + ) ngx.status = code 
ngx.print(body) @@ -383,21 +352,18 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "plugins": {} }]], [[{ - "node": { - "value": { - "plugins": {} - }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "value": { + "plugins": {} }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -418,12 +384,12 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": -100, "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -450,7 +416,7 @@ GET /t "id": "*invalid_id$", "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -472,12 +438,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "upstream_id": "invalid$" }]] - ) + ) ngx.status = code ngx.print(body) @@ -499,12 +465,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "upstream_id": "9999999999" }]] - ) + ) ngx.status = code ngx.print(body) @@ -526,11 +492,11 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -557,7 +523,7 @@ GET /t "id": "5eeb3dc90f747328b2930b0b", "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -590,19 +556,16 @@ GET /t "desc": "new 20 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": 
"roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 20 service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new 20 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] ) @@ -630,19 +593,16 @@ passed "desc": "new 19 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 19 service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new 19 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] ) @@ -676,16 +636,14 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -710,8 +668,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -719,7 +677,8 @@ passed "type": "chash" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -741,8 +700,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -751,7 +710,8 @@ GET /t "hash_on": "header" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -773,8 +733,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = 
t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -783,7 +743,8 @@ GET /t "hash_on": "cookie" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -805,8 +766,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -815,7 +776,8 @@ GET /t "hash_on": "consumer" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.say(code .. " " .. body) diff --git a/t/admin/services.t b/t/admin/services.t index 409e47c8ca4f..7bdfb9e5522c 100644 --- a/t/admin/services.t +++ b/t/admin/services.t @@ -44,19 +44,16 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -88,19 +85,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/1" }]] ) @@ -122,13 +116,8 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message,res = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) } } @@ -146,13 +135,7 @@ GET /t location /t { content_by_lua_block { 
local t = require("lib.test_admin").test - local code = t('/apisix/admin/services/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -183,17 +166,14 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] ) @@ -205,20 +185,14 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/services/" + 1) + local id = string.sub(res.key, #"/apisix/services/" + 1) local res = assert(etcd.get('/services/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - code, message = t('/apisix/admin/services/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/services/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -249,17 +223,14 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "set" + } }]] ) @@ -300,19 +271,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } - }, - "action": "set" + } }]] ) @@ -370,8 +338,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 3, "plugins": {} }]] @@ -397,19 +365,16 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "1", "plugins": {} }]], [[{ - "node": { - "value": { - "plugins": {} - }, - "key": "/apisix/services/1" + "value": { + "plugins": {} }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -432,8 +397,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": -100, "plugins": {} }]] @@ -459,8 +424,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "invalid_id$", "plugins": {} }]] @@ -486,8 +451,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "upstream_id": "invalid$" }]] @@ -513,8 +478,8 @@ GET /t content_by_lua_block 
{ local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "upstream_id": "9999999999" }]] @@ -540,8 +505,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "plugins": {} }]] ) @@ -612,19 +577,16 @@ GET /t "desc": "new 20 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 20 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 20 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -658,19 +620,16 @@ passed "desc": "new 19 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 19 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 19 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -704,16 +663,14 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -749,19 +706,16 @@ passed "desc": "new 22 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 22 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 22 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -787,19 +741,16 @@ passed 
ngx.HTTP_PATCH, '"new 23 service"', [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 23 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 23 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -831,15 +782,13 @@ passed "type": "roundrobin" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.2:8081": 3, - "127.0.0.3:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.2:8081": 3, + "127.0.0.3:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -1001,19 +950,16 @@ GET /t "name": "test service name" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "name": "test service name" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "name": "test service name" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1070,10 +1016,9 @@ GET /t ngx.HTTP_PUT, '{}', [[{ - "node": { - "value": {"id":"1"} - }, - "action": "set" + "value": { + "id":"1" + } }]] ) @@ -1115,30 +1060,27 @@ passed } }]], [[{ - "node":{ - "value":{ - "desc":"empty service", - "plugins":{ - "limit-count":{ - "time_window":60, - "count":2, - "rejected_code":503, - "key":"remote_addr", - "policy":"local" - } - }, - "upstream":{ - "type":"roundrobin", - "nodes":{ - "127.0.0.1:80":1 - }, - "hash_on":"vars", - "pass_host":"pass" + "value":{ + "desc":"empty service", + "plugins":{ + "limit-count":{ + "time_window":60, + "count":2, + "rejected_code":503, + "key":"remote_addr", + "policy":"local" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 }, - "id":"1" - } - }, - "action":"compareAndSwap" + "hash_on":"vars", + "pass_host":"pass" + }, + "id":"1" + } }]] ) @@ -1177,24 +1119,21 @@ passed 
"desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "build": "16", - "env": "production", - "version": "v2" - }, - "desc": "new service" + "type": "roundrobin" + }, + "labels": { + "build": "16", + "env": "production", + "version": "v2" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1224,24 +1163,21 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - }, - "labels": { - "build": "17", - "env": "production", - "version": "v2" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "labels": { + "build": "17", + "env": "production", + "version": "v2" + }, + "desc": "new service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -1311,20 +1247,17 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - "update_time": 1602893670 - } - }, - "key": "/apisix/services/1" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 + } }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1346,13 +1279,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -1370,10 +1297,8 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = 
t('/apisix/admin/services/1', - ngx.HTTP_PUT, - require("toolkit.json").encode({name = ("1"):rep(101)}) - ) + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PUT, + require("toolkit.json").encode({name = ("1"):rep(101)})) ngx.status = code ngx.print(body) @@ -1406,19 +1331,16 @@ GET /t "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/a.b" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/a.b" }]] ) diff --git a/t/admin/services2.t b/t/admin/services2.t index 9cbe8e725727..a47592badf74 100644 --- a/t/admin/services2.t +++ b/t/admin/services2.t @@ -63,16 +63,18 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"create","node":{"value":{"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"value":{"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -101,13 +103,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} 
+{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -136,13 +138,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -152,9 +154,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/services/1', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/services/1', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -163,18 +163,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ 
-199,7 +200,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/services/1","node":{}} +{"deleted":"1","key":"/apisix/services/1"} @@ -237,8 +238,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "service_id": 1, "uri": "/index.html" }]] @@ -261,11 +262,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.print("[delete] code: ", code, " message: ", message) } } @@ -280,11 +277,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -299,11 +292,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/ssl.t b/t/admin/ssl.t index 0232d21102fe..e8bf91064c7c 100644 --- a/t/admin/ssl.t +++ b/t/admin/ssl.t @@ -34,24 +34,21 @@ __DATA__ local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" 
}]] ) ngx.status = code ngx.say(body) - local res = assert(etcd.get('/ssl/1')) + local res = assert(etcd.get('/ssls/1')) local prev_create_time = res.body.node.value.create_time assert(prev_create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time @@ -73,19 +70,15 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/ssl/1', + local code, body = t('/apisix/admin/ssls/1', ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "sni": "test.com", - "key": null - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "key": null }, - "action": "get" + "key": "/apisix/ssls/1" }]] ) @@ -107,13 +100,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/ssl/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -131,13 +118,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/ssl/99999999999999', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/ssls/99999999999999', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } } @@ -161,16 +142,13 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "foo.com"} - local code, message, res = t.test('/apisix/admin/ssl', + local code, message, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "foo.com" - } - }, - "action": "create" + "value": { + "sni": "foo.com" + } }]] ) @@ -182,14 +160,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/ssl/" + 1) - code, message = t.test('/apisix/admin/ssl/' .. 
id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + code, message = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -214,17 +186,14 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {sni = "foo.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "foo.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "foo.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -253,17 +222,14 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.foo.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.foo.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.foo.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -294,17 +260,14 @@ passed snis = {"*.foo.com", "bar.com"}, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "snis": ["*.foo.com", "bar.com"] - }, - "key": "/apisix/ssl/1" + "value": { + "snis": ["*.foo.com", "bar.com"] }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -336,18 +299,15 @@ passed exptime = 1588262400 + 60 * 60 * 24 * 365, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "bar.com", - "exptime": 1619798400 - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "bar.com", + "exptime": 1619798400 }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -375,7 +335,7 @@ passed local ssl_key = 
t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', ngx.HTTP_PUT, core.json.encode(data) ) @@ -405,7 +365,7 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', ngx.HTTP_DELETE ) if code > 300 then @@ -434,7 +394,7 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/*invalid', + local code, body = t.test('/apisix/admin/ssls/*invalid', ngx.HTTP_PUT, core.json.encode(data) ) @@ -471,17 +431,14 @@ GET /t keys = {ssl_ecc_key} } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -513,17 +470,14 @@ passed keys = {}, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -552,23 +506,19 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { version = "v2", build = "16", env = "production"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "labels": { - "version": 
"v2", - "build": "16", - "env": "production" - } - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "labels": { + "version": "v2", + "build": "16", + "env": "production" + } }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -596,21 +546,17 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { env = {"production", "release"}}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "labels": { - "env": ["production", "release"] - } - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "labels": { + "env": ["production", "release"] + } }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -647,21 +593,18 @@ GET /t validity_end = 1603893670 } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "create_time": 1602883670, - "update_time": 1602893670, - "validity_start": 1602873670, - "validity_end": 1603893670 - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "create_time": 1602883670, + "update_time": 1602893670, + "validity_start": 1602873670, + "validity_end": 1603893670 }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -683,13 +626,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/ssl/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -714,18 +651,15 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body, res = t.test('/apisix/admin/ssl', + 
local code, body, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - } - }, - "action": "create" + "value": { + "sni": "test.com" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -733,17 +667,17 @@ GET /t return end - local id = string.sub(res.node.key, #"/apisix/ssl/" + 1) - local res = assert(etcd.get('/ssl/' .. id)) + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + local res = assert(etcd.get('/ssls/' .. id)) local prev_create_time = res.body.node.value.create_time assert(prev_create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - local code, body = t.test('/apisix/admin/ssl/' .. id, + local code, body = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_PATCH, core.json.encode({create_time = 0, update_time = 1}) - ) + ) if code ~= 201 then ngx.status = code @@ -751,16 +685,14 @@ GET /t return end - local res = assert(etcd.get('/ssl/' .. id)) + local res = assert(etcd.get('/ssls/' .. id)) local create_time = res.body.node.value.create_time assert(create_time == 0, "create_time mismatched") local update_time = res.body.node.value.update_time assert(update_time == 1, "update_time mismatched") -- clean up - local code, body = t.test('/apisix/admin/ssl/' .. id, - ngx.HTTP_DELETE - ) + local code, body = t.test('/apisix/admin/ssls/' .. 
id, ngx.HTTP_DELETE) ngx.status = code ngx.say(body) } @@ -785,14 +717,11 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "key": "/apisix/ssl/1" - }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -821,14 +750,11 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {type = "client", cert = ssl_cert, key = ssl_key} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "key": "/apisix/ssl/1" - }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) diff --git a/t/admin/ssl2.t b/t/admin/ssl2.t index 865652ce2e89..da286db1a44b 100644 --- a/t/admin/ssl2.t +++ b/t/admin/ssl2.t @@ -48,7 +48,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "not-unwanted-post.com"} - local code, message, res = t.test('/apisix/admin/ssl', + local code, message, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, json.encode(data) ) @@ -60,18 +60,23 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"create","node":{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}}} +{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}} @@ -84,7 +89,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -96,15 +101,19 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}} @@ -117,7 +126,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "t.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, json.encode(data) ) @@ -129,15 +138,19 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + 
assert(res.value.key ~= nil) + res.value.key = "" ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}} @@ -147,7 +160,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin") - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_GET ) @@ -158,21 +171,22 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(value.cert ~= nil) - value.cert = "" - assert(value.key == nil) - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key == nil) ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}} @@ -185,7 +199,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE ) @@ -200,7 +214,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/ssl/1","node":{}} 
+{"deleted":"1","key":"/apisix/ssls/1"} @@ -217,7 +231,7 @@ BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= -----END CERTIFICATE----- ]], key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -250,7 +264,7 @@ MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== -----END RSA PRIVATE KEY-----]], sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -288,7 +302,7 @@ U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= }, keys = {ssl_key} } - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -324,7 +338,7 @@ jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== -----END RSA PRIVATE KEY-----]]} } - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -353,7 +367,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, snis = {}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -382,7 +396,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, snis 
= {"test.com"}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -395,7 +409,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local data = {"update1.com", "update2.com"} - local code, message, res = t.test('/apisix/admin/ssl/1/snis', + local code, message, res = t.test('/apisix/admin/ssls/1/snis', ngx.HTTP_PATCH, json.encode(data) ) @@ -417,7 +431,6 @@ qr/"snis":\["update1.com","update2.com"\]/ --- yaml_config apisix: node_listen: 1984 - admin_key: null ssl: key_encrypt_salt: "edd1c9f0985e76a2" --- config @@ -429,7 +442,7 @@ apisix: local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, certs = {ssl_cert}, keys = {ssl_key}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, json.encode(data) ) @@ -441,8 +454,8 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.key == ssl_key) - ngx.say(res.node.value.keys[1] == ssl_key) + ngx.say(res.value.key == ssl_key) + ngx.say(res.value.keys[1] == ssl_key) } } --- response_body @@ -455,7 +468,6 @@ false --- yaml_config apisix: node_listen: 1984 - admin_key: null ssl: key_encrypt_salt: "edd1c9f0985e76a2" --- config @@ -465,7 +477,7 @@ apisix: local t = require("lib.test_admin") local ssl_key = t.read_file("t/certs/apisix.key") - local code, message, res = t.test('/apisix/admin/ssl/1/keys', + local code, message, res = t.test('/apisix/admin/ssls/1/keys', ngx.HTTP_PATCH, json.encode({ssl_key}) ) @@ -477,7 +489,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.keys[1] == ssl_key) + ngx.say(res.value.keys[1] == ssl_key) } } --- response_body diff --git a/t/admin/ssl3.t b/t/admin/ssl3.t index cb09b5119223..f9c1cd0ca3bd 100644 --- a/t/admin/ssl3.t +++ b/t/admin/ssl3.t @@ -45,7 +45,7 @@ __DATA__ local json = 
require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/ssl', + local code, message, res = t('/apisix/admin/ssls', ngx.HTTP_GET ) @@ -60,4 +60,4 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/ssl","nodes":[]}} +{"list":[],"total":0} diff --git a/t/admin/ssl4.t b/t/admin/ssl4.t new file mode 100644 index 000000000000..2c1403407d85 --- /dev/null +++ b/t/admin/ssl4.t @@ -0,0 +1,357 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +add_block_preprocessor( sub{ + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + my $TEST_NGINX_HTML_DIR ||= html_dir(); + + my $config = <<_EOC_; +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\\r\\nHost: www.test.com\\r\\nConnection: close\\r\\n\\r\\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +_EOC_ + + if (!$block->config) { + $block->set_value("config", $config) + } +} + +); + + +run_tests; + +__DATA__ + +=== TEST 1: set ssl(sni: www.test.com), encrypt with the first key_encrypt_salt +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: + - edd1c9f0985e76a1 + - edd1c9f0985e76a2 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") 
+ local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: "edd1c9f0985e76a1" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request with the old style key_encrypt_salt +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: "edd1c9f0985e76a1" +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 4: client request with the new style key_encrypt_salt +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: + - edd1c9f0985e76a1 +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 5: client request failed with the wrong key_encrypt_salt +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: + - edd1c9f0985e76a2 +--- error_log +decrypt ssl key failed +[alert] + + + +=== TEST 6: client request successfully, use the two key_encrypt_salt to decrypt in turn +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: + - edd1c9f0985e76a2 + - edd1c9f0985e76a1 +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +[alert] + + + +=== TEST 7: remove test ssl certs +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: + - edd1c9f0985e76a1 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + } +} + + + +=== TEST 8: set ssl(sni: www.test.com), do not encrypt +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: client request without key_encrypt_salt +--- 
yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: null +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 10: remove test ssl certs +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + } +} diff --git a/t/admin/ssls.t b/t/admin/ssls.t new file mode 100644 index 000000000000..675275628443 --- /dev/null +++ b/t/admin/ssls.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test /apisix/admin/ssls/{id} +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local etcd = require("apisix.core.etcd") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/ssls/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed diff --git a/t/admin/stream-routes-disable.t b/t/admin/stream-routes-disable.t index 7752663bf68e..7d7ffbb090ef 100644 --- a/t/admin/stream-routes-disable.t +++ b/t/admin/stream-routes-disable.t @@ -29,7 +29,6 @@ add_block_preprocessor(sub { my $user_yaml_config = <<_EOC_; apisix: node_listen: 1984 - admin_key: null _EOC_ $block->set_value("yaml_config", $user_yaml_config); diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t index 01062fbc84f8..8710d88eaa04 100644 --- a/t/admin/stream-routes.t +++ b/t/admin/stream-routes.t @@ -46,21 +46,18 @@ __DATA__ "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "desc": "test-desc", - "upstream": { - 
"nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "desc": "test-desc", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" + "type": "roundrobin" }, - "key": "/apisix/stream_routes/1" + "desc": "new route" }, - "action": "set" + "key": "/apisix/stream_routes/1" }]] ) @@ -93,20 +90,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" + "type": "roundrobin" }, - "key": "/apisix/stream_routes/1" + "desc": "new route" }, - "action": "get" + "key": "/apisix/stream_routes/1" }]] ) @@ -128,13 +122,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/stream_routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -166,19 +154,16 @@ GET /t "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" - } - }, - "action": "create" + "type": "roundrobin" + }, + "desc": "new route" + } }]] ) @@ -190,7 +175,7 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/stream_routes/" + 1) + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) local ret = assert(etcd.get('/stream_routes/' .. 
id)) local create_time = ret.body.node.value.create_time @@ -200,13 +185,7 @@ GET /t id = ret.body.node.value.id assert(id ~= nil, "id is nil") - code, message = t('/apisix/admin/stream_routes/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/stream_routes/' .. id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -232,12 +211,19 @@ GET /t "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1980 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] } }]] ) @@ -271,12 +257,19 @@ passed "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1980 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] } }]] ) @@ -301,13 +294,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/stream_routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -357,9 +344,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', - ngx.HTTP_DELETE - ) + local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code end @@ -433,7 +418,7 @@ GET /t res = json.decode(res) -- clean data - local id = string.sub(res.node.key, #"/apisix/stream_routes/" + 1) + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) local code, message = 
t('/apisix/admin/stream_routes/' .. id, ngx.HTTP_DELETE ) @@ -444,16 +429,19 @@ GET /t return end - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"create","node":{"value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -487,13 +475,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -518,17 +508,19 @@ GET /t end res = json.decode(res) - assert(res.count ~= nil) - assert(res.node.value.create_time ~= nil) - assert(res.node.value.update_time ~= nil) - res.count = nil - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + 
assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -557,7 +549,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/stream_routes/1","node":{}} +{"deleted":"1","key":"/apisix/stream_routes/1"} --- request GET /t --- no_error_log diff --git a/t/admin/token.t b/t/admin/token.t index 22308e1c9016..43cdf3605fca 100644 --- a/t/admin/token.t +++ b/t/admin/token.t @@ -27,6 +27,13 @@ add_block_preprocessor(sub { my ($block) = @_; my $user_yaml_config = <<_EOC_; +deployment: + admin: + admin_key: + - name: admin + role: admin + key: edd1c9f034335f136f87ad84b625c8f1 + apisix: node_listen: 1984 _EOC_ diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t index 16855526c1c7..e7220c13d851 100644 --- a/t/admin/upstream-array-nodes.t +++ b/t/admin/upstream-array-nodes.t @@ -43,21 +43,18 @@ __DATA__ "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin", - "desc": "new upstream" - }, - "key": "/apisix/upstreams/1" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -81,21 +78,18 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { 
- "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin", - "desc": "new upstream" - }, - "key": "/apisix/upstreams/1" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" }, - "action": "get" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -115,13 +109,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -139,13 +127,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/upstreams/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -175,19 +157,16 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "action": "create" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -197,14 +176,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1) - code, message = t('/apisix/admin/upstreams/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) + code, message = t('/apisix/admin/upstreams/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -230,7 +203,7 @@ GET /t "nodes": [], "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -263,7 +236,7 @@ passed "upstream_id": "1", "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -310,7 +283,7 @@ no valid upstream node "_service_name": "xyz", "_discovery_type": "nacos" }]] - ) + ) ngx.status = code ngx.say(body) @@ -341,7 +314,7 @@ passed }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -373,7 +346,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -405,7 +378,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -437,7 +410,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -473,7 +446,7 @@ GET /t }, "uri": "/index.html" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/upstream.t b/t/admin/upstream.t index 16bfb5157b7b..12681780cd17 100644 --- a/t/admin/upstream.t +++ b/t/admin/upstream.t @@ -43,19 +43,16 @@ so that we can delete it later) "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/admin_up" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "set" + "key": "/apisix/upstreams/admin_up" }]] - ) + ) ngx.status = code ngx.say(body) @@ -85,19 +82,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/admin_up" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "get" + "key": "/apisix/upstreams/admin_up" }]] - ) + ) ngx.status = code ngx.say(body) @@ -117,13 +111,7 @@ passed location /t { content_by_lua_block { local t = 
require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/admin_up', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/admin_up', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -141,13 +129,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/upstreams/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -176,17 +158,14 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "action": "create" + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -196,20 +175,14 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1) + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) local res = assert(etcd.get('/upstreams/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - code, message = t('/apisix/admin/upstreams/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/upstreams/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -236,7 +209,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.exit(code) } @@ -263,7 +236,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -294,18 +267,15 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -334,7 +304,7 @@ passed }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -364,7 +334,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -396,7 +366,7 @@ GET /t "_service_name": "xyz", "_discovery_type": "nacos" }]] - ) + ) ngx.status = code ngx.say(body) @@ -426,19 +396,16 @@ passed "type": "chash" }]], [[{ - "node": { - "value": { - "key": "remote_addr", - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "chash" + "value": { + "key": "remote_addr", + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "chash" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -467,7 +434,7 @@ passed }, "type": "unknown" }]] - ) + ) ngx.status = code ngx.print(body) @@ -496,7 +463,7 @@ passed }, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -518,15 +485,15 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "nodes": { "127.0.0.1:8080": -100 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -548,14 +515,14 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 
}, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -577,14 +544,14 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -606,15 +573,15 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "id": 1, "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -651,9 +618,9 @@ GET /t } } local code, body = t.test('/apisix/admin/upstreams', - ngx.HTTP_POST, - core.json.encode(data) - ) + ngx.HTTP_POST, + core.json.encode(data) + ) ngx.status = code ngx.print(body) @@ -686,9 +653,9 @@ qr/{"error_msg":"invalid configuration: property \\\"tls\\\" validation failed: } } local code, body = t.test('/apisix/admin/upstreams', - ngx.HTTP_POST, - core.json.encode(data) - ) + ngx.HTTP_POST, + core.json.encode(data) + ) ngx.status = code ngx.print(body) @@ -717,7 +684,7 @@ GET /t cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/admin/upstream2.t b/t/admin/upstream2.t index 7f22d1149052..618861c74310 100644 --- a/t/admin/upstream2.t +++ b/t/admin/upstream2.t @@ -61,16 +61,19 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"create","node":{"value":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"value":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -81,14 +84,14 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -97,13 +100,15 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -114,14 +119,14 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_PATCH, - [[{ + ngx.HTTP_PATCH, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -130,13 +135,15 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"compareAndSwap","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -146,9 +153,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -157,18 +162,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -178,9 +184,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_DELETE - ) + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code @@ -193,7 +197,7 @@ __DATA__ } } --- 
response_body -{"action":"delete","deleted":"1","key":"/apisix/upstreams/unwanted","node":{}} +{"deleted":"1","key":"/apisix/upstreams/unwanted"} @@ -204,12 +208,12 @@ __DATA__ local core = require("apisix.core") local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": {}, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -238,7 +242,7 @@ passed "upstream_id": "1", "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -269,8 +273,8 @@ no valid upstream node content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -281,7 +285,7 @@ no valid upstream node "read": 0 } }]] - ) + ) ngx.status = code ngx.print(body) } diff --git a/t/admin/upstream3.t b/t/admin/upstream3.t index 070c8b3d2368..e40e24e99b4a 100644 --- a/t/admin/upstream3.t +++ b/t/admin/upstream3.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/upstreams","nodes":[]}} +{"list":[],"total":0} @@ -139,17 +139,14 @@ __DATA__ "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -179,17 +176,14 @@ passed "desc": "new 21 upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new 21 upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new 21 upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -216,16 +210,14 @@ passed } }]], [[{ - 
"node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin", - "desc": "new 21 upstream" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin", + "desc": "new 21 upstream" } }]] ) @@ -253,15 +245,13 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 0 - }, - "type": "roundrobin", - "desc": "new 21 upstream" - } + "value": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 + }, + "type": "roundrobin", + "desc": "new 21 upstream" } }]] ) @@ -290,17 +280,14 @@ passed "desc": "new upstream 24" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream 24" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream 24" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -322,17 +309,14 @@ passed ngx.HTTP_PATCH, '"new 25 upstream"', [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new 25 upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new 25 upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -357,15 +341,13 @@ passed "127.0.0.7:8082": 4 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.6:8081": 3, - "127.0.0.7:8082": 4 - }, - "type": "roundrobin", - "desc": "new 25 upstream" - } + "value": { + "nodes": { + "127.0.0.6:8081": 3, + "127.0.0.7:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" } }]] ) @@ -391,15 +373,13 @@ passed "127.0.0.8:8082": 4 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.7:8081": 0, - "127.0.0.8:8082": 4 - }, - "type": "roundrobin", - "desc": "new 25 upstream" - } + "value": { + "nodes": { + 
"127.0.0.7:8081": 0, + "127.0.0.8:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" } }]] ) @@ -419,15 +399,15 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "server_name", "nodes": { "127.0.0.1:8080": 1 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.say(body) @@ -454,7 +434,7 @@ passed "key": "not_support", "desc": "new upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -472,8 +452,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "arg_device_id", "nodes": { "127.0.0.1:8080": 1 @@ -481,7 +461,7 @@ passed "type": "chash", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -498,15 +478,15 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "server_name", "nodes": { "127.0.0.1:8080": 1 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.say(body) @@ -533,7 +513,7 @@ passed "key": "not_support", "desc": "new upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -560,7 +540,7 @@ passed "type": "chash", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -577,8 +557,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "arg_device_id", "nodes": { "127.0.0.1:8080": 1 @@ -587,7 +567,7 @@ passed "hash_on": "vars", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -604,8 +584,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "custom_header", 
"nodes": { "127.0.0.1:8080": 1 @@ -614,7 +594,7 @@ passed "hash_on": "header", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -631,8 +611,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "$#^@", "nodes": { "127.0.0.1:8080": 1 @@ -641,7 +621,7 @@ passed "hash_on": "header", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -659,8 +639,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "custom_cookie", "nodes": { "127.0.0.1:8080": 1 @@ -669,7 +649,7 @@ passed "hash_on": "cookie", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -686,8 +666,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "$#^@abc", "nodes": { "127.0.0.1:8080": 1 @@ -696,7 +676,7 @@ passed "hash_on": "cookie", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -714,8 +694,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -723,7 +703,7 @@ passed "hash_on": "consumer", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -740,8 +720,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -750,7 +730,7 @@ passed "key": "invalid-key", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -767,8 +747,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test 
local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "dsadas", "nodes": { "127.0.0.1:8080": 1 @@ -777,7 +757,7 @@ passed "hash_on": "aabbcc", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) diff --git a/t/admin/upstream4.t b/t/admin/upstream4.t index 1f55fc92ea46..99c840f944f1 100644 --- a/t/admin/upstream4.t +++ b/t/admin/upstream4.t @@ -53,17 +53,14 @@ __DATA__ "name": "test upstream name" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "name": "test upstream name" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "name": "test upstream name" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] ) @@ -106,9 +103,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', - ngx.HTTP_DELETE - ) + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code end @@ -201,40 +196,14 @@ passed -=== TEST 7: invalid route: multi nodes with `node` mode to pass host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ - "nodes": { - "httpbin.org:8080": 1, - "test.com:8080": 1 - }, - "type": "roundrobin", - "pass_host": "node" - }]] - ) - - ngx.status = code - ngx.print(body) - } - } ---- skip_nginx: 5: > 1.19.0 ---- error_code: 400 - - - -=== TEST 8: invalid route: empty `upstream_host` when `pass_host` is `rewrite` +=== TEST 7: invalid route: empty `upstream_host` when `pass_host` is `rewrite` --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "httpbin.org:8080": 1, "test.com:8080": 1 
@@ -243,7 +212,7 @@ passed "pass_host": "rewrite", "upstream_host": "" }]] - ) + ) ngx.status = code ngx.print(body) @@ -253,7 +222,7 @@ passed -=== TEST 9: set upstream(with labels) +=== TEST 8: set upstream(with labels) --- config location /t { content_by_lua_block { @@ -272,23 +241,20 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "build":"16", - "env":"production", - "version":"v2" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -299,30 +265,27 @@ passed -=== TEST 10: get upstream(with labels) +=== TEST 9: get upstream(with labels) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "version":"v2", - "build":"16", - "env":"production" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"16", + "env":"production" + } }, - "action": "get" + "key": "/apisix/upstreams/1" }]] ) @@ -335,7 +298,7 @@ passed -=== TEST 11: patch upstream(only labels) +=== TEST 10: patch upstream(only labels) --- config location /t { content_by_lua_block { @@ -348,23 +311,20 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "version":"v2", - "build":"17", - "env":"production" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"17", + "env":"production" + } }, - "action": 
"compareAndSwap" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -375,7 +335,7 @@ passed -=== TEST 12: invalid format of label value: set upstream +=== TEST 11: invalid format of label value: set upstream --- config location /t { content_by_lua_block { @@ -391,7 +351,7 @@ passed "env": ["production", "release"] } }]] - ) + ) ngx.status = code ngx.print(body) @@ -403,7 +363,7 @@ passed -=== TEST 13: patch upstream(whole, create_time) +=== TEST 12: patch upstream(whole, create_time) --- config location /t { content_by_lua_block { @@ -421,18 +381,15 @@ passed "create_time": 1705252779 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream", - "create_time": 1705252779 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -453,7 +410,7 @@ passed -=== TEST 14: patch upstream(whole, update_time) +=== TEST 13: patch upstream(whole, update_time) --- config location /t { content_by_lua_block { @@ -471,18 +428,15 @@ passed "update_time": 1705252779 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream", - "create_time": 1705252779 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -503,7 +457,7 @@ passed -=== TEST 15: create upstream with create_time and update_time +=== TEST 14: create upstream with create_time and update_time --- config location /t { content_by_lua_block { @@ -519,20 +473,17 @@ passed "update_time": 1602893670 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - 
"update_time": 1602893670 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/up_create_update_time" + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/upstreams/up_create_update_time" }]] - ) + ) ngx.status = code ngx.say(body) @@ -543,18 +494,12 @@ passed -=== TEST 16: delete test upstream +=== TEST 15: delete test upstream --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/up_create_update_time', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/up_create_update_time', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -563,7 +508,7 @@ passed -=== TEST 17: patch upstream with sub_path, the data is number +=== TEST 16: patch upstream with sub_path, the data is number --- config location /t { content_by_lua_block { @@ -589,8 +534,8 @@ passed ngx.sleep(1) local code, message = t('/apisix/admin/upstreams/1/retries', - ngx.HTTP_PATCH, - json.encode(1) + ngx.HTTP_PATCH, + json.encode(1) ) if code >= 300 then ngx.status = code @@ -606,20 +551,20 @@ passed -=== TEST 18: set upstream(id: 1) +=== TEST 17: set upstream(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.say(body) @@ -630,17 +575,17 @@ passed -=== TEST 19: set service(id: 1) +=== TEST 18: set service(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream_id": 1 }]] - ) + ) if code >= 300 then ngx.status = code @@ -653,18 +598,18 @@ passed -=== TEST 20: set route(id: 1) 
+=== TEST 19: set route(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream_id": 1, "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -677,17 +622,13 @@ passed -=== TEST 21: delete upstream(id: 1) +=== TEST 20: delete upstream(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.print("[delete] code: ", code, " message: ", message) } } @@ -696,17 +637,13 @@ passed -=== TEST 22: delete route(id: 1) +=== TEST 21: delete route(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -715,17 +652,13 @@ passed -=== TEST 23: delete service(id: 1) +=== TEST 22: delete service(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -734,17 +667,13 @@ passed -=== TEST 24: delete upstream(id: 1) +=== TEST 23: delete upstream(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = 
t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/bin/gen_snippet.lua b/t/bin/gen_snippet.lua index 085409b6b5ae..9ef21c32805b 100755 --- a/t/bin/gen_snippet.lua +++ b/t/bin/gen_snippet.lua @@ -33,6 +33,13 @@ local yaml_conf, err = file.read_yaml_conf("t/servroot") if not yaml_conf then error(err) end + +if yaml_conf.deployment.role == "data_plane" and + yaml_conf.deployment.config_provider == "yaml" + or yaml_conf.deployment.config_provider == "xds" then + return +end + local ok, err = schema.validate(yaml_conf) if not ok then error(err) diff --git a/t/certs/localhost_slapd_cert.pem b/t/certs/localhost_slapd_cert.pem new file mode 100644 index 000000000000..6140ea5f630c --- /dev/null +++ b/t/certs/localhost_slapd_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIECDCCAnCgAwIBAgIUc40/PofbLcrqu/2MJMEkYfrxB+4wDQYJKoZIhvcNAQEL +BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMMCHRlc3QuY29tMB4XDTIy +MDgwMjA1NDI1OFoXDTIzMDgwMjA1NDI1OFowLjESMBAGA1UEAxMJbG9jYWxob3N0 +MRgwFgYDVQQKEw9FeGFtcGxlIENvbXBhbnkwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQCxE5zfta69uPsQVDiV0OwWHDGxTBYNzmp5zsVwOF3bOH+hyB4M ++qFxPEuH84/Ib4GJdLM67qZth1azHudKy/QGPFkoeFUW1JhB9QGyjh/URwxTy05b +Ce5w7Ee1rMV/GWu6fxMfIE3o5U0XuW1IKQFaZVdNuQlvG4VjL59BfnEF+YXb1QDB +kIpvf59q+UuZgit8CrO1dDYeJ/xO3N9v2CS2u6si9/XWgIwayw67tmb7cbTu/srB +C99w97IMP5/Vkeu6fkg2jTuvCRARzMQJ11krDmtGeYum9SSCdyTLxK1u7w33DuhQ +3HE/PfHJj9QV1MKIeruVjEvawJsRiWQG0Ai7AgMBAAGjdjB0MAwGA1UdEwEB/wQC +MAAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNVHQ4E +FgQUcGOrPCoztq5Z7mjgGtaCkPkmDWowHwYDVR0jBBgwFoAUmbUr1fJgcJdG6ZLx +bYMojlFHG7MwDQYJKoZIhvcNAQELBQADggGBABNOTIiLHNQJfyV20UxcyzZ9xTuc +DuMzEexWJ6S33yJTyp5jni0vFaF9wnT1MOtp+Zizz0hQq0d+GvsmBzjkDdipFqUB +Dt4517l4Z/H4n4FV0jhqQhhzcPRWI5H2MNU0Ezno1iCaKD29Kq61fo2qrU7SNDre +RjnGueTW6u+YLj1ss+UK2rTCRX/Nqqz+MrvIift5Kj4c/8sAD3Zn2aXlH0dXSTcX 
+DaqNDPQvcdlqNMRSJSthLXYBn40Ro6mH7uA+e4aIVn4jyYvyb8qY5LhQPesTcJZw +IEDmIgFEIh0k1YoGvLD6TkMdKPUG536zH+4iZjKpwGwNQ/dTBgn4+5UOqguiYgXd +MP/eeXSCGLAIjQ4+i1ghv1eAlHuHSQ3Dm75icpAL7VHFdoI7I3wqeE5+IyrUXjX0 +s1bCjIuwGxgoBBTzv25OijmTmMcLYDp04PR5qSwckvsrrxHr+2ujeqS+AGxzZ4Sk +N1JSJL69zUwfCVdE3mR+6OmmDcuVlB3u+grLFQ== +-----END CERTIFICATE----- diff --git a/t/certs/localhost_slapd_key.pem b/t/certs/localhost_slapd_key.pem new file mode 100644 index 000000000000..fa33248c6240 --- /dev/null +++ b/t/certs/localhost_slapd_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsROc37Wuvbj7EFQ4ldDsFhwxsUwWDc5qec7FcDhd2zh/ocge +DPqhcTxLh/OPyG+BiXSzOu6mbYdWsx7nSsv0BjxZKHhVFtSYQfUBso4f1EcMU8tO +WwnucOxHtazFfxlrun8THyBN6OVNF7ltSCkBWmVXTbkJbxuFYy+fQX5xBfmF29UA +wZCKb3+favlLmYIrfAqztXQ2Hif8Ttzfb9gktrurIvf11oCMGssOu7Zm+3G07v7K +wQvfcPeyDD+f1ZHrun5INo07rwkQEczECddZKw5rRnmLpvUkgncky8Stbu8N9w7o +UNxxPz3xyY/UFdTCiHq7lYxL2sCbEYlkBtAIuwIDAQABAoIBAGDANpaEzlUbHRJu +8fvpixUJkp0s1V/1yHeFYptOMPn2hMYAcWrmBg+4wgwmKAl742sXOFaazpRJvjVg +TT+w8EP39T8HgHZY8lgXZjYJMZrqtvGRw946Lu3EK+o33DD10sazZ98551e48cZk +qjEjNnoNpQXydBUhFGB9RKakT1zTb8e+ZQdsrE+ZzgM9/xVFRx4gsfNbed/5TMHZ +QbwaqPzQRiS9ScRwvZ+TE20cGQ66qZqR6+JCatc8BpXA9Q6ZmTj61MSl6MMzCuOS +yIGm5J+siPkLV/ki+MAHk59G9iEsTjS1T1l4aQn0kTtdMx9oVCPODY6Jdi8jIaU/ +TwGWuQECgYEAxJEg/YKjZGQFhidP64OGi1ochFZxuJFwcZ17DgmZPkiU+vpC8KYl +QpR0r0zN9vqP+71nMMoVJfektXRMP4cy0ebSAbx47X5IfdYUhID+/OAlxbl1O9ah +lGWk90zknVvQKahImtYZqepQEYyetQiDB4gX2bLT+8IIt16ebGC/TyUCgYEA5p3g +Tcj69nxyy4BuGxYuNfTORTCzd9zhURN7325HVBMlhen/f1e+yjV1zth9yLDl5Wyl +99jkVCvy6p83s+1EDKdgOTYrxgD31Y934De/m53U6P/yHeic3z9dIgIAn+qcJqU6 +CL28lXEV8jKLNmlR0crWSjtSBDIpA3BWWN834l8CgYAxgcPnVZHFZROnGBue2391 +dXqdMhBuReMmGl21yWEZOLqdA478gTv9KtrAk/2D6NN+udNVjHALIfYP5XyWu3xn +NVVLLqbeWeH0H4kHXl3aXrHkvLL0ITiM4ZTM3EbwAwHInCO9K5NHIkaMRPhr6/rk +WLh5Efsl+1aqqGAKN8u3KQKBgFDjcUh3RSdtkSo12ujfR8gfHLaCFYDmVZWFev5s +hNJFgPTOlZJJ6Z6tT6wEnWHmQkzNZg1f4v5vB94piHUwtJynnIWUrZfewQ8EKmzX 
+wPpJSuOK2paI/3UCmZ0TDLsKpEidzZRBUMMuDh+MgO3N1Sf7uFwDIIpeOap+HZtA +eC6LAoGAFaN/0hr3kBCGGUQ0MKSEw1A4jJntR+Enz5+vJ1F/yW7E3SNp5gHz8sF1 +ppt3OZKtZeIoaCapIEr4hRZzzZr2zNHu3tyizscLAdcqKbt2o7OlPK7Z5mhREN8E +F4obLQI+YsAv2aOY2EFTSPq70N2OL45NLsdq3igpKZEIbpUgnwA= +-----END RSA PRIVATE KEY----- diff --git a/t/chaos/delayetcd/delayetcd.go b/t/chaos/delayetcd/delayetcd.go index a8245e6a4099..4cb1c9ed3bcb 100644 --- a/t/chaos/delayetcd/delayetcd.go +++ b/t/chaos/delayetcd/delayetcd.go @@ -100,6 +100,8 @@ func deleteChaosAndCheck(eSilent *httpexpect.Expect, cliSet *utils.ClientSet, ch var _ = ginkgo.Describe("Test APISIX Delay When Add ETCD Delay", func() { ctx := context.Background() e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) eSilent := utils.GetSilentHttpexpectClient() var cliSet *utils.ClientSet @@ -123,8 +125,8 @@ var _ = ginkgo.Describe("Test APISIX Delay When Add ETCD Delay", func() { utils.SetRoute(e, httpexpect.Status2xx) utils.GetRouteList(e, http.StatusOK) - utils.WaitUntilMethodSucceed(e, http.MethodGet, 1) - utils.TestPrometheusEtcdMetric(e, 1) + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) }) // get default diff --git a/t/chaos/killetcd/killetcd.go b/t/chaos/killetcd/killetcd.go index bb75db8abcea..4f92cd9ccb38 100644 --- a/t/chaos/killetcd/killetcd.go +++ b/t/chaos/killetcd/killetcd.go @@ -64,7 +64,8 @@ func getEtcdKillChaos() *v1alpha1.PodChaos { var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) - eSilent := utils.GetSilentHttpexpectClient() + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) var cliSet *utils.ClientSet var apisixPod *v1.Pod @@ -89,8 +90,8 @@ var _ = ginkgo.Describe("Test Get 
Success When Etcd Got Killed", func() { utils.SetRoute(e, httpexpect.Status2xx) utils.GetRouteList(e, http.StatusOK) - utils.WaitUntilMethodSucceed(e, http.MethodGet, 1) - utils.TestPrometheusEtcdMetric(e, 1) + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) }) ginkgo.It("run request in background", func() { @@ -99,7 +100,7 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { for { go func() { defer ginkgo.GinkgoRecover() - utils.GetRoute(eSilent, http.StatusOK) + utils.GetRoute(eDataPanel, http.StatusOK) }() time.Sleep(100 * time.Millisecond) stopLoop := false @@ -119,7 +120,7 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { ginkgo.It("get stats before kill etcd", func() { timeStart := time.Now() - bandwidthBefore, durationBefore = utils.GetEgressBandwidthPerSecond(e) + bandwidthBefore, durationBefore = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsBefore = bandwidthBefore / durationBefore gomega.Expect(bpsBefore).NotTo(gomega.BeZero()) @@ -141,15 +142,15 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { ginkgo.It("get stats after kill etcd", func() { timeStart := time.Now() utils.SetRoute(e, httpexpect.Status5xx) - utils.GetRoute(e, http.StatusOK) - utils.TestPrometheusEtcdMetric(e, 0) + utils.GetRoute(eDataPanel, http.StatusOK) + utils.TestPrometheusEtcdMetric(ePrometheus, 0) - bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(e) + bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsAfter = bandwidthAfter / durationAfter errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) gomega.Expect(err).To(gomega.BeNil()) - gomega.Ω(errorLog).Should(gomega.ContainSubstring("no healthy etcd endpoint available")) + gomega.Ω(errorLog).Should(gomega.ContainSubstring("invalid response code: 502")) }) ginkgo.It("ingress bandwidth per second not change much", func() { diff --git 
a/t/chaos/kubernetes/deployment.yaml b/t/chaos/kubernetes/deployment.yaml index 0413db47b664..3076f9c0bd5d 100644 --- a/t/chaos/kubernetes/deployment.yaml +++ b/t/chaos/kubernetes/deployment.yaml @@ -77,7 +77,7 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - image: "apache/apisix:latest" + image: "apache/apisix:alpine-local" imagePullPolicy: IfNotPresent name: apisix-gw-deployment ports: @@ -87,6 +87,9 @@ spec: - containerPort: 9443 name: https protocol: TCP + - containerPort: 9180 + name: admin-port + protocol: TCP readinessProbe: failureThreshold: 6 initialDelaySeconds: 10 diff --git a/t/chaos/kubernetes/service.yaml b/t/chaos/kubernetes/service.yaml index c4406f58728d..f0ffdae60463 100644 --- a/t/chaos/kubernetes/service.yaml +++ b/t/chaos/kubernetes/service.yaml @@ -32,10 +32,10 @@ spec: port: 9443 protocol: TCP targetPort: 9443 - # - name: admin-port - # port: 9180 - # protocol: TCP - # targetPort: 9180 + - name: admin-port + port: 9180 + protocol: TCP + targetPort: 9180 selector: app: apisix-gw type: NodePort diff --git a/t/chaos/utils/Dockerfile b/t/chaos/utils/Dockerfile index 700108283799..3eecfd580a7a 100644 --- a/t/chaos/utils/Dockerfile +++ b/t/chaos/utils/Dockerfile @@ -67,7 +67,7 @@ RUN mkdir -p logs && touch logs/access.log && touch logs/error.log \ ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin -EXPOSE 9080 9443 +EXPOSE 9080 9180 9443 CMD ["sh", "-c", "/usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'"] diff --git a/t/chaos/utils/setup_chaos_utils.sh b/t/chaos/utils/setup_chaos_utils.sh index 9c08fed04052..4b41bb6e3fd8 100755 --- a/t/chaos/utils/setup_chaos_utils.sh +++ b/t/chaos/utils/setup_chaos_utils.sh @@ -34,19 +34,23 @@ modify_config() { DNS_IP=$(kubectl get svc -n kube-system -l k8s-app=kube-dns -o 'jsonpath={..spec.clusterIP}') echo "dns_resolver: - ${DNS_IP} -etcd: - host: - - 
\"http://etcd.default.svc.cluster.local:2379\" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - \"http://etcd.default.svc.cluster.local:2379\" plugin_attr: prometheus: enable_export_server: false " > ./conf/config.yaml - sed -i -e 's/apisix:latest/apisix:alpine-local/g' t/chaos/kubernetes/deployment.yaml } port_forward() { apisix_pod_name=$(kubectl get pod -l app=apisix-gw -o 'jsonpath={.items[0].metadata.name}') nohup kubectl port-forward svc/apisix-gw-lb 9080:9080 >/dev/null 2>&1 & + nohup kubectl port-forward svc/apisix-gw-lb 9180:9180 >/dev/null 2>&1 & nohup kubectl port-forward $apisix_pod_name 9091:9091 >/dev/null 2>&1 & ps aux | grep '[p]ort-forward' } diff --git a/t/chaos/utils/utils.go b/t/chaos/utils/utils.go index e4db50b329b6..207a7b9c10ad 100644 --- a/t/chaos/utils/utils.go +++ b/t/chaos/utils/utils.go @@ -30,9 +30,13 @@ import ( ) var ( - token = "edd1c9f034335f136f87ad84b625c8f1" - Host = "http://127.0.0.1:9080" - setRouteBody = `{ + token = "edd1c9f034335f136f87ad84b625c8f1" + // TODO: refactor the code. We should move the endpoint from the expect to the http call. + // So we don't need to remember to pass the correct expect. 
+ Host = "http://127.0.0.1:9180" + DataPanelHost = "http://127.0.0.1:9080" + PrometheusHost = "http://127.0.0.1:9080" + setRouteBody = `{ "uri": "/get", "plugins": { "prometheus": {} @@ -168,11 +172,11 @@ func DeleteRoute(e *httpexpect.Expect) *httpexpect.Response { func SetPrometheusMetricsPublicAPI(e *httpexpect.Expect) *httpexpect.Response { return caseCheck(httpTestCase{ - E: e, - Method: http.MethodPut, - Path: "/apisix/admin/routes/metrics", - Headers: map[string]string{"X-API-KEY": token}, - Body: `{ + E: e, + Method: http.MethodPut, + Path: "/apisix/admin/routes/metrics", + Headers: map[string]string{"X-API-KEY": token}, + Body: `{ "uri": "/apisix/prometheus/metrics", "plugins": { "public-api": {} diff --git a/t/cli/test_access_log.sh b/t/cli/test_access_log.sh index 252a931d2692..7c40b35a3b8a 100755 --- a/t/cli/test_access_log.sh +++ b/t/cli/test_access_log.sh @@ -57,7 +57,7 @@ if [ $count_test_access_log -eq 0 ]; then fi count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -eq 4 ]; then +if [ $count_access_log_off -eq 5 ]; then echo "failed: nginx.conf file find access_log off; when enable access log" exit 1 fi @@ -92,7 +92,7 @@ if [ $count_test_access_log -eq 1 ]; then fi count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -ne 4 ]; then +if [ $count_access_log_off -ne 5 ]; then echo "failed: nginx.conf file doesn't find access_log off; when disable access log" exit 1 fi @@ -151,7 +151,7 @@ rm logs/error.log make init make run -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! 
$code -eq 200 ]; then @@ -187,12 +187,14 @@ echo "don't log uninitialized access log variable when the HTTP request is malfo # TLS upstream echo " -apisix: - admin_api_mtls: - admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' - admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 - https_admin: true +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' nginx_config: http: access_log_format: '\"\$upstream_scheme://\$upstream_host\" \$ssl_server_name' diff --git a/t/cli/test_admin.sh b/t/cli/test_admin.sh index 789a61fb8941..5336244e3372 100755 --- a/t/cli/test_admin.sh +++ b/t/cli/test_admin.sh @@ -24,12 +24,14 @@ git checkout conf/config.yaml echo " -apisix: - admin_api_mtls: - admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' - admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 - https_admin: true +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' " > conf/config.yaml make init @@ -55,9 +57,11 @@ echo "passed: admin https enabled" echo ' apisix: enable_admin: true - admin_listen: - ip: 127.0.0.2 - port: 9181 +deployment: + admin: + admin_listen: + ip: 127.0.0.2 + port: 9181 ' > conf/config.yaml make init @@ -99,9 +103,10 @@ echo "passed: rollback to the default admin config" # set allow_admin in conf/config.yaml echo " -apisix: - allow_admin: - - 127.0.0.9 +deployment: + admin: + allow_admin: + - 127.0.0.9 " > conf/config.yaml make init @@ -113,8 +118,9 @@ if [ $count -eq 0 ]; then fi echo " -apisix: - allow_admin: ~ +deployment: + admin: + allow_admin: ~ " > conf/config.yaml make init @@ -132,9 +138,10 @@ echo "passed: empty allow_admin in conf/config.yaml" git checkout conf/config.yaml echo ' -apisix: - allow_admin: ~ - admin_key: ~ 
+deployment: + admin: + admin_key: ~ + allow_admin: ~ ' > conf/config.yaml make init > output.log 2>&1 | true @@ -150,13 +157,14 @@ echo "pass: missing admin key and show ERROR message" # admin api, allow any IP but use default key echo ' -apisix: - allow_admin: ~ - admin_key: - - - name: "admin" - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin +deployment: + admin: + allow_admin: ~ + admin_key: + - + name: "admin" + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin ' > conf/config.yaml make init > output.log 2>&1 | true @@ -169,10 +177,12 @@ fi echo "pass: show WARNING message if the user used default token and allow any IP to access" -# port_admin set +# admin_listen set echo ' -apisix: - port_admin: 9180 +deployment: + admin: + admin_listen: + port: 9180 ' > conf/config.yaml rm logs/error.log @@ -192,22 +202,26 @@ if grep -E 'using uninitialized ".+" variable while logging request' logs/error. exit 1 fi -echo "pass: uninitialized variable not found during writing access log (port_admin set)" +echo "pass: uninitialized variable not found during writing access log (admin_listen set)" -# Admin API can only be used with etcd config_center +# Admin API can only be used with etcd config_provider +## if role is data_plane, and config_provider is yaml, then enable_admin is set to false echo ' apisix: enable_admin: true - config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml ' > conf/config.yaml out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep "Admin API can only be used with etcd config_center"; then - echo "failed: Admin API can only be used with etcd config_center" +if echo "$out" | grep "Admin API can only be used with etcd config_provider"; then + echo "failed: Admin API can only be used with etcd config_provider" exit 1 fi -echo "passed: Admin API can only be used with etcd config_center" +echo "passed: Admin API can only be used with etcd config_provider" # disable Admin API and init plugins syncer echo ' @@ -248,7 +262,7 @@ make init make run # initialize node-status public API routes #1 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/node-status \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/apisix/status\", @@ -256,7 +270,7 @@ code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0 \"public-api\": {} } }") -if [ ! $code -eq 201 ]; then +if [ ! $code -lt 300 ]; then echo "failed: initialize node status public API failed #1" exit 1 fi @@ -275,7 +289,7 @@ make init sleep 1 # initialize node-status public API routes #2 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/node-status \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/apisix/status\", @@ -333,7 +347,7 @@ if ! grep -E 'new plugins: {"public-api":true,"node-status":true}' logs/error.lo fi # check stream plugins(no plugins under stream, it will be added below) -if ! 
grep -E 'failed to read stream plugin list from local file' logs/error.log; then +if grep -E 'failed to read stream plugin list from local file' logs/error.log; then echo "failed: first time load stream plugins list failed" exit 1 fi diff --git a/t/cli/test_admin_mtls.sh b/t/cli/test_admin_mtls.sh index 7bdb06e431c3..7bbad286e416 100755 --- a/t/cli/test_admin_mtls.sh +++ b/t/cli/test_admin_mtls.sh @@ -22,14 +22,15 @@ # The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns echo ' -apisix: - port_admin: 9180 - https_admin: true - - admin_api_mtls: - admin_ssl_cert: "../t/certs/mtls_server.crt" - admin_ssl_cert_key: "../t/certs/mtls_server.key" - admin_ssl_ca_cert: "../t/certs/mtls_ca.crt" +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: "../t/certs/mtls_server.crt" + admin_ssl_cert_key: "../t/certs/mtls_server.key" + admin_ssl_ca_cert: "../t/certs/mtls_ca.crt" ' > conf/config.yaml diff --git a/t/cli/test_apisix_mirror.sh b/t/cli/test_apisix_mirror.sh index b0547ca265c1..f54d7ddfdd3d 100755 --- a/t/cli/test_apisix_mirror.sh +++ b/t/cli/test_apisix_mirror.sh @@ -32,7 +32,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { diff --git a/t/cli/test_ci_only.sh b/t/cli/test_ci_only.sh index a440cf255ac2..d7d9f5bd1bbc 100755 --- a/t/cli/test_ci_only.sh +++ b/t/cli/test_ci_only.sh @@ -26,10 +26,14 @@ git checkout conf/config.yaml echo ' -etcd: - host: - - "http://127.0.0.1:3379" - prefix: "/apisix" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:3379" + prefix: "/apisix" ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_cmd.sh b/t/cli/test_cmd.sh new file mode 100755 index 
000000000000..bd8da86bb7b0 --- /dev/null +++ b/t/cli/test_cmd.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +git checkout conf/config.yaml + +# check restart with old nginx.pid exist +echo "-1" > logs/nginx.pid +out=$(./bin/apisix start 2>&1 || true) +if echo "$out" | grep "APISIX is running"; then + rm logs/nginx.pid + echo "failed: should reject bad nginx.pid" + exit 1 +fi + +./bin/apisix stop +sleep 0.5 +rm logs/nginx.pid || true + +# check no corresponding process +make run +oldpid=$(< logs/nginx.pid) +make stop +sleep 0.5 +echo $oldpid > logs/nginx.pid +out=$(make run || true) +if ! echo "$out" | grep "nginx.pid exists but there's no corresponding process with pid"; then + echo "failed: should find no corresponding process" + exit 1 +fi +make stop +echo "pass: no corresponding process" + +# check running when run repeatedly +out=$(make run; make run || true) +if ! echo "$out" | grep "APISIX is running"; then + echo "failed: should find APISIX running" + exit 1 +fi + +make stop +echo "pass: check APISIX running" + +# check customized config.yaml is copied and reverted. 
+ +git checkout conf/config.yaml + +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' +" > conf/customized_config.yaml + +cp conf/config.yaml conf/config_original.yaml + +make init + +if ./bin/apisix start -c conf/not_existed_config.yaml; then + echo "failed: apisix still start with invalid customized config.yaml" + exit 1 +fi + +./bin/apisix start -c conf/customized_config.yaml + +if cmp -s "conf/config.yaml" "conf/config_original.yaml"; then + rm conf/config_original.yaml + echo "failed: customized config.yaml copied failed" + exit 1 +fi + +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +if [ ! $code -eq 200 ]; then + rm conf/config_original.yaml conf/customized_config.yaml + echo "failed: customized config.yaml not be used" + exit 1 +fi + +make stop + +if ! cmp -s "conf/config.yaml" "conf/config_original.yaml"; then + rm conf/config_original.yaml conf/customized_config.yaml + echo "failed: customized config.yaml reverted failed" + exit 1 +fi + +rm conf/config_original.yaml conf/customized_config.yaml +echo "passed: customized config.yaml copied and reverted succeeded" diff --git a/t/cli/test_deployment_control_plane.sh b/t/cli/test_deployment_control_plane.sh new file mode 100755 index 000000000000..fa7210378da4 --- /dev/null +++ b/t/cli/test_deployment_control_plane.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + cert: t/certs/mtls_server.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'property "cert_key" is required'; then + echo "failed: should check deployment schema during init" + exit 1 +fi + +echo "passed: should check deployment schema during init" + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +apisix: + enable_admin: false +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! 
$code -eq 200 ]; then + echo "failed: control_plane should enable Admin API" + exit 1 +fi + +echo "passed: control_plane should enable Admin API" + +# use https +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') + +if [ ! $code -eq 200 ]; then + make stop + echo "failed: could not work with etcd" + exit 1 +fi + +echo "passed: work well with etcd in control plane" + +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/c -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop +if [ ! $code -eq 404 ]; then + echo "failed: should disable request proxy" + exit 1 +fi + +echo "passed: should disable request proxy" diff --git a/t/cli/test_deployment_data_plane.sh b/t/cli/test_deployment_data_plane.sh index 379265319b1c..ef5ef61e37c6 100755 --- a/t/cli/test_deployment_data_plane.sh +++ b/t/cli/test_deployment_data_plane.sh @@ -30,12 +30,11 @@ deployment: config_provider: control_plane control_plane: host: - - http://127.0.0.1:2379 + - https://127.0.0.1:12379 + prefix: "/apisix" timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert + tls: + verify: false ' > conf/config.yaml make run @@ -50,3 +49,34 @@ if [ ! 
$res -eq 0 ]; then fi echo "passed: data_plane does not write data to etcd" + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! $code -eq 404 ]; then + echo "failed: data_plane should not enable Admin API" + exit 1 +fi + +echo "passed: data_plane should not enable Admin API" + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 +' > conf/config.yaml + +out=$(make run 2>&1 || true) +make stop +if ! echo "$out" | grep 'failed to load the configuration: https://127.0.0.1:12379: certificate verify failed'; then + echo "failed: should verify certificate by default" + exit 1 +fi + +echo "passed: should verify certificate by default" diff --git a/t/cli/test_deployment_mtls.sh b/t/cli/test_deployment_mtls.sh new file mode 100755 index 000000000000..5fa4c6984a21 --- /dev/null +++ b/t/cli/test_deployment_mtls.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +exit_if_not_customed_nginx + +# use mTLS +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: could not work with etcd" + exit 1 +fi + +echo "passed: work well with etcd in control plane" + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + timeout: 30 + tls: + verify: false + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +rm logs/error.log +make run +sleep 1 + +make stop + +if grep '\[error\] .\+ https://admin.apisix.dev:22379' logs/error.log; then + echo "failed: work well with control plane in data plane" + exit 1 +fi + +echo "passed: work well with control plane in data plane" diff --git a/t/cli/test_deployment_traditional.sh b/t/cli/test_deployment_traditional.sh index 6a89ca0a65f4..1dead769bc10 100755 --- a/t/cli/test_deployment_traditional.sh +++ b/t/cli/test_deployment_traditional.sh @@ -19,21 +19,6 @@ . ./t/cli/common.sh -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep 'invalid deployment traditional configuration: property "etcd" is required'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - # HTTP echo ' deployment: @@ -49,7 +34,7 @@ deployment: make run sleep 1 -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! $code -eq 200 ]; then @@ -77,7 +62,7 @@ deployment: make run sleep 1 -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! $code -eq 200 ]; then @@ -154,7 +139,7 @@ deployment: make run sleep 1 -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! 
$code -eq 200 ]; then diff --git a/t/cli/test_etcd.sh b/t/cli/test_etcd.sh index c417baaec94d..033cab5beb0f 100755 --- a/t/cli/test_etcd.sh +++ b/t/cli/test_etcd.sh @@ -32,13 +32,17 @@ etcdctl --endpoints=127.0.0.1:2379 auth enable etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix echo ' -etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - user: root - password: apache-api6 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api6 ' > conf/config.yaml make init @@ -52,10 +56,10 @@ init_kv=( "/apisix/global_rules/ init_dir" "/apisix/plugin_metadata/ init_dir" "/apisix/plugins/ init_dir" -"/apisix/proto/ init_dir" +"/apisix/protos/ init_dir" "/apisix/routes/ init_dir" "/apisix/services/ init_dir" -"/apisix/ssl/ init_dir" +"/apisix/ssls/ init_dir" "/apisix/stream_routes/ init_dir" "/apisix/upstreams/ init_dir" ) @@ -84,10 +88,14 @@ echo "passed: properly handle the error when connecting to etcd without auth" git checkout conf/config.yaml echo ' -etcd: - host: - - http://127.0.0.1:2389 - prefix: /apisix +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -102,10 +110,14 @@ echo "passed: Show retry time info successfully" git checkout conf/config.yaml echo ' -etcd: - host: - - http://127.0.0.1:2389 - prefix: /apisix +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -129,13 +141,17 @@ etcdctl --endpoints=127.0.0.1:2379 auth enable etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix echo ' -etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - user: root - password: 
apache-api7 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api7 ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_etcd_healthcheck.sh b/t/cli/test_etcd_healthcheck.sh index 34ca4d29a632..52b90bc908d2 100755 --- a/t/cli/test_etcd_healthcheck.sh +++ b/t/cli/test_etcd_healthcheck.sh @@ -30,11 +30,15 @@ if [ -z "logs/error.log" ]; then fi echo ' -etcd: - host: - - "http://127.0.0.1:23790" - - "http://127.0.0.1:23791" - - "http://127.0.0.1:23792" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:23790" + - "http://127.0.0.1:23791" + - "http://127.0.0.1:23792" health_check_timeout: '"$HEALTH_CHECK_RETRY_TIMEOUT"' timeout: 2 ' > conf/config.yaml @@ -45,7 +49,7 @@ docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d make init && make run docker stop ${ETCD_NAME_0} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! $code -eq 200 ]; then echo "failed: apisix got effect when one etcd node out of a cluster disconnected" exit 1 @@ -53,7 +57,7 @@ fi docker start ${ETCD_NAME_0} docker stop ${ETCD_NAME_1} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! 
$code -eq 200 ]; then echo "failed: apisix got effect when one etcd node out of a cluster disconnected" exit 1 @@ -71,7 +75,7 @@ docker stop ${ETCD_NAME_0} && docker stop ${ETCD_NAME_1} && docker stop ${ETCD_N sleep_till=$(date +%s -d "$DATE + $HEALTH_CHECK_RETRY_TIMEOUT second") -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ $code -eq 200 ]; then echo "failed: apisix not got effect when all etcd nodes disconnected" exit 1 @@ -86,7 +90,7 @@ if [ "$sleep_seconds" -gt 0 ]; then sleep $sleep_seconds fi -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! 
$code -eq 200 ]; then echo "failed: apisix could not recover when etcd node recover" docker ps diff --git a/t/cli/test_etcd_mtls.sh b/t/cli/test_etcd_mtls.sh index 371330e939a2..d61d6d517c1f 100755 --- a/t/cli/test_etcd_mtls.sh +++ b/t/cli/test_etcd_mtls.sh @@ -25,14 +25,18 @@ exit_if_not_customed_nginx # etcd mTLS verify echo ' -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -44,12 +48,16 @@ fi echo "passed: certificate verify success expectedly" echo ' -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + verify: false ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -65,13 +73,17 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -95,13 +107,17 @@ apisix: - addr: 9100 ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + 
config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -132,13 +148,17 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml rm logs/error.log || true @@ -147,7 +167,7 @@ make run sleep 1 make stop -if ! grep -E 'certificate host mismatch' logs/error.log; then +if ! grep -E 'upstream SSL certificate does not match \"127.0.0.1\" while SSL handshaking to upstream' logs/error.log; then echo "failed: should got certificate host mismatch when use host in etcd.host as sni" exit 1 fi @@ -161,14 +181,18 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - sni: "admin.apisix.dev" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + sni: "admin.apisix.dev" ' > conf/config.yaml rm logs/error.log || true @@ -183,4 +207,3 @@ if grep -E 'certificate host mismatch' logs/error.log; then fi echo "passed: specify custom sni instead of using etcd.host" - diff --git a/t/cli/test_etcd_tls.sh b/t/cli/test_etcd_tls.sh index 906a2b91d1b2..39db833f9674 100755 --- a/t/cli/test_etcd_tls.sh +++ b/t/cli/test_etcd_tls.sh @@ -27,10 +27,17 @@ git checkout conf/config.yaml echo ' -etcd: - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" 
+apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -46,12 +53,16 @@ echo "passed: Show certificate verify failed info successfully" git checkout conf/config.yaml echo ' -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false - prefix: "/apisix" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" + tls: + verify: false ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_http_config.sh b/t/cli/test_http_config.sh index 20837f65a069..4059ca69a712 100755 --- a/t/cli/test_http_config.sh +++ b/t/cli/test_http_config.sh @@ -39,22 +39,6 @@ echo "passed: define custom shdict" git checkout conf/config.yaml -echo ' -nginx_config: - http: - lua_shared_dicts: - my_dict: 1m -' > conf/config.yaml - -make init - -if ! grep "lua_shared_dict my_dict 1m;" conf/nginx.conf > /dev/null; then - echo "failed: define custom shdict in the old way" - exit 1 -fi - -echo "passed: define custom shdict in the old way" - echo " plugins: - ip-restriction diff --git a/t/cli/test_kubernetes.sh b/t/cli/test_kubernetes.sh index bc371ee01a35..f60f856b5327 100755 --- a/t/cli/test_kubernetes.sh +++ b/t/cli/test_kubernetes.sh @@ -26,23 +26,88 @@ discovery: host: ${HOST_ENV} client: token: ${TOKEN_ENV} -' > conf/config.yaml +' >conf/config.yaml make init if ! grep "env HOST_ENV" conf/nginx.conf; then - echo "kubernetes discovery env inject failed" - exit 1 + echo "kubernetes discovery env inject failed" + exit 1 fi if ! grep "env KUBERNETES_SERVICE_PORT" conf/nginx.conf; then - echo "kubernetes discovery env inject failed" - exit 1 + echo "kubernetes discovery env inject failed" + exit 1 fi if ! 
grep "env TOKEN_ENV" conf/nginx.conf; then - echo "kubernetes discovery env inject failed" - exit 1 + echo "kubernetes discovery env inject failed" + exit 1 fi -echo "kubernetes discovery env inject success" +if ! grep "lua_shared_dict kubernetes 1m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +echo ' +discovery: + kubernetes: + - id: dev + service: + host: ${DEV_HOST} + port: ${DEV_PORT} + client: + token: ${DEV_TOKEN} + - id: pro + service: + host: ${PRO_HOST} + port: ${PRO_PORT} + client: + token: ${PRO_TOKEN} + shared_size: 2m +' >conf/config.yaml + +make init + +if ! grep "env DEV_HOST" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env DEV_PORT" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env DEV_TOKEN" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_HOST" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_PORT" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_TOKEN" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "lua_shared_dict kubernetes-dev 1m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +if ! 
grep "lua_shared_dict kubernetes-pro 2m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +echo "kubernetes discovery inject success" diff --git a/t/cli/test_main.sh b/t/cli/test_main.sh index ea54c53b8425..ecd55bf3d06f 100755 --- a/t/cli/test_main.sh +++ b/t/cli/test_main.sh @@ -59,7 +59,9 @@ echo "passed: nginx.conf file contains reuseport configuration" echo " apisix: ssl: - listen_port: 8443 + listen: + - port: 8443 + " > conf/config.yaml make init @@ -87,10 +89,11 @@ apisix: - 9081 - 9082 ssl: - listen_port: - - 9443 - - 9444 - - 9445 + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 " > conf/config.yaml make init @@ -253,9 +256,13 @@ echo "passed: resolve variables wrapped with whitespace" # support environment variables in local_conf echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" ' > conf/config.yaml ETCD_HOST=127.0.0.1 ETCD_PORT=2379 make init @@ -267,9 +274,13 @@ fi # don't override user's envs configuration echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" nginx_config: envs: - ETCD_HOST @@ -288,9 +299,13 @@ if ! grep "env ETCD_HOST;" conf/nginx.conf > /dev/null; then fi echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" nginx_config: envs: - ETCD_HOST=1.1.1.1 @@ -505,54 +520,6 @@ fi sed -i 's/worker_processes: 2/worker_processes: auto/' conf/config.yaml echo "passed: worker_processes number is configurable" -# check customized config.yaml is copied and reverted. 
- -git checkout conf/config.yaml - -echo " -apisix: - admin_api_mtls: - admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' - admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 - https_admin: true -" > conf/customized_config.yaml - -cp conf/config.yaml conf/config_original.yaml - -make init - -if ./bin/apisix start -c conf/not_existed_config.yaml; then - echo "failed: apisix still start with invalid customized config.yaml" - exit 1 -fi - -./bin/apisix start -c conf/customized_config.yaml - -if cmp -s "conf/config.yaml" "conf/config_original.yaml"; then - rm conf/config_original.yaml - echo "failed: customized config.yaml copied failed" - exit 1 -fi - -code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! $code -eq 200 ]; then - rm conf/config_original.yaml conf/customized_config.yaml - echo "failed: customized config.yaml not be used" - exit 1 -fi - -make stop - -if ! 
cmp -s "conf/config.yaml" "conf/config_original.yaml"; then - rm conf/config_original.yaml conf/customized_config.yaml - echo "failed: customized config.yaml reverted failed" - exit 1 -fi - -rm conf/config_original.yaml conf/customized_config.yaml -echo "passed: customized config.yaml copied and reverted succeeded" - # check disable cpu affinity git checkout conf/config.yaml @@ -624,7 +591,7 @@ stream_plugins: - 3rd-party ' > conf/config.yaml -rm logs/error.log +rm logs/error.log || true make init make run @@ -694,43 +661,6 @@ fi echo "passed: hook can take effect" -# check restart with old nginx.pid exist -echo "-1" > logs/nginx.pid -out=$(./bin/apisix start 2>&1 || true) -if echo "$out" | grep "APISIX is running"; then - rm logs/nginx.pid - echo "failed: should reject bad nginx.pid" - exit 1 -fi - -./bin/apisix stop -sleep 0.5 -rm logs/nginx.pid || true - -# check no corresponding process -make run -oldpid=$(< logs/nginx.pid) -make stop -sleep 0.5 -echo $oldpid > logs/nginx.pid -out=$(make run || true) -if ! echo "$out" | grep "nginx.pid exists but there's no corresponding process with pid"; then - echo "failed: should find no corresponding process" - exit 1 -fi -make stop -echo "pass: no corresponding process" - -# check running when run repeatedly -out=$(make run; make run || true) -if ! echo "$out" | grep "APISIX is running"; then - echo "failed: should find APISIX running" - exit 1 -fi - -make stop -echo "pass: check APISIX running" - # check the keepalive related parameter settings in the upstream git checkout conf/config.yaml diff --git a/t/cli/test_makefile.sh b/t/cli/test_makefile.sh new file mode 100755 index 000000000000..30a196d0067c --- /dev/null +++ b/t/cli/test_makefile.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +make run + +echo " +deployment: + admin: + admin_listen: + ip: 127.0.0.2 + port: 9181 +apisix: + enable_admin: true +" > conf/config.yaml + +make reload +make stop + +if ! grep "listen 127.0.0.2:9181;" conf/nginx.conf > /dev/null; then + echo "failed: regenerate nginx conf in 'make reload'" + exit 1 +fi + +echo "passed: regenerate nginx conf in 'make reload'" diff --git a/t/cli/test_prometheus.sh b/t/cli/test_prometheus.sh index eb4ce0300193..15f54f9114ee 100755 --- a/t/cli/test_prometheus.sh +++ b/t/cli/test_prometheus.sh @@ -77,7 +77,7 @@ plugin_attr: IP=127.0.0.1 PORT=9092 make run # initialize prometheus metrics public API route #1 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/metrics1 \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics1 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/prometheus/metrics\", @@ -152,7 +152,7 @@ plugin_attr: IP=127.0.0.1 PORT=9092 make run # initialize prometheus metrics public API route #2 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/metrics2 \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics2 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ 
\"uri\": \"/apisix/prometheus/metrics\", diff --git a/t/cli/test_prometheus_stream.sh b/t/cli/test_prometheus_stream.sh index 347774b2769c..561b9a820cf5 100755 --- a/t/cli/test_prometheus_stream.sh +++ b/t/cli/test_prometheus_stream.sh @@ -34,7 +34,7 @@ stream_plugins: make run sleep 0.5 -curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9080/apisix/admin/stream_routes/1 \ +curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d '{ "plugins": { diff --git a/t/cli/test_serverless.sh b/t/cli/test_serverless.sh index 8c1132c5bdbf..8f1d7e103ccd 100755 --- a/t/cli/test_serverless.sh +++ b/t/cli/test_serverless.sh @@ -31,7 +31,10 @@ rm logs/error.log || echo '' echo ' apisix: enable_admin: false - config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml ' > conf/config.yaml make init diff --git a/t/cli/test_snippet.sh b/t/cli/test_snippet.sh index 0684d6c1f659..1b545dd9cf0a 100755 --- a/t/cli/test_snippet.sh +++ b/t/cli/test_snippet.sh @@ -25,7 +25,6 @@ echo ' apisix: node_listen: 9080 enable_admin: true - port_admin: 9180 stream_proxy: only: false tcp: diff --git a/t/cli/test_standalone.sh b/t/cli/test_standalone.sh index b4e6f3955420..2a3add66662b 100755 --- a/t/cli/test_standalone.sh +++ b/t/cli/test_standalone.sh @@ -30,7 +30,10 @@ trap standalone EXIT echo ' apisix: enable_admin: false - config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml ' > conf/config.yaml echo ' @@ -65,3 +68,46 @@ if [ ! $code -eq 200 ]; then fi echo "passed: resolve variables in apisix.yaml conf success" + +# configure standalone via deployment +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes) +if [ ! 
$code -eq 404 ]; then + echo "failed: admin API should be disabled automatically" + exit 1 +fi + +echo "passed: admin API should be disabled automatically" + +# support environment variables +echo ' +routes: + - + uri: ${{var_test_path}} + plugins: + proxy-rewrite: + uri: ${{var_test_proxy_rewrite_uri:=/apisix/nginx_status}} + upstream: + nodes: + "127.0.0.1:9091": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! $code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" + exit 1 +fi + +echo "passed: resolve variables in apisix.yaml conf success" diff --git a/t/cli/test_tls_over_tcp.sh b/t/cli/test_tls_over_tcp.sh index a5a095a52839..566af9418a24 100755 --- a/t/cli/test_tls_over_tcp.sh +++ b/t/cli/test_tls_over_tcp.sh @@ -41,7 +41,7 @@ sleep 0.1 ./utils/create-ssl.py t/certs/mtls_server.crt t/certs/mtls_server.key test.com -curl -k -i http://127.0.0.1:9080/apisix/admin/stream_routes/1 \ +curl -k -i http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d \ '{"upstream":{"nodes":{"127.0.0.1:9101":1},"type":"roundrobin"}}' diff --git a/t/cli/test_upstream_mtls.sh b/t/cli/test_upstream_mtls.sh index a8de39733070..b2e437fffa7e 100755 --- a/t/cli/test_upstream_mtls.sh +++ b/t/cli/test_upstream_mtls.sh @@ -28,10 +28,6 @@ exit_if_not_customed_nginx echo ' apisix: - admin_key: - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin ssl: ssl_trusted_certificate: t/certs/apisix.crt nginx_config: @@ -54,7 +50,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream": { @@ -73,7 +69,7 @@ curl -k -i 
http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034 } }' -sleep 0.1 +sleep 1 code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) @@ -91,10 +87,6 @@ echo "passed: connection to upstream with mTLS success" # test proxy_ssl_trusted_certificate and use incorrect ca cert echo ' apisix: - admin_key: - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin ssl: ssl_trusted_certificate: t/certs/apisix_ecc.crt nginx_config: @@ -117,7 +109,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream": { diff --git a/t/cli/test_validate_config.sh b/t/cli/test_validate_config.sh index 216f1d9fb14d..1c00360f1c30 100755 --- a/t/cli/test_validate_config.sh +++ b/t/cli/test_validate_config.sh @@ -75,10 +75,13 @@ make stop echo "passed: find the certificate correctly" echo ' +deployment: + admin: + admin_listen: + port: 9180 apisix: node_listen: 9080 enable_admin: true - port_admin: 9180 stream_proxy: tcp: - "localhost:9100" @@ -204,9 +207,13 @@ fi echo "passed: check the realip configuration for batch-requests" echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - 127.0.0.1 + - 127.0.0.1 ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -215,17 +222,4 @@ if ! echo "$out" | grep 'property "host" validation failed'; then exit 1 fi -echo ' -etcd: - prefix: "/apisix/" - host: - - https://127.0.0.1 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep 'property "prefix" validation failed'; then - echo "failed: should check etcd schema during init" - exit 1 -fi - echo "passed: check etcd schema during init" diff --git a/t/config-center-yaml/consumer.t b/t/config-center-yaml/consumer.t index 62cfb321d735..cb503219fd10 100644 --- a/t/config-center-yaml/consumer.t +++ b/t/config-center-yaml/consumer.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/config-center-yaml/global-rule.t b/t/config-center-yaml/global-rule.t index 660ace689951..5fe5d0fe5e8b 100644 --- a/t/config-center-yaml/global-rule.t +++ b/t/config-center-yaml/global-rule.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/config-center-yaml/plugin-configs.t b/t/config-center-yaml/plugin-configs.t index e7a22b7ba074..468216dbfe8c 100644 --- a/t/config-center-yaml/plugin-configs.t +++ b/t/config-center-yaml/plugin-configs.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -113,7 +115,7 @@ routes: --- request GET /echo --- response_body -hello +world --- response_headers in: out --- error_log eval diff --git a/t/config-center-yaml/plugin-metadata.t b/t/config-center-yaml/plugin-metadata.t index 6e0a9971e879..87f6ec1ca0d2 100644 --- a/t/config-center-yaml/plugin-metadata.t 
+++ b/t/config-center-yaml/plugin-metadata.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/config-center-yaml/plugin.t b/t/config-center-yaml/plugin.t index e3d13b97d86c..cc9a6ea4ec64 100644 --- a/t/config-center-yaml/plugin.t +++ b/t/config-center-yaml/plugin.t @@ -27,8 +27,11 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -84,7 +87,7 @@ GET /t --- response_body hello world --- error_log -use config_center: yaml +use config_provider: yaml load(): loaded plugin and sort by priority: 3000 name: ip-restriction load(): loaded plugin and sort by priority: 2510 name: jwt-auth load_stream(): loaded stream plugin and sort by priority: 1000 name: mqtt-proxy @@ -102,8 +105,11 @@ load(): new plugins --- yaml_config apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml plugins: - ip-restriction - jwt-auth @@ -169,8 +175,11 @@ GET /apisix/prometheus/metrics --- yaml_config apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml plugins: - ip-restriction - jwt-auth @@ -188,3 +197,32 @@ hello world property "stream" validation failed: wrong type: expected boolean, got string --- no_error_log load(): plugins not changed + + + +=== TEST 6: empty plugin list +--- apisix_yaml +plugins: +stream_plugins: +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local 
http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- error_log +use config_provider: yaml +load(): new plugins: {} +load_stream(): new plugins: {} diff --git a/t/config-center-yaml/route-service.t b/t/config-center-yaml/route-service.t index c83af6fb4470..b2d8e2ff6b73 100644 --- a/t/config-center-yaml/route-service.t +++ b/t/config-center-yaml/route-service.t @@ -24,8 +24,10 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ run_tests(); diff --git a/t/config-center-yaml/route-upstream.t b/t/config-center-yaml/route-upstream.t index ff50ce274d1d..71b9a0311c3c 100644 --- a/t/config-center-yaml/route-upstream.t +++ b/t/config-center-yaml/route-upstream.t @@ -24,8 +24,10 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ run_tests(); diff --git a/t/config-center-yaml/route.t b/t/config-center-yaml/route.t index e77cf732f3f8..6214da0cc622 100644 --- a/t/config-center-yaml/route.t +++ b/t/config-center-yaml/route.t @@ -24,8 +24,10 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ run_tests(); @@ -49,7 +51,7 @@ GET /hello --- response_body hello world --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -72,7 +74,7 @@ routes: GET /hello --- error_code: 404 --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -153,10 +155,12 @@ hello world 
--- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false router: http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml routes: - @@ -182,10 +186,12 @@ property "uri" validation failed --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false router: http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml routes: - @@ -210,10 +216,12 @@ GET /hello --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false router: http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml routes: - @@ -238,8 +246,10 @@ GET /hello --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml routes: - diff --git a/t/config-center-yaml/ssl.t b/t/config-center-yaml/ssl.t index d4745a21f0ba..401ba4ec0b04 100644 --- a/t/config-center-yaml/ssl.t +++ b/t/config-center-yaml/ssl.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -108,7 +110,7 @@ __DATA__ === TEST 1: sanity --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- @@ -181,7 +183,7 @@ server name: "test.com" === TEST 2: single sni --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- @@ -252,7 +254,7 @@ server name: "test.com" === TEST 3: bad cert --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- diff --git a/t/config-center-yaml/stream-route.t b/t/config-center-yaml/stream-route.t index b6bfabff9aa3..6792b1bb9446 100644 --- a/t/config-center-yaml/stream-route.t +++ 
b/t/config-center-yaml/stream-route.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -113,9 +115,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/t/control/discovery.t b/t/control/discovery.t index ab78da6af8b7..88c5c3493a48 100644 --- a/t/control/discovery.t +++ b/t/control/discovery.t @@ -27,9 +27,10 @@ our $yaml_config = <<_EOC_; apisix: enable_control: true node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: eureka: host: @@ -131,8 +132,10 @@ GET /v1/discovery/dns/dump apisix: enable_control: true node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: diff --git a/t/control/healthcheck.t b/t/control/healthcheck.t index 9e0a3804806e..d9ef02c24b4d 100644 --- a/t/control/healthcheck.t +++ b/t/control/healthcheck.t @@ -29,8 +29,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/control/plugin-metadata.t b/t/control/plugin-metadata.t new file mode 100644 index 000000000000..21e784186b22 --- /dev/null +++ b/t/control/plugin-metadata.t @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val", + "ikey": 1 + }]] + ) + if code >= 300 then + ngx.status = code + return + end + + local code = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[ + {"log_format": {"upstream_response_time": "$upstream_response_time"}} + ]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } +--- error_code: 200 + + + +=== TEST 2: dump all plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadatas', ngx.HTTP_GET) + local json = require("toolkit.json") + res = json.decode(res) + for _, metadata in ipairs(res) do + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + elseif metadata.id == "example-plugin" then + ngx.say("check skey: ", metadata.skey == "val") 
+ ngx.say("check ikey: ", metadata.ikey == 1) + end + end + } + } +--- response_body +check log_format: true +check skey: true +check ikey: true + + + +=== TEST 3: dump file-logger metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadata/file-logger', ngx.HTTP_GET) + local json = require("toolkit.json") + metadata = json.decode(res) + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + end + } + } +--- response_body +check log_format: true + + + +=== TEST 4: plugin without metadata +--- request +GET /v1/plugin_metadata/batch-requests +--- error_code: 404 +--- response_body +{"error_msg":"plugin metadata[batch-requests] not found"} diff --git a/t/control/routes.t b/t/control/routes.t index 180a69095ecf..a24bc2c15e1a 100644 --- a/t/control/routes.t +++ b/t/control/routes.t @@ -29,8 +29,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/control/schema.t b/t/control/schema.t index 5b3c7799720c..f3e9f7d6263e 100644 --- a/t/control/schema.t +++ b/t/control/schema.t @@ -69,7 +69,11 @@ __DATA__ "schema": { "type":"object", "properties": { - "disable": {"type": "boolean"} + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } } }, "metadata_schema": {"type":"object"} @@ -84,7 +88,11 @@ __DATA__ "schema": { "type":"object", "properties": { - "disable": {"type": "boolean"} + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } } }, "priority": 1000 @@ -102,10 +110,7 @@ passed === TEST 2: confirm the scope of plugin ---- yaml_config -apisix: - node_listen: 1984 - admin_key: null +--- extra_yaml_config plugins: - batch-requests - 
error-log-logger diff --git a/t/control/services.t b/t/control/services.t index c702a7ceb0cd..0003bcc9d1aa 100644 --- a/t/control/services.t +++ b/t/control/services.t @@ -29,8 +29,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/control/upstreams.t b/t/control/upstreams.t index c2d76fbd2f24..09e9104ad558 100644 --- a/t/control/upstreams.t +++ b/t/control/upstreams.t @@ -29,8 +29,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/core/config-default.t b/t/core/config-default.t index 17ccedd5c285..a9546c97dbe3 100644 --- a/t/core/config-default.t +++ b/t/core/config-default.t @@ -32,7 +32,7 @@ __DATA__ ngx.say("node_listen: ", config.apisix.node_listen) ngx.say("stream_proxy: ", encode_json(config.apisix.stream_proxy)) - ngx.say("admin_key: ", encode_json(config.apisix.admin_key)) + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) } } --- request @@ -56,15 +56,16 @@ failed to parse yaml config: failed to merge, path[apisix->node_listen] expect: === TEST 3: use `null` means delete --- yaml_config -apisix: - admin_key: null +deployment: + admin: + admin_key: null --- config location /t { content_by_lua_block { local encode_json = require("toolkit.json").encode local config = require("apisix.core").config.local_conf() - ngx.say("admin_key: ", encode_json(config.apisix.admin_key)) + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) } } --- request @@ -76,15 +77,16 @@ admin_key: null === TEST 4: use `~` means delete --- yaml_config -apisix: - admin_key: ~ +deployment: + admin: + 
admin_key: null --- config location /t { content_by_lua_block { local encode_json = require("toolkit.json").encode local config = require("apisix.core").config.local_conf() - ngx.say("admin_key: ", encode_json(config.apisix.admin_key)) + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) } } --- request diff --git a/t/core/config.t b/t/core/config.t index b87fe1224e0c..29d1cc52dc07 100644 --- a/t/core/config.t +++ b/t/core/config.t @@ -55,12 +55,15 @@ first plugin: "real-ip" } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" # etcd address - prefix: "/apisix" # apisix configurations prefix - timeout: 1 - +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 plugins: - example-plugin diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t index 0d6a77989686..a117689c3680 100644 --- a/t/core/config_etcd.t +++ b/t/core/config_etcd.t @@ -29,10 +29,15 @@ __DATA__ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:7777" -- wrong etcd port - timeout: 1 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - "http://127.0.0.1:7777" -- wrong etcd port + timeout: 1 --- config location /t { content_by_lua_block { @@ -54,9 +59,15 @@ qr/(connection refused){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "https://127.0.0.1:2379" + ssl: + ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:2379" --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -73,9 +84,9 @@ end --- request GET /t --- grep_error_log chop -handshake failed +peer closed connection in SSL handshake while SSL handshaking to upstream --- 
grep_error_log_out eval -qr/(handshake failed){1,}/ +qr/(peer closed connection in SSL handshake while SSL handshaking to upstream){1,}/ @@ -83,9 +94,13 @@ qr/(handshake failed){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:12379" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:12379" --- config location /t { content_by_lua_block { @@ -107,9 +122,15 @@ qr/(closed){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "https://127.0.0.1:12379" + ssl: + ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -126,9 +147,9 @@ end --- request GET /t --- grep_error_log chop -18: self signed certificate +10:certificate has expired --- grep_error_log_out eval -qr/(18: self signed certificate){1,}/ +qr/(10:certificate has expired){1,}/ @@ -136,12 +157,17 @@ qr/(18: self signed certificate){1,}/ --- yaml_config apisix: node_listen: 1984 - admin_key: null -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false --- config location /t { content_by_lua_block { @@ -159,9 +185,8 @@ etcd: "desc": "new route", "uri": "/index.html" }]] - ) + ) - ngx.status = code ngx.say(body) } } @@ -179,11 +204,17 @@ passed apisix: node_listen: 1984 admin_key: null -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false --- config location /t { content_by_lua_block { @@ -210,12 
+241,16 @@ passed --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:1980" -- fake server port - timeout: 1 - user: root # root username for etcd - password: 5tHkHhYkjr6cQY # root password for etcd +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:1980" -- fake server port + timeout: 1 + user: root # root username for etcd + password: 5tHkHhYkjr6cQY # root password for etcd --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -248,7 +283,7 @@ etcd auth failed local config = core.config.new() local res = config:getkey("/routes/") if res and res.status == 200 and res.body - and res.body.node and res.body.node.key == "/apisix/routes" then + and res.body.count and tonumber(res.body.count) >= 1 then ngx.say("passed") else ngx.say("failed") diff --git a/t/core/config_util.t b/t/core/config_util.t index 80f01a5d2ae4..2b012fc97be0 100644 --- a/t/core/config_util.t +++ b/t/core/config_util.t @@ -70,3 +70,42 @@ __DATA__ end } } + + + +=== TEST 2: add_clean_handler / cancel_clean_handler / fire_all_clean_handlers +--- config + location /t { + content_by_lua_block { + local util = require("apisix.core.config_util") + local function setup() + local item = {clean_handlers = {}} + local idx1 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire one") + end) + local idx2 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire two") + end) + return item, idx1, idx2 + end + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1, true) + util.cancel_clean_handler(item, idx2, true) + + local item, idx1, idx2 = setup() + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx2) + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1) + util.fire_all_clean_handlers(item) 
+ } + } +--- grep_error_log eval +qr/fire \w+/ +--- grep_error_log_out eval +"fire one\nfire two\n" x 3 diff --git a/t/core/etcd-auth-fail.t b/t/core/etcd-auth-fail.t index 708b1d243e34..c85f660dc06d 100644 --- a/t/core/etcd-auth-fail.t +++ b/t/core/etcd-auth-fail.t @@ -62,7 +62,6 @@ __DATA__ } --- request GET /t ---- error_code: 500 --- error_log eval qr /insufficient credentials code: 401/ @@ -80,12 +79,16 @@ qr /insufficient credentials code: 401/ } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 --- request GET /t --- error_log eval diff --git a/t/core/etcd-auth.t b/t/core/etcd-auth.t index f2f322db9b47..448893b264ec 100644 --- a/t/core/etcd-auth.t +++ b/t/core/etcd-auth.t @@ -85,12 +85,16 @@ test_value } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 --- request GET /t --- no_error_log diff --git a/t/core/etcd-mtls.t b/t/core/etcd-mtls.t index a004aef04711..05b3121f9ffc 100644 --- a/t/core/etcd-mtls.t +++ b/t/core/etcd-mtls.t @@ -24,7 +24,6 @@ if ($out !~ m/function:/) { plan('no_plan'); } - add_block_preprocessor(sub { my ($block) = @_; @@ -39,14 +38,18 @@ __DATA__ === TEST 1: run etcd in init phase --- yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: 
false --- init_by_lua_block local apisix = require("apisix") apisix.http_init() @@ -90,14 +93,18 @@ init_by_lua:26: 404 === TEST 2: run etcd in init phase (stream) --- yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- stream_init_by_lua_block apisix = require("apisix") apisix.stream_init() @@ -140,14 +147,18 @@ init_by_lua:26: 404 === TEST 3: sync --- extra_yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- config location /t { content_by_lua_block { @@ -196,14 +207,18 @@ waitdir key === TEST 4: sync (stream) --- extra_yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- stream_server_config content_by_lua_block { local core = require("apisix.core") @@ -245,13 +260,17 @@ waitdir key apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + 
etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key --- init_by_lua_block local apisix = require("apisix") apisix.http_init() diff --git a/t/core/etcd-sync.t b/t/core/etcd-sync.t index a1e674218f91..28a89b21f6cb 100644 --- a/t/core/etcd-sync.t +++ b/t/core/etcd-sync.t @@ -24,9 +24,13 @@ __DATA__ === TEST 1: minus timeout to watch repeatedly --- extra_yaml_config -etcd: - host: - - "http://127.0.0.1:2379" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" --- config location /t { content_by_lua_block { diff --git a/t/core/etcd.t b/t/core/etcd.t index 8d1bdc958920..1b8f25db4ab2 100644 --- a/t/core/etcd.t +++ b/t/core/etcd.t @@ -415,3 +415,23 @@ qr/init_by_lua:\d+: \S+/ init_by_lua:12: ab init_by_lua:19: 200 init_by_lua:26: 404 + + + +=== TEST 8: error handling in server_version +--- config + location /t { + content_by_lua_block { + local etcd_lib = require("resty.etcd") + etcd_lib.new = function() + return nil, "ouch" + end + local etcd = require("apisix.core.etcd") + local res, err = etcd.server_version() + ngx.say(err) + } + } +--- request +GET /t +--- response_body +ouch diff --git a/t/core/grpc.t b/t/core/grpc.t new file mode 100644 index 000000000000..c6d4d7be16a3 --- /dev/null +++ b/t/core/grpc.t @@ -0,0 +1,177 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: unary +--- config + location /t { + content_by_lua_block { + local core = require "apisix.core" + local gcli = core.grpc + assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) + local conn = assert(gcli.connect("127.0.0.1:50051")) + local res, err = conn:call("helloworld.Greeter", "SayHello", { + name = "apisix" }) + conn:close() + if not res then + ngx.status = 503 + ngx.say(err) + return + end + ngx.say(res.message) + } + } +--- response_body +Hello apisix + + + +=== TEST 2: server stream +--- config + location /t { + content_by_lua_block { + local core = require "apisix.core" + local gcli = core.grpc + assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) + local conn = assert(gcli.connect("127.0.0.1:50051")) + local st, err = conn:new_server_stream("helloworld.Greeter", + "SayHelloServerStream", { name = "apisix" }) + if not st then + ngx.status = 503 + ngx.say(err) + return + end + + for i = 1, 5 do + local res, err = st:recv() + if not res then + ngx.status = 503 + ngx.say(err) 
+ return + end + ngx.say(res.message) + end + } + } +--- response_body eval +"Hello apisix\n" x 5 + + + +=== TEST 3: client stream +--- config + location /t { + content_by_lua_block { + local core = require "apisix.core" + local gcli = core.grpc + assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) + local conn = assert(gcli.connect("127.0.0.1:50051")) + local st, err = conn:new_client_stream("helloworld.Greeter", + "SayHelloClientStream", { name = "apisix" }) + if not st then + ngx.status = 503 + ngx.say(err) + return + end + + for i = 1, 3 do + local ok, err = st:send({ name = "apisix" }) + if not ok then + ngx.status = 503 + ngx.say(err) + return + end + end + + local res, err = st:recv_close() + if not res then + ngx.status = 503 + ngx.say(err) + return + end + ngx.say(res.message) + } + } +--- response_body +Hello apisix!Hello apisix!Hello apisix!Hello apisix! + + + +=== TEST 4: bidirectional stream +--- config + location /t { + content_by_lua_block { + local core = require "apisix.core" + local gcli = core.grpc + assert(gcli.load("t/grpc_server_example/proto/helloworld.proto")) + local conn = assert(gcli.connect("127.0.0.1:50051")) + local st, err = conn:new_bidirectional_stream("helloworld.Greeter", + "SayHelloBidirectionalStream", { name = "apisix" }) + if not st then + ngx.status = 503 + ngx.say(err) + return + end + + for i = 1, 3 do + local ok, err = st:send({ name = "apisix" }) + if not ok then + ngx.status = 503 + ngx.say(err) + return + end + end + + assert(st:close_send()) + for i = 1, 5 do + local res, err = st:recv() + if not res then + ngx.status = 503 + ngx.say(err) + return + end + ngx.say(res.message) + end + } + } +--- response_body eval +"Hello apisix\n" x 4 . 
"stream ended\n" diff --git a/t/core/profile.t b/t/core/profile.t index 663dcf1be457..3e28f9706428 100644 --- a/t/core/profile.t +++ b/t/core/profile.t @@ -32,3 +32,21 @@ __DATA__ --- request GET /t --- error_code: 404 + + + +=== TEST 2: set env "APISIX_PROFILE" to Empty String +--- config + location /t { + content_by_lua_block { + local profile = require("apisix.core.profile") + profile.apisix_home = "./test/" + profile.profile = "" + local local_conf_path = profile:yaml_path("config") + ngx.say(local_conf_path) + } + } +--- request +GET /t +--- response_body +./test/conf/config.yaml diff --git a/t/core/utils.t b/t/core/utils.t index e6c4735d3b01..0d82c93152ad 100644 --- a/t/core/utils.t +++ b/t/core/utils.t @@ -128,8 +128,6 @@ qr/"address":.+,"name":"github.com"/ apisix: node_listen: 1984 enable_server_tokens: false - admin_key: null - --- config location /t { content_by_lua_block { diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t index 0fe20a8bb922..bbc1d7457437 100644 --- a/t/debug/debug-mode.t +++ b/t/debug/debug-mode.t @@ -321,12 +321,19 @@ passed "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1995 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] } }]] ) diff --git a/t/deployment/conf_server.t b/t/deployment/conf_server.t index c6a088b380bb..b440591e947f 100644 --- a/t/deployment/conf_server.t +++ b/t/deployment/conf_server.t @@ -14,16 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version =~ m/\/1.17.8/) { - plan(skip_all => "require OpenResty 1.19+"); -} else { - plan('no_plan'); -} +use t::APISIX 'no_plan'; add_block_preprocessor(sub { my ($block) = @_; @@ -38,13 +29,6 @@ add_block_preprocessor(sub { }); -Test::Nginx::Socket::set_http_config_filter(sub { - my $config = shift; - my $snippet = `./t/bin/gen_snippet.lua conf_server`; - $config .= $snippet; - return $config; -}); - run_tests(); __DATA__ @@ -87,11 +71,13 @@ __DATA__ } --- response_body prev_index updated ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: config_provider: etcd + admin: + admin_key: ~ etcd: prefix: "/apisix" host: @@ -111,7 +97,7 @@ deployment: ngx.say(res.body.node.value) } } ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -124,8 +110,6 @@ deployment: - http://[::1]:2379 --- error_log dns resolve localhost, result: ---- no_error_log -[error] --- response_body foo @@ -137,7 +121,7 @@ foo local old_f = resolver.parse_domain local counter = 0 resolver.parse_domain = function (domain) - if domain == "x.com" then + if domain == "localhost" then counter = counter + 1 if counter % 2 == 0 then return "127.0.0.2" @@ -157,7 +141,7 @@ foo ngx.say(res.body.node.value) } } ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -165,12 +149,14 @@ deployment: etcd: prefix: "/apisix" host: - - http://x.com:2379 + # use localhost so the connection is OK in the situation that the DNS + # resolve is not done in APISIX + - http://localhost:2379 --- response_body foo --- error_log -x.com is resolved to: 127.0.0.3 -x.com is resolved to: 127.0.0.2 +localhost is resolved to: 127.0.0.3 +localhost is resolved to: 127.0.0.2 --- no_error_log [error] @@ -178,6 +164,11 @@ x.com is resolved to: 127.0.0.2 === TEST 4: update balancer if the DNS 
result changed --- extra_init_by_lua + local etcd = require("apisix.core.etcd") + etcd.switch_proxy = function () + return etcd.new() + end + local resolver = require("apisix.core.resolver") local old_f = resolver.parse_domain package.loaded.counter = 0 @@ -218,7 +209,7 @@ x.com is resolved to: 127.0.0.2 end } } ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -234,8 +225,6 @@ OK --- error_log x.com is resolved to: 127.0.0.3 x.com is resolved to: 127.0.0.2 ---- no_error_log -[error] @@ -249,7 +238,7 @@ x.com is resolved to: 127.0.0.2 ngx.say(res.body.node.value) } } ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -294,7 +283,7 @@ server { } --- response_body foo ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -302,11 +291,12 @@ deployment: etcd: prefix: "/apisix" host: + - https://127.0.0.1:12379 - https://localhost:12345 + tls: + verify: false --- error_log Receive SNI: localhost ---- no_error_log -[error] @@ -337,7 +327,7 @@ server { } --- response_body foo ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -345,13 +335,13 @@ deployment: etcd: prefix: "/apisix" host: + - https://127.0.0.1:12379 - https://127.0.0.1:12345 tls: + verify: false sni: "x.com" --- error_log Receive SNI: x.com ---- no_error_log -[error] @@ -377,7 +367,7 @@ server { } --- response_body foo ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -415,7 +405,7 @@ server { } --- response_body foo ---- extra_yaml_config +--- yaml_config deployment: role: traditional role_traditional: @@ -427,3 +417,33 @@ deployment: - http://localhost:12345 --- error_log Receive Host: localhost + + + +=== TEST 10: default timeout +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + local etcd_cli = require("resty.etcd") + local f = etcd_cli.new + local timeout + 
etcd_cli.new = function(conf) + timeout = conf.timeout + return f(conf) + end + etcd.new() + ngx.say(timeout) + } + } +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +--- response_body +30 diff --git a/t/deployment/conf_server2.t b/t/deployment/conf_server2.t new file mode 100644 index 000000000000..02149053d593 --- /dev/null +++ b/t/deployment/conf_server2.t @@ -0,0 +1,165 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: health check, ensure unhealthy endpoint is skipped +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + - http://localhost:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 7 do + assert(etcd.set("/apisix/test", "foo")) + end + package.loaded.start_to_fail = nil + ngx.say('OK') + } + } +--- response_body +OK +--- error_log +report failure, endpoint: localhost:12345 +endpoint localhost:12345 is unhealthy, skipped + + + +=== TEST 2: health check, all endpoints are unhealthy +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://localhost:12345 + - http://127.0.0.1:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 6 do + etcd.set("/apisix/test", "foo") + end + package.loaded.start_to_fail = nil + local _, err = etcd.set("/apisix/test", "foo") + ngx.say(err) + } + } +--- response_body +invalid response code: 503 +--- error_log +endpoint localhost:12345 is unhealthy, skipped 
+endpoint 127.0.0.1:12345 is unhealthy, skipped + + + +=== TEST 3: health check, all endpoints recover from unhealthy +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + health_check_timeout: 1 + prefix: "/apisix" + host: + - http://localhost:12345 + - http://127.0.0.1:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 6 do + etcd.set("/apisix/test", "foo") + end + package.loaded.start_to_fail = nil + ngx.sleep(1.2) + local res, err = etcd.set("/apisix/test", "foo") + ngx.say(err or res.body.node.value) + } + } +--- response_body +foo +--- error_log +endpoint localhost:12345 is unhealthy, skipped +endpoint 127.0.0.1:12345 is unhealthy, skipped diff --git a/t/deployment/mtls.t b/t/deployment/mtls.t new file mode 100644 index 000000000000..a0e6cecfac8b --- /dev/null +++ b/t/deployment/mtls.t @@ -0,0 +1,119 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: mTLS for control plane +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:12345/version +--- response_body eval +qr/"etcdserver":/ +--- yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt + + + +=== TEST 2: no client certificate +--- exec +curl -k https://localhost:12345/version +--- response_body eval +qr/No required SSL certificate was sent/ +--- yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt + + + +=== TEST 3: wrong client certificate +--- exec +curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version +--- response_body eval +qr/The SSL certificate error/ +--- yaml_config +deployment: + role: control_plane + 
role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/discovery/consul_kv.t b/t/discovery/consul_kv.t index 30d40c1fa169..011776eafa0e 100644 --- a/t/discovery/consul_kv.t +++ b/t/discovery/consul_kv.t @@ -81,9 +81,10 @@ _EOC_ our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -194,9 +195,10 @@ routes: --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -250,9 +252,10 @@ skip some keys, return default nodes, get response: missing consul_kv services --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -460,9 +463,10 @@ location /v1/kv { --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -516,9 +520,10 @@ location /sleep { --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: diff --git a/t/discovery/consul_kv_dump.t b/t/discovery/consul_kv_dump.t index 7d4e276ad54c..3b9ad68be1f8 100644 --- a/t/discovery/consul_kv_dump.t +++ b/t/discovery/consul_kv_dump.t @@ -45,10 +45,11 @@ _EOC_ our $yaml_config = <<_EOC_; apisix: 
node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -109,10 +110,11 @@ GET /t --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -158,10 +160,11 @@ Configure the invalid consul server addr, and loading the last test 3 generated --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -206,10 +209,11 @@ success --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -266,10 +270,11 @@ test load unexpired /tmp/consul_kv.dump file generated by upper test when initia --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -301,10 +306,11 @@ test load expired ( by check: (dump_file.last_update + dump.expire) < ngx.time ) --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -351,10 +357,11 @@ success --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: @@ -372,10 +379,11 @@ GET /v1/discovery/consul_kv/show_dump_file --- yaml_config apisix: node_listen: 1984 - 
config_center: yaml - enable_admin: false enable_control: true - +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: consul_kv: servers: diff --git a/t/discovery/dns/mix.t b/t/discovery/dns/mix.t index 5308f4b47ae3..d9c2a503596d 100644 --- a/t/discovery/dns/mix.t +++ b/t/discovery/dns/mix.t @@ -31,8 +31,11 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: # service discovery center dns: servers: diff --git a/t/discovery/dns/sanity.t b/t/discovery/dns/sanity.t index b44ad1edc045..6742223ee294 100644 --- a/t/discovery/dns/sanity.t +++ b/t/discovery/dns/sanity.t @@ -27,8 +27,11 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: # service discovery center dns: servers: @@ -70,8 +73,11 @@ __DATA__ --- yaml_config apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: # service discovery center dns: servers: @@ -114,6 +120,10 @@ upstreams: id: 1 --- response_body hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to [0:0:0:0:0:0:0:1]:1980 @@ -127,6 +137,10 @@ upstreams: id: 1 --- response_body hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 @@ -147,9 +161,12 @@ failed to query the DNS server --- yaml_config apisix: node_listen: 1984 - config_center: yaml enable_admin: false enable_resolv_search_option: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: # service discovery center dns: servers: @@ -309,3 +326,115 @@ qr/upstream nodes: \{[^}]+\}/ qr/upstream 
nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ --- response_body hello world + + + +=== TEST 16: prefer A than SRV when A is ahead of SRV in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - A + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_code: 502 +--- error_log +proxy request to 127.0.0.1:80 + + + +=== TEST 17: Invalid order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - B + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +matches none of the enum values + + + +=== TEST 18: Multiple order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - SRV + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +expected unique items but items 1 and 2 are equal + + + +=== TEST 19: invalid order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - a + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +matches none of the enum values diff --git a/t/discovery/eureka.t 
b/t/discovery/eureka.t index 9f429f165405..8ee9b1b76337 100644 --- a/t/discovery/eureka.t +++ b/t/discovery/eureka.t @@ -24,8 +24,10 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: eureka: host: @@ -60,7 +62,7 @@ GET /eureka/apps/APISIX-EUREKA --- response_body_like .*APISIX-EUREKA.* --- error_log -use config_center: yaml +use config_provider: yaml default_weight:80. fetch_interval:10. eureka uri:http://127.0.0.1:8761/eureka/. @@ -110,7 +112,7 @@ GET /eureka-test/eureka/apps/APISIX-EUREKA --- response_body_like .*APISIX-EUREKA.* --- error_log -use config_center: yaml +use config_provider: yaml default_weight:80. fetch_interval:10. eureka uri:http://127.0.0.1:8761/eureka/. diff --git a/t/discovery/nacos.t b/t/discovery/nacos.t index 98c309c78920..9acd3b5a21e0 100644 --- a/t/discovery/nacos.t +++ b/t/discovery/nacos.t @@ -25,8 +25,10 @@ workers(4); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: nacos: host: @@ -44,8 +46,10 @@ _EOC_ our $yaml_auth_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: nacos: host: diff --git a/t/discovery/nacos2.t b/t/discovery/nacos2.t index 51365c79307d..bb17fb68759f 100644 --- a/t/discovery/nacos2.t +++ b/t/discovery/nacos2.t @@ -38,8 +38,10 @@ __DATA__ --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: nacos: host: @@ -223,7 +225,6 @@ done --- yaml_config apisix: node_listen: 1984 - admin_key: null --- extra_yaml_config discovery: nacos: diff --git a/t/error_page/error_page.t b/t/error_page/error_page.t index 
70cc34c68af9..d6ec79a0093a 100644 --- a/t/error_page/error_page.t +++ b/t/error_page/error_page.t @@ -136,12 +136,8 @@ X-Test-Status: 500 content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -198,3 +194,58 @@ qr/(stash|fetch) ngx ctx/ --- grep_error_log_out stash ngx ctx fetch ngx ctx + + + +=== TEST 11: check if the phases after proxy are run when 500 happens before proxy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function() if ngx.var.http_x_test_status ~= nil then;ngx.exit(tonumber(ngx.var.http_x_test_status));end;end"] + }, + "serverless-pre-function": { + "phase": "log", + "functions" : ["return function() ngx.log(ngx.WARN, 'run log phase in error_page') end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: hit +--- request +GET /hello +--- more_headers +X-Test-Status: 500 +--- error_code: 500 +--- response_body_like +.*apisix.apache.org.* +--- error_log +run log phase in error_page diff --git a/t/fuzzing/client_abort.py b/t/fuzzing/client_abort.py index 707297eaab64..3a75442a32a5 100755 --- a/t/fuzzing/client_abort.py +++ b/t/fuzzing/client_abort.py @@ -24,7 +24,7 @@ from public import check_leak, run_test def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 
'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/client_abort", "upstream": { diff --git a/t/fuzzing/http_upstream.py b/t/fuzzing/http_upstream.py new file mode 100755 index 000000000000..877f298cdd5e --- /dev/null +++ b/t/fuzzing/http_upstream.py @@ -0,0 +1,89 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file provides a fuzzing test with different upstreams +import http.client +import json +import random +import threading +from public import check_leak, run_test, connect_admin + + +REQ_PER_THREAD = 50 +THREADS_NUM = 4 +TOTOL_ROUTES = 10 + + +def create_route(): + for i in range(TOTOL_ROUTES): + conn = connect_admin() + scheme = "http" if i % 2 == 0 else "https" + port = ":6666" if i % 2 == 0 else ":6667" + suffix = str(i + 1) + i = str(i) + conf = json.dumps({ + "uri": "/*", + "host": "test" + i + ".com", + "plugins": { + }, + "upstream": { + "scheme": scheme, + "nodes": { + "127.0.0." 
+ suffix + port: 1 + }, + "type": "roundrobin" + }, + }) + + conn.request("PUT", "/apisix/admin/routes/" + i, conf, + headers={ + "X-API-KEY":"edd1c9f034335f136f87ad84b625c8f1", + }) + response = conn.getresponse() + assert response.status <= 300, response.read() + +def req(): + route_id = random.randrange(TOTOL_ROUTES) + conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn.request("GET", "/server_addr", + headers={ + "Host":"test" + str(route_id) + ".com", + }) + response = conn.getresponse() + assert response.status == 200, response.read() + ip = response.read().rstrip().decode() + suffix = str(route_id + 1) + assert "127.0.0." + suffix == ip, f"expect: 127.0.0.{suffix}, actual: {ip}" + +def run_in_thread(): + for i in range(REQ_PER_THREAD): + req() + +@check_leak +def run(): + th = [threading.Thread(target=run_in_thread) for i in range(THREADS_NUM)] + for t in th: + t.start() + for t in th: + t.join() + + +if __name__ == "__main__": + run_test(create_route, run) + diff --git a/t/fuzzing/public.py b/t/fuzzing/public.py index 500b3d39b222..0897ec476bbe 100644 --- a/t/fuzzing/public.py +++ b/t/fuzzing/public.py @@ -30,6 +30,10 @@ def apisix_pwd(): return os.environ.get("APISIX_FUZZING_PWD") or \ (str(Path.home()) + "/work/apisix/apisix") +def connect_admin(): + conn = http.client.HTTPConnection("127.0.0.1", port=9180) + return conn + def check_log(): boofuzz_log = cur_dir() + "/test.log" apisix_errorlog = apisix_pwd() + "/logs/error.log" diff --git a/t/fuzzing/serverless_route_test.py b/t/fuzzing/serverless_route_test.py index e84085f574e0..564914734877 100644 --- a/t/fuzzing/serverless_route_test.py +++ b/t/fuzzing/serverless_route_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_size, s_static, s_string def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i 
http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/post*", "methods": ["POST"], diff --git a/t/fuzzing/simple_http.py b/t/fuzzing/simple_http.py index f2d2099f7ce5..b3db2027e3f2 100755 --- a/t/fuzzing/simple_http.py +++ b/t/fuzzing/simple_http.py @@ -22,7 +22,7 @@ import json import random import threading -from public import check_leak, LEAK_COUNT, run_test +from public import check_leak, LEAK_COUNT, run_test, connect_admin REQ_PER_THREAD = 50 @@ -40,7 +40,7 @@ def create_route(): } } }) - conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn = connect_admin() conn.request("PUT", "/apisix/admin/consumers", conf, headers={ "X-API-KEY":"edd1c9f034335f136f87ad84b625c8f1", @@ -49,7 +49,7 @@ def create_route(): assert response.status <= 300, response.read() for i in range(TOTOL_ROUTES): - conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn = connect_admin() i = str(i) conf = json.dumps({ "uri": "/*", diff --git a/t/fuzzing/simpleroute_test.py b/t/fuzzing/simpleroute_test.py index 8a7e43119810..9ea56ce693b1 100755 --- a/t/fuzzing/simpleroute_test.py +++ b/t/fuzzing/simpleroute_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string def create_route(): - command = '''curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get*", "methods": ["GET"], diff --git a/t/fuzzing/upstream/nginx.conf b/t/fuzzing/upstream/nginx.conf index 3c6405370028..7a94517ce069 100644 --- a/t/fuzzing/upstream/nginx.conf +++ b/t/fuzzing/upstream/nginx.conf @@ -53,6 +53,23 @@ http { ngx.sleep(tonumber(ngx.var.arg_seconds or 1)) } } + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } } + server { + listen 6667 ssl; + 
ssl_certificate ../../certs/apisix.crt; + ssl_certificate_key ../../certs/apisix.key; + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } + } } diff --git a/t/fuzzing/vars_route_test.py b/t/fuzzing/vars_route_test.py index c2559fe3dc39..dc8325484c3c 100644 --- a/t/fuzzing/vars_route_test.py +++ b/t/fuzzing/vars_route_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/parameter*", "vars": [ diff --git a/t/kubernetes/discovery/kubernetes.t b/t/kubernetes/discovery/kubernetes.t index 4a7b8573bae3..5f1171b727b4 100644 --- a/t/kubernetes/discovery/kubernetes.t +++ b/t/kubernetes/discovery/kubernetes.t @@ -14,74 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - -BEGIN { - my $token_var_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"; - my $token_from_var = eval {`cat $token_var_file 2>/dev/null`}; - if ($token_from_var) { - - our $yaml_config = <<_EOC_; -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: {} -_EOC_ - our $token_file = $token_var_file; - our $token_value = $token_from_var; - - } - - my $token_tmp_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; - my $token_from_tmp = eval {`cat $token_tmp_file 2>/dev/null`}; - if ($token_from_tmp) { - - our $yaml_config = <<_EOC_; -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: - client: - token_file: /tmp/var/run/secrets/kubernetes.io/serviceaccount/token -_EOC_ - our $token_file = $token_tmp_file; - our $token_value = $token_from_tmp; - } - - our $scale_ns_c = <<_EOC_; -[ - { - "op": "replace_subsets", - "name": "ep", - "namespace": "ns-c", - "subsets": [ - { - "addresses": [ - { - "ip": "10.0.0.1" - } - ], - "ports": [ - { - "name": "p1", - "port": 5001 - } - ] - } - ] - } -] -_EOC_ - -} - use t::APISIX 'no_plan'; repeat_each(1); -log_level('debug'); +log_level('warn'); no_root_location(); no_shuffle(); workers(4); @@ -96,619 +32,257 @@ _EOC_ $block->set_value("apisix_yaml", $apisix_yaml); - my $main_config = $block->main_config // <<_EOC_; -env KUBERNETES_SERVICE_HOST=127.0.0.1; -env KUBERNETES_SERVICE_PORT=6443; -env KUBERNETES_CLIENT_TOKEN=$::token_value; -env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; -_EOC_ - - $block->set_value("main_config", $main_config); - my $config = $block->config // <<_EOC_; - location /queries { - content_by_lua_block { - local core = require("apisix.core") - local d = require("apisix.discovery.kubernetes") - - ngx.sleep(1) - - ngx.req.read_body() - local request_body = ngx.req.get_body_data() - local queries = core.json.decode(request_body) - local response_body = "{" - for _,query in ipairs(queries) do - 
local nodes = d.nodes(query) - if nodes==nil or #nodes==0 then - response_body=response_body.." "..0 - else - response_body=response_body.." "..#nodes - end - end - ngx.say(response_body.." }") - } - } - location /operators { + location /compare { content_by_lua_block { local http = require("resty.http") local core = require("apisix.core") - local ipairs = ipairs - - ngx.req.read_body() - local request_body = ngx.req.get_body_data() - local operators = core.json.decode(request_body) - - core.log.info("get body ", request_body) - core.log.info("get operators ", #operators) - for _, op in ipairs(operators) do - local method, path, body - local headers = { - ["Host"] = "127.0.0.1:6445" - } - - if op.op == "replace_subsets" then - method = "PATCH" - path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name - if #op.subsets == 0 then - body = '[{"path":"/subsets","op":"replace","value":[]}]' - else - local t = { { op = "replace", path = "/subsets", value = op.subsets } } - body = core.json.encode(t, true) + local local_conf = require("apisix.core.config_local").local_conf() + + local function deep_compare(tbl1, tbl2) + if tbl1 == tbl2 then + return true + elseif type(tbl1) == "table" and type(tbl2) == "table" then + for key1, value1 in pairs(tbl1) do + local value2 = tbl2[key1] + if value2 == nil then + -- avoid the type call for missing keys in tbl2 by directly comparing with nil + return false + elseif value1 ~= value2 then + if type(value1) == "table" and type(value2) == "table" then + if not deep_compare(value1, value2) then + return false + end + else + return false + end + end + end + for key2, _ in pairs(tbl2) do + if tbl1[key2] == nil then + return false + end end - headers["Content-Type"] = "application/json-patch+json" + return true end - if op.op == "replace_labels" then - method = "PATCH" - path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. 
op.name - local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } - body = core.json.encode(t, true) - headers["Content-Type"] = "application/json-patch+json" - end + return false + end - local httpc = http.new() - core.log.info("begin to connect ", "127.0.0.1:6445") - local ok, message = httpc:connect({ - scheme = "http", - host = "127.0.0.1", - port = 6445, - }) - if not ok then - core.log.error("connect 127.0.0.1:6445 failed, message : ", message) - ngx.say("FAILED") - end - local res, err = httpc:request({ - method = method, - path = path, - headers = headers, - body = body, - }) - if err ~= nil then - core.log.err("operator k8s cluster error: ", err) - return 500 - end - if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then - return res.status - end + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local expect = core.json.decode(request_body) + local current = local_conf.discovery.kubernetes + if deep_compare(expect,current) then + ngx.say("true") + else + ngx.say("false, current is ",core.json.encode(current,true)) end - ngx.say("DONE") } } _EOC_ $block->set_value("config", $config); + }); run_tests(); __DATA__ -=== TEST 1: create namespace and endpoints ---- yaml_config eval: $::yaml_config ---- request -POST /operators -[ - { - "op": "replace_subsets", - "namespace": "ns-a", - "name": "ep", - "subsets": [ - { - "addresses": [ - { - "ip": "10.0.0.1" - }, - { - "ip": "10.0.0.2" - } - ], - "ports": [ - { - "name": "p1", - "port": 5001 - } - ] - }, - { - "addresses": [ - { - "ip": "20.0.0.1" - }, - { - "ip": "20.0.0.2" - } - ], - "ports": [ - { - "name": "p2", - "port": 5002 - } - ] - } - ] - }, - { - "op": "create_namespace", - "name": "ns-b" - }, - { - "op": "replace_subsets", - "namespace": "ns-b", - "name": "ep", - "subsets": [ - { - "addresses": [ - { - "ip": "10.0.0.1" - }, - { - "ip": "10.0.0.2" - } - ], - "ports": [ - { - "name": "p1", - "port": 5001 - } - ] - }, - { - "addresses": [ - { - 
"ip": "20.0.0.1" - }, - { - "ip": "20.0.0.2" - } - ], - "ports": [ - { - "name": "p2", - "port": 5002 - } - ] - } - ] - }, - { - "op": "create_namespace", - "name": "ns-c" - }, - { - "op": "replace_subsets", - "namespace": "ns-c", - "name": "ep", - "subsets": [ - { - "addresses": [ - { - "ip": "10.0.0.1" - }, - { - "ip": "10.0.0.2" - } - ], - "ports": [ - { - "port": 5001 - } - ] - }, - { - "addresses": [ - { - "ip": "20.0.0.1" - }, - { - "ip": "20.0.0.2" - } - ], - "ports": [ - { - "port": 5002 - } - ] - } - ] - } -] ---- more_headers -Content-type: application/json ---- error_code: 200 ---- no_error_log -[error] - - - -=== TEST 2: use default parameters ---- yaml_config eval: $::yaml_config ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 2 2 } ---- no_error_log -[error] - - - -=== TEST 3: use specify parameters +=== TEST 1: default value with minimal configuration --- yaml_config apisix: node_listen: 1984 config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: - kubernetes: - service: - host: "127.0.0.1" - port: "6443" - client: - token: "${KUBERNETES_CLIENT_TOKEN}" ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 2 2 } ---- no_error_log -[error] - - - -=== TEST 4: use specify environment parameters ---- yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: - service: - host: ${KUBERNETES_SERVICE_HOST} - port: ${KUBERNETES_SERVICE_PORT} - client: - token: ${KUBERNETES_CLIENT_TOKEN} + kubernetes: {} --- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] +GET /compare +{ + "service": { + 
"schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token" + }, + "shared_size": "1m", + "default_weight": 50 +} --- more_headers Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 2 2 } ---- no_error_log -[error] +--- response_body +true -=== TEST 5: use token_file +=== TEST 2: default value with minimal service and client configuration --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: kubernetes: - client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + service: {} + client: {} --- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token" + }, + "shared_size": "1m", + "default_weight": 50 +} --- more_headers Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 2 2 } ---- no_error_log -[error] +--- response_body +true -=== TEST 6: use http +=== TEST 3: mixing set custom and default values --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: kubernetes: service: - schema: http - host: "127.0.0.1" - port: "6445" - client: - token: "" ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 2 2 } ---- no_error_log -[error] - - - -=== TEST 7: use namespace selector equal ---- yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: 
- kubernetes: - client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - equal: ns-a ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 2 2 0 0 0 0 } ---- no_error_log -[error] - - - -=== TEST 8: use namespace selector not_equal ---- yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: - client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - not_equal: ns-a ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 0 0 2 2 2 2 } ---- no_error_log -[error] - - - -=== TEST 9: use namespace selector match ---- yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: - client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - match: [ns-a,ns-b] + host: "sample.com" + shared_size: "2m" --- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] +GET /compare +{ + "service": { + "schema": "https", + "host": "sample.com", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token_file" : "/var/run/secrets/kubernetes.io/serviceaccount/token" + }, + "shared_size": "2m", + "default_weight": 50 +} --- more_headers Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 0 0 } ---- no_error_log -[error] +--- response_body +true -=== TEST 10: use namespace selector match with regex +=== TEST 4: mixing set custom and default values --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: kubernetes: + service: + schema: "http" client: - token_file: 
${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - match: ["ns-[ab]"] + token: "test" + default_weight: 33 --- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] +GET /compare +{ + "service": { + "schema": "http", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "test" + }, + "shared_size": "1m", + "default_weight": 33 +} --- more_headers Content-type: application/json ---- response_body eval -qr{ 2 2 2 2 0 0 } ---- no_error_log -[error] +--- response_body +true -=== TEST 11: use namespace selector not_match +=== TEST 5: multi cluster mode configuration --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: kubernetes: + - id: "debug" + service: + host: "1.cluster.com" + port: "6445" client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - not_match: ["ns-a"] ---- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 0 0 2 2 2 2 } ---- no_error_log -[error] - - - -=== TEST 12: use namespace selector not_match with regex ---- yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: + token: "token" + - id: "release" + service: + schema: "http" + host: "2.cluster.com" + port: "${MyPort}" client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - namespace_selector: - not_match: ["ns-[ab]"] + token_file: "/var/token" + default_weight: 33 + shared_size: "2m" --- request -GET /queries -["ns-a/ep:p1","ns-a/ep:p2","ns-b/ep:p1","ns-b/ep:p2","ns-c/ep:5001","ns-c/ep:5002"] ---- more_headers -Content-type: application/json ---- response_body eval -qr{ 0 0 0 0 2 2 } ---- no_error_log -[error] - - - -=== TEST 13: use label selector ---- 
yaml_config -apisix: - node_listen: 1984 - config_center: yaml - enable_admin: false -discovery: - kubernetes: - client: - token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} - label_selector: |- - first=1,second ---- request eval +GET /compare [ - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{}}]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{}}]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{}}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{\"first\":\"1\" }}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"2\",\"second\":\"o\" }}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\" }}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -"POST /operators -[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-b/ep:p1\",\"ns-c/ep:5001\"]", - -] ---- response_body eval -[ - "DONE\n", - "DONE\n", - "DONE\n", - "{ 0 0 0 }\n", - "DONE\n", - "{ 0 0 0 }\n", - "DONE\n", - "{ 0 2 0 }\n", - "DONE\n", - "{ 0 2 0 }\n", - "DONE\n", - "{ 0 2 0 }\n", - "DONE\n", - "{ 0 2 2 }\n", -] ---- no_error_log -[error] - - - -=== TEST 14: scale endpoints ---- yaml_config eval: $::yaml_config ---- request eval -[ -"GET 
/queries -[\"ns-a/ep:p1\",\"ns-a/ep:p2\"]", - -"POST /operators -[{\"op\":\"replace_subsets\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"subsets\":[]}]", - -"GET /queries -[\"ns-a/ep:p1\",\"ns-a/ep:p2\"]", - -"GET /queries -[\"ns-c/ep:5001\",\"ns-c/ep:5002\",\"ns-c/ep:p1\"]", - -"POST /operators -$::scale_ns_c", - -"GET /queries -[\"ns-c/ep:5001\",\"ns-c/ep:5002\",\"ns-c/ep:p1\"]", - -] ---- response_body eval -[ - "{ 2 2 }\n", - "DONE\n", - "{ 0 0 }\n", - "{ 2 2 0 }\n", - "DONE\n", - "{ 0 0 1 }\n", + { + "id": "debug", + "service": { + "schema": "https", + "host": "1.cluster.com", + "port": "6445" + }, + "client": { + "token": "token" + }, + "default_weight": 50, + "shared_size": "1m" + }, + { + "id": "release", + "service": { + "schema": "http", + "host": "2.cluster.com", + "port": "${MyPort}" + }, + "client": { + "token_file": "/var/token" + }, + "default_weight": 33, + "shared_size": "2m" + } ] ---- no_error_log -[error] +--- more_headers +Content-type: application/json +--- response_body +true diff --git a/t/kubernetes/discovery/kubernetes2.t b/t/kubernetes/discovery/kubernetes2.t new file mode 100644 index 000000000000..611223ca17cb --- /dev/null +++ b/t/kubernetes/discovery/kubernetes2.t @@ -0,0 +1,755 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + +_EOC_ + + our $scale_ns_c = <<_EOC_; +[ + { + "op": "replace_subsets", + "name": "ep", + "namespace": "ns-c", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + } + ] + } +] +_EOC_ + +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config // <<_EOC_; + location /queries { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local queries = core.json.decode(request_body) + local response_body = "{" + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or 
#nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." }") + } + } + + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_subsets" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + if #op.subsets == 0 then + body = '[{"path":"/subsets","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/subsets", value = op.subsets } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. 
op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + core.log.err("operator k8s cluster error: ", err) + return 500 + end + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + +_EOC_ + + $block->set_value("config", $config); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create namespace and endpoints +--- yaml_config eval: $::yaml_config +--- request +POST /operators +[ + { + "op": "replace_subsets", + "namespace": "ns-a", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_subsets", + "namespace": "ns-b", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": 
"ns-c" + }, + { + "op": "replace_subsets", + "namespace": "ns-c", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "port": 5002 + } + ] + } + ] + } +] +--- more_headers +Content-type: application/json + + + +=== TEST 2: use default parameters +--- yaml_config eval: $::yaml_config +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 3: use specify environment parameters +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token: ${KUBERNETES_CLIENT_TOKEN} + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 4: use namespace selector equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: 
${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + equal: ns-a + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 0 0 0 0 2 2 2 2 2 2 } + + + +=== TEST 5: use namespace selector not_equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_equal: ns-a + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 6: use namespace selector match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: [ns-a,ns-b] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" 
+ client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 7: use namespace selector match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: ["ns-[ab]"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 8: use namespace selector not_match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-a"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + 
"first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 9: use namespace selector not_match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-[ab]"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 0 0 2 2 2 2 2 2 2 2 } + + + +=== TEST 10: use label selector +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + label_selector: |- + first=1,second + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request eval +[ + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{}}]", + +"POST /operators 
+[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{}}]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{}}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"2\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +] +--- response_body eval +[ + "DONE\n", + "DONE\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 2 }\n", +] + + + +=== TEST 11: scale endpoints +--- yaml_config eval: $::yaml_config +--- request eval +[ + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + \"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"POST /operators +[{\"op\":\"replace_subsets\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"subsets\":[]}]", + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + 
\"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + \"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]", + +"POST /operators +$::scale_ns_c", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + \"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]" + +] +--- response_body eval +[ + "{ 2 2 2 2 }\n", + "DONE\n", + "{ 0 0 0 0 }\n", + "{ 2 2 0 2 2 0 }\n", + "DONE\n", + "{ 0 0 1 0 0 1 }\n", +] diff --git a/t/kubernetes/discovery/kubernetes3.t b/t/kubernetes/discovery/kubernetes3.t new file mode 100644 index 000000000000..611223ca17cb --- /dev/null +++ b/t/kubernetes/discovery/kubernetes3.t @@ -0,0 +1,755 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + +_EOC_ + + our $scale_ns_c = <<_EOC_; +[ + { + "op": "replace_subsets", + "name": "ep", + "namespace": "ns-c", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + } + ] + } +] +_EOC_ + +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config // <<_EOC_; + location /queries { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local queries = core.json.decode(request_body) + local response_body = "{" + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." 
"..#nodes + end + end + ngx.say(response_body.." }") + } + } + + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_subsets" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + if #op.subsets == 0 then + body = '[{"path":"/subsets","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/subsets", value = op.subsets } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. 
op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + core.log.err("operator k8s cluster error: ", err) + return 500 + end + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + +_EOC_ + + $block->set_value("config", $config); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create namespace and endpoints +--- yaml_config eval: $::yaml_config +--- request +POST /operators +[ + { + "op": "replace_subsets", + "namespace": "ns-a", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_subsets", + "namespace": "ns-b", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": 
"ns-c" + }, + { + "op": "replace_subsets", + "namespace": "ns-c", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "port": 5002 + } + ] + } + ] + } +] +--- more_headers +Content-type: application/json + + + +=== TEST 2: use default parameters +--- yaml_config eval: $::yaml_config +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 3: use specify environment parameters +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token: ${KUBERNETES_CLIENT_TOKEN} + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 4: use namespace selector equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: 
${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + equal: ns-a + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 0 0 0 0 2 2 2 2 2 2 } + + + +=== TEST 5: use namespace selector not_equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_equal: ns-a + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 6: use namespace selector match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: [ns-a,ns-b] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" 
+ client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 7: use namespace selector match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: ["ns-[ab]"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 8: use namespace selector not_match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-a"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + 
"first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 9: use namespace selector not_match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-[ab]"] + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 0 0 2 2 2 2 2 2 2 2 } + + + +=== TEST 10: use label selector +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + label_selector: |- + first=1,second + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request eval +[ + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{}}]", + +"POST /operators 
+[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{}}]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{}}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"2\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +] +--- response_body eval +[ + "DONE\n", + "DONE\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 2 }\n", +] + + + +=== TEST 11: scale endpoints +--- yaml_config eval: $::yaml_config +--- request eval +[ + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + \"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"POST /operators +[{\"op\":\"replace_subsets\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"subsets\":[]}]", + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + 
\"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + \"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]", + +"POST /operators +$::scale_ns_c", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + \"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]" + +] +--- response_body eval +[ + "{ 2 2 2 2 }\n", + "DONE\n", + "{ 0 0 0 0 }\n", + "{ 2 2 0 2 2 0 }\n", + "DONE\n", + "{ 0 0 1 0 0 1 }\n", +] diff --git a/t/lib/ext-plugin.lua b/t/lib/ext-plugin.lua index 33bb32b15ed5..6403e74f5c5b 100644 --- a/t/lib/ext-plugin.lua +++ b/t/lib/ext-plugin.lua @@ -36,6 +36,7 @@ local extra_info_req = require("A6.ExtraInfo.Req") local extra_info_var = require("A6.ExtraInfo.Var") local extra_info_resp = require("A6.ExtraInfo.Resp") local extra_info_reqbody = require("A6.ExtraInfo.ReqBody") +local extra_info_respbody = require("A6.ExtraInfo.RespBody") local _M = {} local builder = flatbuffers.Builder(0) @@ -55,6 +56,101 @@ local function build_action(action, ty) end +local function ask_extra_info(sock, case_extra_info) + local data + for _, action in ipairs(case_extra_info) do + if action.type == "closed" then + ngx.exit(-1) + return + end + + if action.type == "var" then + local name = builder:CreateString(action.name) + extra_info_var.Start(builder) + extra_info_var.AddName(builder, name) + local var_req = extra_info_var.End(builder) + build_extra_info(var_req, extra_info.Var) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = 
flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + + if action.type == "reqbody" then + extra_info_reqbody.Start(builder) + local reqbody_req = extra_info_reqbody.End(builder) + build_extra_info(reqbody_req, extra_info.ReqBody) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + + if action.type == "respbody" then + extra_info_respbody.Start(builder) + local respbody_req = extra_info_respbody.End(builder) + build_extra_info(respbody_req, extra_info.RespBody) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + end + +end + + function _M.go(case) local sock = ngx.req.socket(true) local ty, data = ext.receive(sock) @@ -178,68 +274,7 @@ function _M.go(case) end if case.extra_info then - for _, action in ipairs(case.extra_info) do - if action.type == "closed" then - ngx.exit(-1) - return - end - - 
if action.type == "var" then - local name = builder:CreateString(action.name) - extra_info_var.Start(builder) - extra_info_var.AddName(builder, name) - local var_req = extra_info_var.End(builder) - build_extra_info(var_req, extra_info.Var) - local req = extra_info_req.End(builder) - builder:Finish(req) - data = builder:Output() - local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) - if not ok then - ngx.log(ngx.ERR, err) - return - end - ngx.log(ngx.WARN, "send extra info req successfully") - - local ty, data = ext.receive(sock) - if not ty then - ngx.log(ngx.ERR, data) - return - end - - assert(ty == constants.RPC_EXTRA_INFO, ty) - local buf = flatbuffers.binaryArray.New(data) - local resp = extra_info_resp.GetRootAsResp(buf, 0) - local res = resp:ResultAsString() - assert(res == action.result, res) - end - - if action.type == "reqbody" then - extra_info_reqbody.Start(builder) - local reqbody_req = extra_info_reqbody.End(builder) - build_extra_info(reqbody_req, extra_info.ReqBody) - local req = extra_info_req.End(builder) - builder:Finish(req) - data = builder:Output() - local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) - if not ok then - ngx.log(ngx.ERR, err) - return - end - ngx.log(ngx.WARN, "send extra info req successfully") - - local ty, data = ext.receive(sock) - if not ty then - ngx.log(ngx.ERR, data) - return - end - - assert(ty == constants.RPC_EXTRA_INFO, ty) - local buf = flatbuffers.binaryArray.New(data) - local resp = extra_info_resp.GetRootAsResp(buf, 0) - local res = resp:ResultAsString() - assert(res == action.result, res) - end - end + ask_extra_info(sock, case.extra_info) end if case.stop == true then @@ -544,6 +579,9 @@ function _M.go(case) http_resp_call_resp.Start(builder) http_resp_call_resp.AddStatus(builder, status) + elseif case.extra_info then + ask_extra_info(sock, case.extra_info) + http_resp_call_resp.Start(builder) else http_resp_call_resp.Start(builder) end diff --git a/t/lib/keycloak_cas.lua 
b/t/lib/keycloak_cas.lua new file mode 100644 index 000000000000..7e578014ce8f --- /dev/null +++ b/t/lib/keycloak_cas.lua @@ -0,0 +1,215 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local http = require "resty.http" + +local _M = {} + +local default_opts = { + idp_uri = "http://127.0.0.1:8080/realms/test/protocol/cas", + cas_callback_uri = "/cas_callback", + logout_uri = "/logout", +} + +function _M.get_default_opts() + return default_opts +end + +-- Login keycloak and return the login original uri +function _M.login_keycloak(uri, username, password) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "login was not redirected to keycloak." + else + local cookies = res.headers['Set-Cookie'] + local cookie_str = _M.concatenate_cookies(cookies) + + res, err = httpc:request_uri(res.headers['Location'], {method = "GET"}) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 200 then + -- Unexpected response. + return nil, res.body + end + + -- From the returned form, extract the submit URI and parameters. 
+ local uri, params = res.body:match('.*action="(.*)%?(.*)" method="post">') + + -- Substitute escaped ampersand in parameters. + params = params:gsub("&", "&") + + local auth_cookies = res.headers['Set-Cookie'] + + -- Concatenate cookies into one string as expected when sent in request header. + local auth_cookie_str = _M.concatenate_cookies(auth_cookies) + + -- Invoke the submit URI with parameters and cookies, adding username + -- and password in the body. + -- Note: Username and password are specific to the Keycloak Docker image used. + res, err = httpc:request_uri(uri .. "?" .. params, { + method = "POST", + body = "username=" .. username .. "&password=" .. password, + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + ["Cookie"] = auth_cookie_str + } + }) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "Login form submission did not return redirect to redirect URI." + end + + local keycloak_cookie_str = _M.concatenate_cookies(res.headers['Set-Cookie']) + + -- login callback + local redirect_uri = res.headers['Location'] + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "login callback: " .. + "did not return redirect to original URI." + end + + cookies = res.headers['Set-Cookie'] + cookie_str = _M.concatenate_cookies(cookies) + + return res, nil, cookie_str, keycloak_cookie_str + end +end + +-- Login keycloak and return the login original uri +function _M.login_keycloak_for_second_sp(uri, keycloak_cookie_str) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "login was not redirected to keycloak." 
+ end + + local cookies = res.headers['Set-Cookie'] + local cookie_str = _M.concatenate_cookies(cookies) + + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = keycloak_cookie_str + } + }) + ngx.log(ngx.INFO, keycloak_cookie_str) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, res.body + end + + -- login callback + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "login callback: " .. + "did not return redirect to original URI." + end + + cookies = res.headers['Set-Cookie'] + cookie_str = _M.concatenate_cookies(cookies) + + return res, nil, cookie_str +end + +function _M.logout_keycloak(uri, cookie_str, keycloak_cookie_str) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "logout was not redirected to keycloak." + else + -- keycloak logout + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = keycloak_cookie_str + } + }) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 200 then + return nil, "Logout did not return 200." + end + + return res, nil + end +end + +-- Concatenate cookies into one string as expected when sent in request header. +function _M.concatenate_cookies(cookies) + local cookie_str = "" + if type(cookies) == 'string' then + cookie_str = cookies:match('([^;]*); .*') + else + -- Must be a table. 
+ local len = #cookies + if len > 0 then + cookie_str = cookies[1]:match('([^;]*); .*') + for i = 2, len do + cookie_str = cookie_str .. "; " .. cookies[i]:match('([^;]*); .*') + end + end + end + + return cookie_str, nil +end + +return _M diff --git a/t/lib/server.lua b/t/lib/server.lua index 029f463e534f..a8ec77af6960 100644 --- a/t/lib/server.lua +++ b/t/lib/server.lua @@ -298,9 +298,15 @@ function _M.wolf_rbac_access_check() ngx.say(json_encode({ok=true, data={ userInfo={nickname="administrator", username="admin", id="100"} }})) - else + elseif resName == '/hello/500' then + ngx.status = 500 + ngx.say(json_encode({ok=false, reason="ERR_SERVER_ERROR"})) + elseif resName == '/hello/401' then ngx.status = 401 - ngx.say(json_encode({ok=false, reason="no permission to access"})) + ngx.say(json_encode({ok=false, reason="ERR_TOKEN_INVALID"})) + else + ngx.status = 403 + ngx.say(json_encode({ok=false, reason="ERR_ACCESS_DENIED"})) end end @@ -377,6 +383,10 @@ for i = 1, 100 do _M["print_uri_" .. i] = print_uri end +function _M.print_uri_detailed() + ngx.say("ngx.var.uri: ", ngx.var.uri) + ngx.say("ngx.var.request_uri: ", ngx.var.request_uri) +end function _M.headers() local args = ngx.req.get_uri_args() diff --git a/t/node/chash-hashon.t b/t/node/chash-hashon.t index 0b9c161d9031..3cd24559c1e5 100644 --- a/t/node/chash-hashon.t +++ b/t/node/chash-hashon.t @@ -51,17 +51,14 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "username": "jack", - "plugins": { - "key-auth": { - "key": "auth-jack" - } + "value": { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" } } - }, - "action": "set" + } }]] ) @@ -82,17 +79,14 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "username": "tom", - "plugins": { - "key-auth": { - "key": "auth-tom" - } + "value": { + "username": "tom", + "plugins": { + "key-auth": { + "key": "auth-tom" } } - }, - "action": "set" + } }]] ) ngx.say(code .. 
" " ..body) diff --git a/t/node/client-mtls-openresty-1-19.t b/t/node/client-mtls-openresty.t similarity index 94% rename from t/node/client-mtls-openresty-1-19.t rename to t/node/client-mtls-openresty.t index a7bf517c17c1..1779abe09126 100644 --- a/t/node/client-mtls-openresty-1-19.t +++ b/t/node/client-mtls-openresty.t @@ -20,11 +20,7 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; my $version = eval { `$nginx_binary -V 2>&1` }; if ($version !~ m/\/apisix-nginx-module/) { - if ($version =~ m/\/1.17.8/) { - plan(skip_all => "require OpenResty 1.19+"); - } else { - plan('no_plan'); - } + plan('no_plan'); } else { plan(skip_all => "for vanilla OpenResty only"); } @@ -78,7 +74,7 @@ __DATA__ depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/node/client-mtls.t b/t/node/client-mtls.t index afccc93752b9..aa326dbe98b6 100644 --- a/t/node/client-mtls.t +++ b/t/node/client-mtls.t @@ -58,7 +58,7 @@ __DATA__ ca = ("test.com"):rep(128), } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -92,7 +92,7 @@ GET /t client = { } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -173,7 +173,7 @@ GET /t depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/node/consumer-plugin2.t b/t/node/consumer-plugin2.t index 249441a6c1a4..64c3869bce56 100644 --- a/t/node/consumer-plugin2.t +++ b/t/node/consumer-plugin2.t @@ -238,3 +238,200 @@ x-real-ip: 127.0.0.1 } --- response_body {"key-auth":true,"proxy-rewrite":true} + + + +=== TEST 7: configure non-auth plugins in the consumer and run it's rewrite phase +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit routes and ip-restriction work well +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 9: use the latest consumer modifiedIndex as lrucache key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.print(res.body) + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1", "127.0.0.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bala" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local headers = { + ["Authorization"] = "Basic Zm9vOmJhbGE=" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +{"message":"Your IP address is not allowed"} +hello world +hello world diff --git a/t/node/grpc-proxy-unary.t b/t/node/grpc-proxy-unary.t index 393016d1578f..f1a063c54a15 100644 --- a/t/node/grpc-proxy-unary.t +++ b/t/node/grpc-proxy-unary.t @@ -70,8 +70,8 @@ routes: methods: [ POST ] - service_protocol: grpc upstream: + scheme: grpc nodes: "127.0.0.1:50051": 1 type: roundrobin @@ -114,7 +114,7 @@ grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plai === TEST 4: Unary API gRPC proxy with tls --- http2 --- apisix_yaml -ssl: +ssls: - id: 1 cert: "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n" diff --git a/t/node/grpc-proxy.t b/t/node/grpc-proxy.t index c4338e77bfe3..18427afe6dd5 100644 --- a/t/node/grpc-proxy.t +++ b/t/node/grpc-proxy.t @@ -31,8 +31,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false 
+deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -102,13 +104,13 @@ apikey: user-key upstreams: - id: 1 type: roundrobin + scheme: grpc nodes: "127.0.0.1:9088": 1 routes: - id: 1 methods: - POST - service_protocol: grpc uri: "/hello" upstream_id: 1 #END @@ -130,7 +132,6 @@ routes: - id: 1 methods: - POST - service_protocol: grpc uri: "/hello" plugins: key-auth: @@ -139,6 +140,7 @@ routes: - jack upstream: type: roundrobin + scheme: grpc nodes: "127.0.0.1:9088": 1 #END @@ -183,3 +185,99 @@ GET /hello --- error_code: 502 --- error_log upstream: "grpc://127.0.0.1:80" + + + +=== TEST 7: set authority header +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:50051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): [^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): 127.0.0.1:1984"/ + + + +=== TEST 8: set authority header to node header +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + pass_host: node + nodes: + "127.0.0.1:50051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): [^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): 127.0.0.1:50051"/ + + + +=== TEST 9: set authority header to specific 
value +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + pass_host: rewrite + upstream_host: hello.world + nodes: + "127.0.0.1:50051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): [^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): hello.world"/ diff --git a/t/node/healthcheck-discovery.t b/t/node/healthcheck-discovery.t index db9c623171d9..d5bf6bf5ad52 100644 --- a/t/node/healthcheck-discovery.t +++ b/t/node/healthcheck-discovery.t @@ -28,8 +28,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/healthcheck-passive.t b/t/node/healthcheck-passive.t index be85ecc9df35..f3f694b9f38f 100644 --- a/t/node/healthcheck-passive.t +++ b/t/node/healthcheck-passive.t @@ -165,3 +165,173 @@ GET /t --- error_code: 400 --- response_body {"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: [\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 4: set route(only active + active & passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + 
"healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello_", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. [[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: only one route should have passive healthcheck +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + -- only /hello_ has passive healthcheck + local res, err = httpc:request_uri(uri .. 
"/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/enabled healthcheck passive/ +--- grep_error_log_out +enabled healthcheck passive + + + +=== TEST 6: make sure passive healthcheck works (conf is not corrupted by the default value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/\[healthcheck\] \([^)]+\) unhealthy HTTP increment/ +--- grep_error_log_out +[healthcheck] (upstream#/apisix/routes/2) unhealthy HTTP increment diff --git a/t/node/healthcheck2.t b/t/node/healthcheck2.t index 2939175f2ebb..e52cf13a052f 100644 --- a/t/node/healthcheck2.t +++ b/t/node/healthcheck2.t @@ -30,8 +30,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/https-proxy.t b/t/node/https-proxy.t index 236fdaf76316..e7ff50185d1c 100644 --- a/t/node/https-proxy.t +++ b/t/node/https-proxy.t @@ -35,7 +35,7 @@ run_tests; __DATA__ -=== TEST 1: add route to HTTPS upstream (old way) +=== TEST 1: add route to HTTPS upstream --- config location /t { content_by_lua_block { @@ -44,101 +44,6 @@ __DATA__ ngx.HTTP_PUT, [[{ "methods": ["GET"], - "plugins": { - "proxy-rewrite": { - "scheme": "https" 
- } - }, - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1983": 1 - } - }, - "uri": "/hello" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 2: hit the upstream (old way) ---- request -GET /hello ---- more_headers -host: www.sni.com ---- error_log -Receive SNI: www.sni.com - - - -=== TEST 3: add route to HTTPS upstream ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "methods": ["GET"], - "upstream": { - "scheme": "https", - "type": "roundrobin", - "nodes": { - "127.0.0.1:1983": 1 - } - }, - "uri": "/hello" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 4: hit the upstream ---- request -GET /hello ---- more_headers -host: www.sni.com ---- error_log -Receive SNI: www.sni.com - - - -=== TEST 5: add route to HTTPS upstream (mix) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "methods": ["GET"], - "plugins": { - "proxy-rewrite": { - "scheme": "https" - } - }, "upstream": { "scheme": "https", "type": "roundrobin", @@ -163,7 +68,7 @@ passed -=== TEST 6: hit the upstream +=== TEST 2: hit the upstream --- request GET /hello --- more_headers @@ -173,7 +78,7 @@ Receive SNI: www.sni.com -=== TEST 7: use 443 as the default port +=== TEST 3: use 443 as the default port --- apisix_yaml routes: - @@ -192,7 +97,7 @@ upstream: "https://127.0.0.1:443/hello" -=== TEST 8: use 80 as the http's default port +=== TEST 4: use 80 as the http's default port --- apisix_yaml routes: - @@ -210,7 +115,7 @@ upstream: "http://127.0.0.1:80/hello" -=== TEST 9: rewrite SNI +=== TEST 5: rewrite SNI --- log_level: debug --- apisix_yaml routes: @@ 
-237,7 +142,7 @@ x-real-ip: 127.0.0.1 -=== TEST 10: node's SNI +=== TEST 6: node's SNI --- log_level: debug --- apisix_yaml routes: diff --git a/t/node/least_conn.t b/t/node/least_conn.t index 9df9c0536f43..4a33cab93d45 100644 --- a/t/node/least_conn.t +++ b/t/node/least_conn.t @@ -29,8 +29,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/merge-route.t b/t/node/merge-route.t index b6f1d467e8d5..2d1f48f4a1ae 100644 --- a/t/node/merge-route.t +++ b/t/node/merge-route.t @@ -180,7 +180,9 @@ qr/1980/ "time_window": 60, "rejected_code": 503, "key": "remote_addr", - "disable": true + "_meta": { + "disable": true + } } }, "uri": "/server_port", @@ -249,6 +251,7 @@ qr/merge_service_route.*"time_window":60/] ngx.HTTP_PUT, [[{ "upstream": { + "scheme": "https", "type": "roundrobin", "nodes": { "httpbin.org:443": 1 @@ -280,11 +283,11 @@ passed local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ - "uri": "/get", + "uri": "/fake", "host": "httpbin.org", "plugins": { "proxy-rewrite": { - "scheme": "https" + "uri": "/get" } }, "service_id": "1" @@ -308,7 +311,7 @@ passed === TEST 12: hit route --- request -GET /get +GET /fake --- more_headers host: httpbin.org --- response_body eval @@ -321,7 +324,7 @@ qr/"Host": "httpbin.org"/ === TEST 13: not hit route --- request -GET /get +GET /fake --- more_headers host: httpbin.orgxxx --- error_code: 404 diff --git a/t/node/plugin-configs.t b/t/node/plugin-configs.t index 770392276a77..9c5ab55814ca 100644 --- a/t/node/plugin-configs.t +++ b/t/node/plugin-configs.t @@ -249,3 +249,166 @@ property "block_rules" validation failed --- response_body hello hello world + + + +=== TEST 5: don't override the plugin in the route +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + }, + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/helloaa", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1, + "plugins": { + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/helloaa') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.say(org_body) + } + } +--- response_body +world + + + +=== TEST 6: use the latest plugin_consigs after merge the plugins from consumer and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.print(res.body) + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1", "127.0.0.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +{"message":"Your IP address is not allowed"} +hello world diff --git a/t/node/plugin.t b/t/node/plugin.t new file mode 100644 index 000000000000..f2c54c72bd56 --- /dev/null +++ b/t/node/plugin.t @@ -0,0 +1,48 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: set custom log format +--- extra_init_by_lua + local exp = require("apisix.plugins.example-plugin") + exp.destroy = function() + ngx.log(ngx.WARN, "destroy method called") + end +--- config + location /t { + return 200 "dummy"; + } +--- shutdown_error_log +destroy method called diff --git a/t/node/priority-balancer/health-checker.t b/t/node/priority-balancer/health-checker.t index 445210bd6caf..5e03f0e66b3f 100644 --- a/t/node/priority-balancer/health-checker.t +++ b/t/node/priority-balancer/health-checker.t @@ -30,8 +30,11 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/priority-balancer/sanity.t b/t/node/priority-balancer/sanity.t index 2c4b1f6573cb..0ef66bf1d9d4 100644 --- a/t/node/priority-balancer/sanity.t +++ b/t/node/priority-balancer/sanity.t @@ -30,8 +30,11 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/route-status.t b/t/node/route-status.t index 24ac4ca735e7..cad6a051ac74 100644 --- a/t/node/route-status.t +++ b/t/node/route-status.t @@ -27,7 +27,6 @@ apisix: node_listen: 1984 router: http: 'radixtree_host_uri' - admin_key: null _EOC_ run_tests(); diff --git a/t/node/upstream-discovery.t b/t/node/upstream-discovery.t index 8ce9606df8c1..9b6ceb28ba94 100644 --- 
a/t/node/upstream-discovery.t +++ b/t/node/upstream-discovery.t @@ -28,8 +28,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/upstream-domain-with-special-dns.t b/t/node/upstream-domain-with-special-dns.t index 0481ab7e4a79..9781e2b381cf 100644 --- a/t/node/upstream-domain-with-special-dns.t +++ b/t/node/upstream-domain-with-special-dns.t @@ -31,8 +31,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -112,9 +114,11 @@ connect to 127.0.0.1:1053 --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false dns_resolver_valid: 900 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml upstreams: - @@ -193,9 +197,11 @@ connect to 127.0.0.1:1053 --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false dns_resolver_valid: 1 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml upstreams: - diff --git a/t/node/upstream-domain-with-special-ipv6-dns.t b/t/node/upstream-domain-with-special-ipv6-dns.t index 5ec838bca7b4..61d2ca1f1e1e 100644 --- a/t/node/upstream-domain-with-special-ipv6-dns.t +++ b/t/node/upstream-domain-with-special-ipv6-dns.t @@ -31,8 +31,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/node/upstream-domain.t 
b/t/node/upstream-domain.t index 2cf71dd87696..12b7f53aa2d1 100644 --- a/t/node/upstream-domain.t +++ b/t/node/upstream-domain.t @@ -174,9 +174,7 @@ failed to parse domain: httpbin.orgx local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] + ngx.HTTP_DELETE ) if code >= 300 then @@ -202,9 +200,7 @@ passed local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] + ngx.HTTP_DELETE ) if code >= 300 then diff --git a/t/node/upstream-ipv6.t b/t/node/upstream-ipv6.t index 51d2e8b84610..8aa39f6cf080 100644 --- a/t/node/upstream-ipv6.t +++ b/t/node/upstream-ipv6.t @@ -108,3 +108,187 @@ GET /hello hello world --- no_error_log [error] + + + +=== TEST 5: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 6: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 7: set upstream, one array item to specify node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "[::1]", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 8: hit routes 
+--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 9: set upstream, one hash key to specify node, in wrong format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "::1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: hit routes +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() to [::0.1.25.128]:80 failed + + + +=== TEST 11: set upstream, two array items to specify nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + }, + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] diff --git a/t/node/upstream-keepalive-pool.t b/t/node/upstream-keepalive-pool.t index 084522b4e07a..26f9306d079c 100644 --- a/t/node/upstream-keepalive-pool.t +++ b/t/node/upstream-keepalive-pool.t @@ -635,3 +635,106 @@ qr/lua balancer: keepalive create pool, .*/ qr/^lua balancer: keepalive create pool, crc32: \S+, size: 8 lua balancer: keepalive create pool, crc32: \S+, size: 4 $/ + + + +=== TEST 14: upstreams with SNI, then without SNI +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = 
require("lib.test_admin").test + local json = require("toolkit.json") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "pass_host": "rewrite", + "upstream_host": "a.com", + "keepalive_pool": { + "size": 4 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "http", + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + pass_host = "rewrite", + upstream_host = "b.com", + keepalive_pool = { + size = 8 + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 2 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. [[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 15: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + for i = 0, 1 do + local idx = i % 2 + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. 
idx) + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- grep_error_log eval +qr/lua balancer: keepalive create pool, .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 4 +lua balancer: keepalive create pool, crc32: \S+, size: 8 +$/ diff --git a/t/node/upstream-mtls.t b/t/node/upstream-mtls.t index c909dbc9a64f..3ee1c28cce93 100644 --- a/t/node/upstream-mtls.t +++ b/t/node/upstream-mtls.t @@ -167,7 +167,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) -- upstream local data = { @@ -203,7 +203,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.tls.client_key == ssl_key) + ngx.say(res.value.tls.client_key == ssl_key) local data = { upstream = { @@ -240,7 +240,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) } } --- request @@ -341,7 +341,6 @@ GET /t --- yaml_config apisix: node_listen: 1984 - admin_key: null ssl: key_encrypt_salt: null --- config @@ -387,7 +386,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) -- upstream local data = { @@ -423,7 +422,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.tls.client_key == ssl_key) + ngx.say(res.value.tls.client_key == ssl_key) local data = { upstream = { @@ -460,7 +459,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) } } --- request @@ -562,7 +561,7 @@ hello world cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', 
ngx.HTTP_PUT, json.encode(data) ) @@ -629,7 +628,7 @@ hello world cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -664,7 +663,7 @@ failed to get ssl cert: ssl type should be 'client' local t = require("lib.test_admin") local json = require("toolkit.json") - local code, body = t.test('/apisix/admin/ssl/1', ngx.HTTP_DELETE) + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code diff --git a/t/node/upstream-websocket.t b/t/node/upstream-websocket.t index 5ca21d28ee33..a24474749b30 100644 --- a/t/node/upstream-websocket.t +++ b/t/node/upstream-websocket.t @@ -254,7 +254,7 @@ qr/failed to new websocket: bad "upgrade" request header: nil/ local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "127.0.0.1"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/node/upstream.t b/t/node/upstream.t index 70da36145b9b..ec6da1f82f33 100644 --- a/t/node/upstream.t +++ b/t/node/upstream.t @@ -140,11 +140,7 @@ hello world ngx.sleep(0.5) local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.print("[delete] code: ", code, " message: ", message) } @@ -164,11 +160,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.say("[delete] code: ", code, " message: ", message) } @@ -188,11 +180,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) 
ngx.say("[delete] code: ", code, " message: ", message) } @@ -212,11 +200,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.say("[delete] code: ", code) } @@ -436,7 +420,6 @@ GET /t } --- request GET /t ---- skip_nginx: 5: < 1.19.0 --- response_body passed --- no_error_log @@ -447,7 +430,6 @@ passed === TEST 18: hit route --- request GET /uri ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: 127.0.0.1/ --- error_log @@ -494,7 +476,6 @@ proxy request to 127.0.0.1:1980 } --- request GET /t ---- skip_nginx: 5: < 1.19.0 --- response_body passed --- no_error_log @@ -505,7 +486,6 @@ passed === TEST 20: hit route --- request GET /uri ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: localhost/ --- error_log @@ -607,7 +587,6 @@ qr/host: localhost:1980/ ngx.say(body) } } ---- skip_nginx: 5: < 1.19.0 --- request GET /t --- response_body @@ -619,7 +598,6 @@ passed === TEST 24: hit route --- log_level: debug ---- skip_nginx: 5: < 1.19.0 --- request GET /uri --- error_log diff --git a/t/perf/test_http.py b/t/perf/test_http.py index 135f24d067a1..36bb236b4d0b 100755 --- a/t/perf/test_http.py +++ b/t/perf/test_http.py @@ -37,9 +37,15 @@ def create_conf(): with open("./conf/config-perf.yaml", "w") as f: conf = { "apisix": { - "config_center": "yaml", "enable_admin": False, }, + "deployment": { + "role": "data_plane", + "role_data_plane": { + "config_provider": "yaml", + } + + }, "nginx_config": { "worker_processes": 2 } diff --git a/t/plugin/authz-keycloak.t b/t/plugin/authz-keycloak.t index 7eb287b5a8e9..f3428a3ad363 100644 --- a/t/plugin/authz-keycloak.t +++ b/t/plugin/authz-keycloak.t @@ -74,32 +74,7 @@ done -=== TEST 3: minimal valid configuration with audience ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.authz-keycloak") - local ok, err = 
plugin.check_schema({ - audience = "foo", - discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration" - }) - if not ok then - ngx.say(err) - end - - ngx.say("done") - } - } ---- request -GET /t ---- response_body -done ---- no_error_log -[error] - - - -=== TEST 4: minimal valid configuration w/o discovery when lazy_load_paths=true +=== TEST 3: minimal valid configuration w/o discovery when lazy_load_paths=true --- config location /t { content_by_lua_block { @@ -126,7 +101,7 @@ done -=== TEST 5: minimal valid configuration with discovery when lazy_load_paths=true +=== TEST 4: minimal valid configuration with discovery when lazy_load_paths=true --- config location /t { content_by_lua_block { @@ -152,7 +127,7 @@ done -=== TEST 6: full schema check +=== TEST 5: full schema check --- config location /t { content_by_lua_block { @@ -162,7 +137,6 @@ done token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token", resource_registration_endpoint = "https://host.domain/auth/realms/foo/authz/protection/resource_set", client_id = "University", - audience = "University", client_secret = "secret", grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket", policy_enforcement_mode = "ENFORCING", @@ -197,7 +171,7 @@ done -=== TEST 7: token_endpoint and discovery both missing +=== TEST 6: token_endpoint and discovery both missing --- config location /t { content_by_lua_block { @@ -220,7 +194,7 @@ done -=== TEST 8: client_id and audience both missing +=== TEST 7: client_id missing --- config location /t { content_by_lua_block { @@ -236,14 +210,14 @@ done --- request GET /t --- response_body -allOf 2 failed: object matches none of the required: ["client_id"] or ["audience"] +property "client_id" is required done --- no_error_log [error] -=== TEST 9: resource_registration_endpoint and discovery both missing and lazy_load_paths is true +=== TEST 8: resource_registration_endpoint and discovery both missing and lazy_load_paths is true --- 
config location /t { content_by_lua_block { @@ -263,14 +237,14 @@ done --- request GET /t --- response_body -allOf 3 failed: object matches none of the required +allOf 2 failed: object matches none of the required done --- no_error_log [error] -=== TEST 10: Add https endpoint with ssl_verify true (default) +=== TEST 9: Add https endpoint with ssl_verify true (default) --- config location /t { content_by_lua_block { @@ -312,7 +286,7 @@ passed -=== TEST 11: TEST with fake token and https endpoint +=== TEST 10: TEST with fake token and https endpoint --- config location /t { content_by_lua_block { @@ -345,7 +319,7 @@ Error while sending authz request to https://127.0.0.1:8443/auth/realms/Universi -=== TEST 12: Add https endpoint with ssl_verify false +=== TEST 11: Add https endpoint with ssl_verify false --- config location /t { content_by_lua_block { @@ -388,7 +362,7 @@ passed -=== TEST 13: TEST for https based token verification with ssl_verify false +=== TEST 12: TEST for https based token verification with ssl_verify false --- config location /t { content_by_lua_block { @@ -418,7 +392,7 @@ Request denied: HTTP 401 Unauthorized. Body: {"error":"HTTP 401 Unauthorized"} -=== TEST 14: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values +=== TEST 13: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values --- config location /t { content_by_lua_block { @@ -460,7 +434,7 @@ passed -=== TEST 15: test for permission is empty and enforcement mode is "ENFORCING". +=== TEST 14: test for permission is empty and enforcement mode is "ENFORCING". 
--- config location /t { content_by_lua_block { @@ -485,7 +459,7 @@ GET /t -=== TEST 16: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values , access_denied_redirect_uri is "http://127.0.0.1/test" +=== TEST 15: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values , access_denied_redirect_uri is "http://127.0.0.1/test" --- config location /t { content_by_lua_block { @@ -528,7 +502,7 @@ passed -=== TEST 17: test for permission is empty and enforcement mode is "ENFORCING" , access_denied_redirect_uri is "http://127.0.0.1/test". +=== TEST 16: test for permission is empty and enforcement mode is "ENFORCING" , access_denied_redirect_uri is "http://127.0.0.1/test". --- config location /t { content_by_lua_block { @@ -555,7 +529,7 @@ Location: http://127.0.0.1/test -=== TEST 18: Add https endpoint with password_grant_token_generation_incoming_uri +=== TEST 17: Add https endpoint with password_grant_token_generation_incoming_uri --- config location /t { content_by_lua_block { @@ -629,7 +603,7 @@ true -=== TEST 19: no username or password +=== TEST 18: no username or password --- config location /t { content_by_lua_block { diff --git a/t/plugin/authz-keycloak2.t b/t/plugin/authz-keycloak2.t index 48d887449972..de6d2e5f773d 100644 --- a/t/plugin/authz-keycloak2.t +++ b/t/plugin/authz-keycloak2.t @@ -582,49 +582,7 @@ true -=== TEST 14: add plugin with lazy_load_paths and http_method_as_scope (using audience) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "authz-keycloak": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/uma2-configuration", - "audience": "course_management", - "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", - "lazy_load_paths": true, - "http_method_as_scope": true - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1982": 1 
- }, - "type": "roundrobin" - }, - "uri": "/course/foo" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed ---- no_error_log -[error] - - - -=== TEST 15: Get access token for teacher and access view course route. +=== TEST 14: Get access token for teacher and access view course route. --- config location /t { content_by_lua_block { @@ -672,7 +630,7 @@ true -=== TEST 16: Get access token for student and access view course route. +=== TEST 15: Get access token for student and access view course route. --- config location /t { content_by_lua_block { diff --git a/t/plugin/basic-auth.t b/t/plugin/basic-auth.t index 5d626edd07af..ca2a82055b90 100644 --- a/t/plugin/basic-auth.t +++ b/t/plugin/basic-auth.t @@ -340,7 +340,7 @@ GET /t ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code @@ -384,7 +384,7 @@ GET /t ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code diff --git a/t/plugin/cas-auth.t b/t/plugin/cas-auth.t new file mode 100644 index 000000000000..d7629433759c --- /dev/null +++ b/t/plugin/cas-auth.t @@ -0,0 +1,227 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('warn'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Add route for sp1 +--- config + location /t { + content_by_lua_block { + local kc = require("lib.keycloak_cas") + local core = require("apisix.core") + + local default_opts = kc.get_default_opts() + local opts = core.table.deepcopy(default_opts) + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/cas1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "host" : "127.0.0.1", + "plugins": { + "cas-auth": ]] .. core.json.encode(opts) .. [[ + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: login and logout ok +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local kc = require "lib.keycloak_cas" + + local path = "/uri" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + local username = "test" + local password = "test" + + local res, err, cas_cookie, keycloak_cookie = kc.login_keycloak(uri .. 
path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie + } + }) + assert(res.status == 200) + ngx.say(res.body) + + res, err = kc.logout_keycloak(uri .. "/logout", cas_cookie, keycloak_cookie) + assert(res.status == 200) + } + } +--- response_body_like +uri: /uri +cookie: .* +host: 127.0.0.1:1984 +user-agent: .* +x-real-ip: 127.0.0.1 + + + +=== TEST 3: Add route for sp2 +--- config + location /t { + content_by_lua_block { + local kc = require("lib.keycloak_cas") + local core = require("apisix.core") + + local default_opts = kc.get_default_opts() + local opts = core.table.deepcopy(default_opts) + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/cas2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "host" : "127.0.0.2", + "plugins": { + "cas-auth": ]] .. core.json.encode(opts) .. [[ + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: login sp1 and sp2, then do single logout +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local kc = require "lib.keycloak_cas" + + local path = "/uri" + + -- login to sp1 + local uri = "http://127.0.0.1:" .. ngx.var.server_port + local username = "test" + local password = "test" + + local res, err, cas_cookie, keycloak_cookie = kc.login_keycloak(uri .. path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. 
res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie + } + }) + assert(res.status == 200) + + -- login to sp2, which would skip login at keycloak side + local uri2 = "http://127.0.0.2:" .. ngx.var.server_port + + local res, err, cas_cookie2 = kc.login_keycloak_for_second_sp(uri2 .. path, keycloak_cookie) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri2 .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie2 + } + }) + assert(res.status == 200) + + -- SLO (single logout) + res, err = kc.logout_keycloak(uri .. "/logout", cas_cookie, keycloak_cookie) + assert(res.status == 200) + + -- login to sp2, which would do normal login process at keycloak side + local res, err, cas_cookie2, keycloak_cookie = kc.login_keycloak(uri2 .. path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie2 + } + }) + assert(res.status == 200) + + -- logout sp2 + res, err = kc.logout_keycloak(uri2 .. "/logout", cas_cookie2, keycloak_cookie) + assert(res.status == 200) + } + } diff --git a/t/plugin/clickhouse-logger.t b/t/plugin/clickhouse-logger.t index 5426ce028489..ccb0be11ad91 100644 --- a/t/plugin/clickhouse-logger.t +++ b/t/plugin/clickhouse-logger.t @@ -48,6 +48,18 @@ add_block_preprocessor(sub { ngx.say("ok") } } + location /clickhouse-logger/test1 { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "clickhouse body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "clickhouse headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } } _EOC_ @@ -131,7 +143,7 @@ passed } } --- response_body -property "endpoint_addr" is required +value should match only one schema, but matches none @@ -175,7 +187,49 @@ passed -=== TEST 5: access local server +=== TEST 5: add plugin on routes using multi clickhouse-logger +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addrs": ["http://127.0.0.1:10420/clickhouse-logger/test", + "http://127.0.0.1:10420/clickhouse-logger/test1"], + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 6: access local server --- request GET /opentracing --- response_body diff --git a/t/plugin/dubbo-proxy/route.t b/t/plugin/dubbo-proxy/route.t index da8fd5383a10..5824125b220e 100644 --- a/t/plugin/dubbo-proxy/route.t +++ b/t/plugin/dubbo-proxy/route.t @@ -48,8 +48,11 @@ _EOC_ my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -162,7 +165,6 @@ dubbo success apisix: node_listen: 1984 enable_admin: true - admin_key: null plugins: - key-auth - dubbo-proxy @@ -230,7 +232,6 @@ passed apisix: node_listen: 1984 enable_admin: true - admin_key: null plugins: - key-auth - dubbo-proxy @@ -243,7 +244,6 @@ plugins: apisix: node_listen: 1984 enable_admin: true - admin_key: null plugins: - key-auth - dubbo-proxy diff --git a/t/plugin/dubbo-proxy/upstream.t b/t/plugin/dubbo-proxy/upstream.t index 
f831b211fc79..71f6dc48d670 100644 --- a/t/plugin/dubbo-proxy/upstream.t +++ b/t/plugin/dubbo-proxy/upstream.t @@ -50,8 +50,11 @@ _EOC_ my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/plugin/echo.t b/t/plugin/echo.t index 99aec41e62c5..71571e969d6b 100644 --- a/t/plugin/echo.t +++ b/t/plugin/echo.t @@ -211,7 +211,7 @@ Location: https://www.iresty.com end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request diff --git a/t/plugin/elasticsearch-logger.t b/t/plugin/elasticsearch-logger.t new file mode 100644 index 000000000000..2e82953f46ee --- /dev/null +++ b/t/plugin/elasticsearch-logger.t @@ -0,0 +1,453 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local ok, err + local configs = { + -- full configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services", + type = "collector" + }, + auth = { + username = "elastic", + password = "123456" + }, + ssl_verify = false, + timeout = 60, + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 2, + batch_max_size = 10, + }, + -- minimize configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + } + }, + -- property "endpoint_addr" is required + { + field = { + index = "services" + } + }, + -- property "field" is required + { + endpoint_addr = "http://127.0.0.1:9200", + }, + -- property "index" is required + { + endpoint_addr = "http://127.0.0.1:9200", + field = {} + }, + -- property "endpoint" must not end with "/" + { + endpoint_addr = "http://127.0.0.1:9200/", + field = { + index = "services" + } + } + } + + local plugin = require("apisix.plugins.elasticsearch-logger") + for i = 1, #configs do + ok, err = plugin.check_schema(configs[i]) + if err then + ngx.say(err) + else + ngx.say("passed") + end + end + } + } +--- response_body_like +passed +passed +property "endpoint_addr" is required +property "field" is required +property "field" validation failed: property "index" is required +property "endpoint_addr" validation failed: failed to match pattern "\[\^/\]\$" with "http://127.0.0.1:9200/" + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t 
= require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: test route (success write) +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.get_full_log = function(ngx, conf) + return { + test = "test" + } + end + + http.request_uri = function(self, uri, params) + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_full_log(ngx, {}) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch full log body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +check elasticsearch full log body success + + + +=== TEST 4: set route (auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { 
+ endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries + + + +=== TEST 6: set route (no auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test route (no auth, failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] failed to process entries: elasticsearch server returned status: 401 +"reason":"missing authentication credentials for REST request [/_bulk]" +Batch Processor[elasticsearch-logger] exceeded the max_retry_count + + + +=== TEST 8: set route (error auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "111111" + }, + batch_max_size = 1, + 
inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: test route (error auth failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] failed to process entries +Batch Processor[elasticsearch-logger] exceeded the max_retry_count + + + +=== TEST 10: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_PUT, [[{ + "log_format": { + "custom_host": "$host", + "custom_timestamp": "$time_iso8601", + "custom_client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body_like +passed +passed + + + +=== TEST 11: hit route and check custom elasticsearch logger +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.get_custom_format_log = function(ctx, format) + return { + test = "test" + } + end + + http.request_uri = function(self, uri, params) + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = 
log_util.get_custom_format_log(nil, nil) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch custom body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- response_body +hello world +--- wait: 2 +--- error_log +check elasticsearch custom body success diff --git a/t/plugin/error-log-logger-clickhouse.t b/t/plugin/error-log-logger-clickhouse.t index 2e7c14aa7bd0..02ad604baae7 100644 --- a/t/plugin/error-log-logger-clickhouse.t +++ b/t/plugin/error-log-logger-clickhouse.t @@ -32,6 +32,14 @@ add_block_preprocessor(sub { $block->set_value("request", "GET /t"); } + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + my $http_config = $block->http_config // <<_EOC_; server { listen 10420; @@ -88,12 +96,6 @@ done === TEST 2: test unreachable server ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { @@ -128,12 +130,6 @@ clickhouse headers: x-clickhouse-database:default === TEST 3: put plugin metadata and log an error level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { @@ -169,12 +165,6 @@ clickhouse headers: x-clickhouse-database:default === TEST 4: log a warn level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { @@ -194,12 +184,6 @@ clickhouse headers: x-clickhouse-database:default === TEST 5: log some messages ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { @@ -219,12 +203,6 @@ 
clickhouse headers: x-clickhouse-database:default === TEST 6: log an info level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { @@ -240,12 +218,6 @@ this is an info message for test6 === TEST 7: delete metadata for the plugin, recover to the default ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /t { content_by_lua_block { diff --git a/t/plugin/error-log-logger-skywalking.t b/t/plugin/error-log-logger-skywalking.t index 289ac369e947..54354f34ea31 100644 --- a/t/plugin/error-log-logger-skywalking.t +++ b/t/plugin/error-log-logger-skywalking.t @@ -21,6 +21,19 @@ repeat_each(1); no_long_string(); no_root_location(); worker_connections(128); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } +}); + run_tests; __DATA__ @@ -56,12 +69,6 @@ done === TEST 2: test unreachable server ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -90,12 +97,6 @@ qr/Batch Processor\[error-log-logger\] failed to process entries: error while se === TEST 3: put plugin metadata and log an error level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -126,12 +127,6 @@ qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an error message for test.*\ === TEST 4: log a warn level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -149,12 +144,6 @@ qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is a warning message for test.* === TEST 5: log 
some messages ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -173,12 +162,6 @@ qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an error message for test.*\ === TEST 6: log an info level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -196,12 +179,6 @@ qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an info message for test.*\" === TEST 7: delete metadata for the plugin, recover to the default ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { diff --git a/t/plugin/error-log-logger.t b/t/plugin/error-log-logger.t index 7aa37a422b16..451f85299703 100644 --- a/t/plugin/error-log-logger.t +++ b/t/plugin/error-log-logger.t @@ -63,6 +63,15 @@ _EOC_ _EOC_ $block->set_value("stream_server_config", $stream_default_server); + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + }); run_tests; @@ -70,6 +79,7 @@ run_tests; __DATA__ === TEST 1: not enable the plugin +--- extra_yaml_config --- config location /tg { content_by_lua_block { @@ -87,9 +97,6 @@ error-log-logger === TEST 2: enable the plugin, but not init the metadata ---- yaml_config -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -107,12 +114,6 @@ qr/please set the correct plugin_metadata for error-log-logger/ === TEST 3: set a wrong metadata ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -145,12 +146,6 @@ qr/please set the correct plugin_metadata for error-log-logger/ === TEST 4: test unreachable server ---- yaml_config -apisix: - enable_admin: 
true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -180,12 +175,6 @@ qr/\[Server\] receive data:.*this is a warning message for test./ === TEST 5: log a warn level message ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -215,9 +204,6 @@ qr/\[Server\] receive data:.*this is a warning message for test./ === TEST 6: log an error level message ---- yaml_config -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -236,9 +222,6 @@ qr/\[Server\] receive data:.*this is an error message for test./ === TEST 7: log an info level message ---- yaml_config -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -257,12 +240,6 @@ qr/\[Server\] receive data:.*this is an info message for test./ === TEST 8: delete metadata for the plugin, recover to the default ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -288,12 +265,6 @@ passed === TEST 9: want to reload the plugin by route ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -336,12 +307,6 @@ qr/please set the correct plugin_metadata for error-log-logger/ === TEST 10: avoid sending stale error log ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -375,12 +340,6 @@ qr/\[Server\] receive data:.*this is an error message for test./ === TEST 11: delete the route ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -406,12 +365,6 @@ passed === TEST 12: log a warn level message (schema compatibility testing) ---- yaml_config -apisix: - 
enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -441,9 +394,6 @@ qr/\[Server\] receive data:.*this is a warning message for test./ === TEST 13: log an error level message (schema compatibility testing) ---- yaml_config -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -462,9 +412,6 @@ qr/\[Server\] receive data:.*this is an error message for test./ === TEST 14: log an info level message (schema compatibility testing) ---- yaml_config -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { @@ -483,12 +430,6 @@ qr/\[Server\] receive data:.*this is an info message for test./ === TEST 15: delete metadata for the plugin, recover to the default (schema compatibility testing) ---- yaml_config -apisix: - enable_admin: true - admin_key: null -plugins: - - error-log-logger --- config location /tg { content_by_lua_block { diff --git a/t/plugin/example.t b/t/plugin/example.t index 985aa11f111b..21972d290a08 100644 --- a/t/plugin/example.t +++ b/t/plugin/example.t @@ -165,12 +165,15 @@ GET /t --- response_body plugin name: example-plugin priority: 0 --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" # etcd address - prefix: "/apisix" # apisix configurations prefix - timeout: 1 - +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 plugins: - example-plugin - not-exist-plugin diff --git a/t/plugin/ext-plugin/extra-info.t b/t/plugin/ext-plugin/extra-info.t index 56b67be0f89b..e55bb673b7b0 100644 --- a/t/plugin/ext-plugin/extra-info.t +++ b/t/plugin/ext-plugin/extra-info.t @@ -181,3 +181,175 @@ GET /hello --- error_code: 503 --- error_log failed to receive RPC_HTTP_REQ_CALL: closed + + + +=== TEST 5: ask response body (not exist) +--- request +GET /hello +--- extra_stream_config + server { + listen 
unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = nil} + } + ext.go({extra_info = actions}) + } + } +--- error_log: failed to read response body: not exits + + + +=== TEST 6: add route with ext-plugin-post-resp +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/*", + "plugins": { + "ext-plugin-post-resp": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 7: ask var +--- request +GET /hello?x= +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "var", name = "server_addr", result = "127.0.0.1"}, + {type = "var", name = "remote_addr", result = "127.0.0.1"}, + {type = "var", name = "route_id", result = "1"}, + {type = "var", name = "arg_x", result = ""}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +send extra info req successfully +send extra info req successfully +send extra info req successfully +--- response_body +hello world + + + +=== TEST 8: ask response body +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = "hello world\n"}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra 
info req successfully +--- response_body +hello world + + + +=== TEST 9: ask response body (chunked) +--- request +GET /hello_chunked +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = "hello world\n"}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +--- response_body +hello world + + + +=== TEST 10: ask request body (empty) +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = nil} + } + ext.go({extra_info = actions}) + } + } + + + +=== TEST 11: ask request body +--- request +POST /hello +123 +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = "123"} + } + ext.go({extra_info = actions}) + } + } diff --git a/t/plugin/ext-plugin/sanity-openresty-1-19.t b/t/plugin/ext-plugin/sanity2.t similarity index 86% rename from t/plugin/ext-plugin/sanity-openresty-1-19.t rename to t/plugin/ext-plugin/sanity2.t index c33d86e007fb..206e7b090c2d 100644 --- a/t/plugin/ext-plugin/sanity-openresty-1-19.t +++ b/t/plugin/ext-plugin/sanity2.t @@ -14,16 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version =~ m/\/1.17.8/) { - plan(skip_all => "require OpenResty 1.19+"); -} else { - plan('no_plan'); -} +use t::APISIX 'no_plan'; repeat_each(1); no_long_string(); diff --git a/t/plugin/google-cloud-logging2.t b/t/plugin/google-cloud-logging2.t index 4b52ebc6a8ea..5c60775de4b7 100644 --- a/t/plugin/google-cloud-logging2.t +++ b/t/plugin/google-cloud-logging2.t @@ -58,24 +58,8 @@ __DATA__ } } - local expected = { - node = { - value = { - plugins = { - ["google-cloud-logging"] = { - max_retry_count = 0, - retry_delay = 1, - buffer_duration = 60, - batch_max_size = 1000, - inactive_timeout = 5, - } - } - } - } - } - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config, expected) + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) if code >= 300 then ngx.status = code diff --git a/t/plugin/grpc-transcode.t b/t/plugin/grpc-transcode.t index 78baac0ff73b..a005c0113096 100644 --- a/t/plugin/grpc-transcode.t +++ b/t/plugin/grpc-transcode.t @@ -42,7 +42,7 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local etcd = require("apisix.core.etcd") - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -64,7 +64,7 @@ __DATA__ end ngx.say(body) - local res = assert(etcd.get('/proto/1')) + local res = assert(etcd.get('/protos/1')) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time @@ -86,7 +86,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -123,7 +123,7 @@ 
passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = t('/apisix/admin/protos/2', ngx.HTTP_DELETE ) @@ -317,7 +317,7 @@ Connection refused) while connecting to upstream location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -562,7 +562,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -606,7 +606,6 @@ passed [[{ "methods": ["GET", "POST"], "uri": "/grpctest", - "service_protocol": "grpc", "plugins": { "grpc-transcode": { "proto_id": "1", @@ -669,7 +668,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -771,7 +770,6 @@ failed to encode request data to protobuf [[{ "methods": ["GET", "POST"], "uri": "/grpctest", - "service_protocol": "grpc", "plugins": { "grpc-transcode": { "proto_id": "1", diff --git a/t/plugin/grpc-transcode2.t b/t/plugin/grpc-transcode2.t index 7c8286650f50..3bdede0999a4 100644 --- a/t/plugin/grpc-transcode2.t +++ b/t/plugin/grpc-transcode2.t @@ -41,7 +41,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -135,7 +135,7 @@ Content-Type: application/json location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = 
t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -240,7 +240,7 @@ failed to encode request data to protobuf local content = t.read_file("t/grpc_server_example/proto.pb") local data = {content = ngx.encode_base64(content)} - local code, body = t.test('/apisix/admin/proto/1', + local code, body = t.test('/apisix/admin/protos/1', ngx.HTTP_PUT, json.encode(data) ) @@ -388,7 +388,7 @@ Undefined service method location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -500,12 +500,8 @@ qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003\\u0000 content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -581,7 +577,7 @@ qr/request log: \{.*body":\"\{\\"result\\":3}/ content_by_lua_block { local http = require "resty.http" local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -700,7 +696,7 @@ set protobuf option: int64_as_string content_by_lua_block { local http = require "resty.http" local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; diff --git a/t/plugin/grpc-transcode3.t b/t/plugin/grpc-transcode3.t index a027a84bd9bd..97208788ab1b 100644 --- a/t/plugin/grpc-transcode3.t +++ b/t/plugin/grpc-transcode3.t @@ -42,7 +42,7 @@ __DATA__ content_by_lua_block { local http = require "resty.http" local t = require("lib.test_admin").test - local code, body = 
t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; diff --git a/t/plugin/hmac-auth2.t b/t/plugin/hmac-auth2.t index 4358ef0f8cb8..a845f3de95fd 100644 --- a/t/plugin/hmac-auth2.t +++ b/t/plugin/hmac-auth2.t @@ -400,7 +400,7 @@ x-real-ip: 127.0.0.1 ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code @@ -436,7 +436,7 @@ x-real-ip: 127.0.0.1 ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code diff --git a/t/plugin/http-logger-json.t b/t/plugin/http-logger-json.t index 9787165532e4..46b79797b81b 100644 --- a/t/plugin/http-logger-json.t +++ b/t/plugin/http-logger-json.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/plugin/http-logger2.t b/t/plugin/http-logger2.t index 688bc8bc3e01..f99c11483d9d 100644 --- a/t/plugin/http-logger2.t +++ b/t/plugin/http-logger2.t @@ -42,6 +42,13 @@ add_block_preprocessor(sub { } } + location /http-logger/Authorization { + content_by_lua_block { + ngx.log(ngx.WARN, "received Authorization header: [", ngx.var.http_authorization, "]") + ngx.say("OK") + } + } + location /http-logger/center { content_by_lua_block { local function str_split(str, reps) @@ -253,3 +260,52 @@ test-http-logger-response request.body:test-http-logger-request response.body:test-http-logger-response --- wait: 1.5 + + + +=== TEST 8: test default Authorization header sent to the log 
server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["POST"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/Authorization", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + "type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- request +POST /http-logger/test +test-http-logger-request +--- error_log +received Authorization header: [nil] +--- wait: 1.5 diff --git a/t/plugin/ip-restriction.t b/t/plugin/ip-restriction.t index d2cbc75c0325..d32c8b2e41a4 100644 --- a/t/plugin/ip-restriction.t +++ b/t/plugin/ip-restriction.t @@ -587,7 +587,9 @@ qr/failed to validate item 1: object matches none of the required/ "blacklist": [ "127.0.0.0/24" ], - "disable": true + "_meta": { + "disable": true + } } } }]] diff --git a/t/plugin/jwt-auth.t b/t/plugin/jwt-auth.t index 197a4259453f..81cf8c935834 100644 --- a/t/plugin/jwt-auth.t +++ b/t/plugin/jwt-auth.t @@ -54,7 +54,7 @@ __DATA__ } } --- response_body_like eval -qr/{"algorithm":"HS256","base64_secret":false,"exp":86400,"key":"123","secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ +qr/{"algorithm":"HS256","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ @@ -498,7 +498,7 @@ property "key" is required local code, body, raw = t('/apisix/admin/schema/plugins/jwt-auth?schema_type=consumer', ngx.HTTP_GET, [[ 
-{"dependencies":{"algorithm":{"oneOf":[{"properties":{"algorithm":{"default":"HS256","enum":["HS256","HS512"]}}},{"required":["public_key","private_key"],"properties":{"algorithm":{"enum":["RS256"]},"public_key":{"type":"string"},"private_key":{"type":"string"}}}]}},"required":["key"],"type":"object","properties":{"base64_secret":{"default":false,"type":"boolean"},"secret":{"type":"string"},"algorithm":{"enum":["HS256","HS512","RS256"],"default":"HS256","type":"string"},"exp":{"minimum":1,"default":86400,"type":"integer"},"key":{"type":"string"}}} +{"dependencies":{"algorithm":{"oneOf":[{"properties":{"algorithm":{"default":"HS256","enum":["HS256","HS512"]}}},{"required":["public_key","private_key"],"properties":{"algorithm":{"enum":["RS256","ES256"]},"public_key":{"type":"string"},"private_key":{"type":"string"}}}]}},"required":["key"],"type":"object","properties":{"base64_secret":{"default":false,"type":"boolean"},"secret":{"type":"string"},"algorithm":{"enum":["HS256","HS512","RS256","ES256"],"default":"HS256","type":"string"},"exp":{"minimum":1,"default":86400,"type":"integer"},"key":{"type":"string"}}} ]] ) @@ -517,7 +517,7 @@ property "key" is required ngx.HTTP_GET, nil, [[ - {"properties":{"disable":{"type":"boolean"}},"type":"object"} + {"properties":{},"type":"object"} ]] ) ngx.status = code @@ -535,7 +535,7 @@ property "key" is required ngx.HTTP_GET, nil, [[ - {"properties":{"disable":{"type":"boolean"}},"type":"object"} + {"properties":{},"type":"object"} ]] ) ngx.status = code @@ -957,7 +957,7 @@ qr/failed to sign jwt/ } } --- response_body_like eval -qr/{"algorithm":"HS512","base64_secret":false,"exp":86400,"key":"123","secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ +qr/{"algorithm":"HS512","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ @@ -1083,7 +1083,7 @@ hello world content_by_lua_block { local plugin = require("apisix.plugins.jwt-auth") local core = require("apisix.core") - local conf = {key = 
"123", algorithm = "ES256"} + local conf = {key = "123", algorithm = "ES512"} local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) if not ok then @@ -1126,7 +1126,7 @@ base64_secret required but the secret is not in base64 format location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body, res_data = t('/apisix/admin/consumers', + local code, body, res = t('/apisix/admin/consumers', ngx.HTTP_PUT, [[{ "username": "kerouac", @@ -1136,29 +1136,18 @@ base64_secret required but the secret is not in base64 format "secret": "my-secret-key" } } - }]], - [[{ - "node": { - "value": { - "username": "kerouac", - "plugins": { - "jwt-auth": { - "key": "exp-not-set", - "secret": "my-secret-key" - } - } - } - }, - "action": "set" }]] - ) + ) + + res = require("toolkit.json").decode(res) + assert(res.value.plugins["jwt-auth"].exp == 86400) ngx.status = code - ngx.say(require("toolkit.json").encode(res_data)) + ngx.say(body) } } ---- response_body_like eval -qr/"exp":86400/ +--- response_body +passed @@ -1235,3 +1224,94 @@ qr/failed to validate dependent schema for \\"algorithm\\"/ --- error_code: 400 --- response_body_like eval qr/failed to validate dependent schema for \\"algorithm\\"/ + + + +=== TEST 52: add consumer with username and plugins with public_key, private_key(ES256) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-es256", + "algorithm": "ES256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEVs/o5+uQbTjL3chynL4wXgUg2R9\nq9UU8I5mEovUf86QZ7kOBIjJwqnzD1omageEHWwHdBO6B+dFabmdT9POxg==\n-----END PUBLIC KEY-----", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgevZzL1gdAFr88hb2\nOF/2NxApJCzGCEDdfSp6VQO30hyhRANCAAQRWz+jn65BtOMvdyHKcvjBeBSDZH2r\n1RTwjmYSi9R/zpBnuQ4EiMnCqfMPWiZqB4QdbAd0E7oH50VpuZ1P087G\n-----END PRIVATE KEY-----" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 53: JWT sign and verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 54: sign/verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, sign = t('/apisix/plugin/jwt/sign?key=user-key-es256', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world diff --git a/t/plugin/jwt-auth2.t b/t/plugin/jwt-auth2.t index a6c6f31ad7e7..6225133878a6 100644 --- a/t/plugin/jwt-auth2.t +++ b/t/plugin/jwt-auth2.t @@ -147,3 +147,322 @@ GET /hello jwt-header: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs --- response_body hello world + + + +=== TEST 8: use lifetime_grace_period default value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANW16kX5SMrMa2t7F2R1w6Bk/qpjS4QQ\n]] .. + [[hnrbED3Dpsl9JXAx90MYsIWp51hBxJSE/EPVK8WF/sjHK1xQbEuDfEECAwEAAQ==\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. 
+ "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7w", + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- resgiter jwt sign api + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- get JWT token + local code, err, sign = t('/apisix/plugin/jwt/sign?key=test-jwt-a', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + + -- the JWT has not expired, so it should be valid + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- after 1.1 seconds, the JWT should be expired, because the exp is only 1 second + ngx.sleep(1.1) + res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- error_code: 401 +--- response_body eval +qr/failed to verify jwt/ +--- error_log eval +qr/ailed to verify jwt: 'exp' claim expired at/ + + + +=== TEST 9: lifetime_grace_period is 2 seconds +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANW16kX5SMrMa2t7F2R1w6Bk/qpjS4QQ\n]] .. + [[hnrbED3Dpsl9JXAx90MYsIWp51hBxJSE/EPVK8WF/sjHK1xQbEuDfEECAwEAAQ==\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. + "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7w", + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a", + "lifetime_grace_period": 2 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- resgiter jwt sign api + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- get JWT token + local code, err, sign = t('/apisix/plugin/jwt/sign?key=test-jwt-a', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + + -- after 1.1 seconds, since lifetime_grace_period is 2 seconds, + -- so the JWT has not expired, it should be valid + ngx.sleep(1.1) + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- response_body +hello world diff --git a/t/plugin/key-auth.t b/t/plugin/key-auth.t index a3483573a7d2..33e86fe19d13 100644 --- a/t/plugin/key-auth.t +++ b/t/plugin/key-auth.t @@ -188,7 +188,7 @@ GET /hello code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, string.format('{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}', username, key), - string.format('{"node":{"value":{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}},"action":"set"}', username, key) + string.format('{"value":{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}}', username, key) ) end diff --git a/t/plugin/ldap-auth.t b/t/plugin/ldap-auth.t index 9ecac330f948..31b7a643013e 100644 --- a/t/plugin/ldap-auth.t +++ b/t/plugin/ldap-auth.t @@ -202,6 +202,8 @@ Authorization: Basic Zm9vOmZvbwo= --- error_code: 401 --- response_body {"message":"Invalid user authorization"} +--- error_log +The supplied credential is invalid @@ -302,7 +304,7 @@ find consumer user01 ngx.HTTP_GET, nil, [[ -{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"disable":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} +{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] ) ngx.status = code @@ -338,8 +340,107 @@ find consumer user01 ngx.HTTP_GET, nil, [[ -{"title":"work with route or service 
object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"disable":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] +{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] ) ngx.status = code } } + + + +=== TEST 17: enable ldap-auth with tls +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "localhost:1636", + "uid": "cn", + "use_tls": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 19: enable ldap-auth with tls, verify CA +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "localhost:1636", + "uid": "cn", + "use_tls": true, + "tls_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- 
response_body +hello world +--- error_log +find consumer user01 diff --git a/t/plugin/limit-count2.t b/t/plugin/limit-count2.t index 621edad8a912..0dadaf78e990 100644 --- a/t/plugin/limit-count2.t +++ b/t/plugin/limit-count2.t @@ -764,3 +764,42 @@ limit key: afafafhao2:remote_addr limit key: afafafhao2:remote_addr --- response_body [200,200,503,503] + + + +=== TEST 22: group with disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "abcd", + "_meta": { + "disable": false + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/plugin/limit-count3.t b/t/plugin/limit-count3.t index 4298a20bd604..0c5490616ad2 100644 --- a/t/plugin/limit-count3.t +++ b/t/plugin/limit-count3.t @@ -224,3 +224,64 @@ passed } --- response_body [200,200] + + + +=== TEST 7: set another route with the same conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 61 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: avoid sharing the same counter +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello1" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200] diff --git a/t/plugin/log-rotate.t b/t/plugin/log-rotate.t index 5e04be131d2d..8ce51dd5d8bb 100644 --- a/t/plugin/log-rotate.t +++ b/t/plugin/log-rotate.t @@ -25,11 +25,7 @@ no_root_location(); add_block_preprocessor(sub { my ($block) = @_; - my $user_yaml_config = <<_EOC_; -apisix: - node_listen: 1984 - admin_key: null - + my $extra_yaml_config = <<_EOC_; plugins: # plugin list - log-rotate @@ -39,7 +35,7 @@ plugin_attr: max_kept: 3 _EOC_ - $block->set_value("yaml_config", $user_yaml_config); + $block->set_value("extra_yaml_config", $extra_yaml_config); if ((!defined $block->error_log) && (!defined $block->no_error_log)) { diff --git a/t/plugin/log-rotate2.t b/t/plugin/log-rotate2.t index 1a28f33e8829..faebee804378 100644 --- a/t/plugin/log-rotate2.t +++ b/t/plugin/log-rotate2.t @@ -25,11 +25,7 @@ no_root_location(); add_block_preprocessor(sub { my ($block) = @_; - if (!defined $block->yaml_config) { - my $yaml_config = <<_EOC_; -apisix: - node_listen: 1984 - admin_key: ~ + my $extra_yaml_config = <<_EOC_; plugins: - log-rotate plugin_attr: @@ -39,8 +35,7 @@ plugin_attr: enable_compression: true _EOC_ - $block->set_value("yaml_config", $yaml_config); - } + $block->set_value("extra_yaml_config", $extra_yaml_config); if ((!defined $block->error_log) && (!defined $block->no_error_log)) { $block->set_value("no_error_log", "[error]"); @@ -61,7 +56,7 @@ __DATA__ location /t { content_by_lua_block { ngx.log(ngx.ERR, "start xxxxxx") - ngx.sleep(2.5) + ngx.sleep(3.5) local has_split_access_file = false local has_split_error_file = false local lfs = require("lfs") @@ -105,7 +100,7 @@ start xxxxxx --- config location /t { content_by_lua_block { - ngx.sleep(2) + ngx.sleep(3) local default_logs = {} for file_name 
in lfs.dir(ngx.config.prefix() .. "/logs/") do diff --git a/t/plugin/log-rotate3.t b/t/plugin/log-rotate3.t new file mode 100644 index 000000000000..e6dbdd877e2c --- /dev/null +++ b/t/plugin/log-rotate3.t @@ -0,0 +1,138 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 86400 + max_size: 9 + max_kept: 3 + enable_compression: false +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: log rotate by max_size +--- config + location /t { + content_by_lua_block { + ngx.log(ngx.ERR, "start xxxxxx") + ngx.sleep(2) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. 
"/logs/") do + if string.match(file_name, "__access.log$") then + has_split_access_file = true + end + + if string.match(file_name, "__error.log$") then + has_split_error_file = true + end + end + + if not has_split_access_file and has_split_error_file then + ngx.status = 200 + else + ngx.status = 500 + end + } + } + + + +=== TEST 2: in current log +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + ngx.log(ngx.WARN, "start xxxxxx") + ngx.say("done") + } + } +--- response_body +done +--- error_log +start xxxxxx + + + +=== TEST 3: check file changes +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + + local default_logs = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log$") or string.match(file_name, "__access.log$") then + local filepath = ngx.config.prefix() .. "/logs/" .. file_name + local attr = lfs.attributes(filepath) + if attr then + default_logs[filepath] = { change = attr.change, size = attr.size } + end + end + end + + ngx.sleep(1) + + local passed = false + for filepath, origin_attr in pairs(default_logs) do + local check_attr = lfs.attributes(filepath) + if check_attr.change == origin_attr.change and check_attr.size == origin_attr.size then + passed = true + else + passed = false + break + end + end + + if passed then + ngx.say("passed") + end + } + } +--- response_body +passed diff --git a/t/plugin/opa2.t b/t/plugin/opa2.t index 75d9632ba26a..d14269ce696c 100644 --- a/t/plugin/opa2.t +++ b/t/plugin/opa2.t @@ -56,7 +56,9 @@ __DATA__ "username": "test", "plugins": { "key-auth": { - "disable": false, + "_meta": { + "disable": false + }, "key": "test-key" } } @@ -68,7 +70,9 @@ __DATA__ "name": "s1", "plugins": { "key-auth": { - "disable": false + "_meta": { + "disable": false + } } } }]], diff --git a/t/plugin/openfunction.t b/t/plugin/openfunction.t new file mode 100644 index 000000000000..8da960df4d40 --- /dev/null +++ b/t/plugin/openfunction.t @@ -0,0 +1,335 
@@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({function_uri = "http://127.0.0.1:30585/default/test-body"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({timeout = 60000}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" is required + + + +=== TEST 3: wrong type for `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = 
plugin.check_schema({function_uri = 30858}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" validation failed: wrong type: expected string, got number + + + +=== TEST 4: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/function-sample" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit route (with GET request) +--- request +GET /hello +--- response_body +Hello, function-sample! + + + +=== TEST 6: reset route with test-body function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30585/default/test-body" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route with POST method +--- request +POST /hello +test +--- response_body +Hello, test! 
+ + + +=== TEST 8: reset route with test-header function with service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/", + "authorization": { + "service_token": "test:test" + } + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route with POST request with service_token +--- request +POST /hello +--- response_body chomp +[Basic dGVzdDp0ZXN0] + + + +=== TEST 10: reset route with test-header function without service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit route with user-specific Authorization header +--- request +POST /hello +--- more_headers +authorization: user-token-xxx +--- response_body chomp +[user-token-xxx] + + + +=== TEST 12: reset route to non-existent function_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/default/non-existent" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route (with non-existent 
function_uri) +--- request +POST /hello +test +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 404 +--- response_body_like eval +qr/not found/ + + + +=== TEST 14: reset route with test-uri function and path forwarding +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit route with GET method +--- request +GET /hello/openfunction +--- response_body +Hello, openfunction! diff --git a/t/plugin/openid-connect.t b/t/plugin/openid-connect.t index 22786eaea9f2..9337e4235d18 100644 --- a/t/plugin/openid-connect.t +++ b/t/plugin/openid-connect.t @@ -109,7 +109,8 @@ done "redirect_uri": "https://iresty.com", "ssl_verify": false, "timeout": 10, - "scope": "apisix" + "scope": "apisix", + "use_pkce": false } }, "upstream": { @@ -918,7 +919,7 @@ OIDC introspection failed: invalid token --- request GET /t --- response_body -{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3} 
+{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3,"use_pkce":false} --- no_error_log [error] @@ -1185,3 +1186,80 @@ GET /t http://127.0.0.1:.*/hello --- no_error_log [error] + + + +=== TEST 30: Switch route URI back to `/hello` and enable pkce. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "use_pkce": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 31: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider with code_challenge parameters. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 and + string.match(location, '.*code_challenge=.*') and + string.match(location, '.*code_challenge_method=S256.*') then + ngx.say(true) + end + } + } +--- request +GET /t +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- no_error_log +[error] diff --git a/t/plugin/opentelemetry.t b/t/plugin/opentelemetry.t index 0c142667a1ab..759b248c6a80 100644 --- a/t/plugin/opentelemetry.t +++ b/t/plugin/opentelemetry.t @@ -553,6 +553,7 @@ plugin_attr: --- extra_init_by_lua local core = require("apisix.core") local otlp = require("opentelemetry.trace.exporter.otlp") + local span_kind = require("opentelemetry.trace.span_kind") otlp.export_spans = function(self, spans) if (#spans ~= 1) then ngx.log(ngx.ERR, "unexpected spans length: ", #spans) @@ -565,6 +566,12 @@ plugin_attr: return end + local current_span_kind = span:plain().kind + if current_span_kind ~= span_kind.server then + ngx.log(ngx.ERR, "expected span.kind to be server but got ", current_span_kind) + return + end + if span.name ~= "/opentracing?foo=bar&a=b" then ngx.log(ngx.ERR, "expect span name: /opentracing?foo=bar&a=b, but got ", span.name) return diff --git a/t/plugin/opentelemetry2.t b/t/plugin/opentelemetry2.t index f173d125ba91..2495d8ef2adf 100644 --- a/t/plugin/opentelemetry2.t +++ b/t/plugin/opentelemetry2.t @@ -142,3 +142,88 @@ plugin body_filter phase opentelemetry context current opentelemetry context current opentelemetry export span + + + +=== TEST 3: set additional_attributes with match +--- config + location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_header_prefix_attributes": [ + "x-my-header-*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/attributes" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: opentelemetry expands headers +--- extra_init_by_lua + local otlp = require("opentelemetry.trace.exporter.otlp") + otlp.export_spans = function(self, spans) + if (#spans ~= 1) then + ngx.log(ngx.ERR, "unexpected spans length: ", #spans) + return + end + + local attributes_names = {} + local attributes = {} + local span = spans[1] + for _, attribute in ipairs(span.attributes) do + if attribute.key == "hostname" then + -- remove any randomness + goto skip + end + table.insert(attributes_names, attribute.key) + attributes[attribute.key] = attribute.value.string_value or "" + ::skip:: + end + table.sort(attributes_names) + for _, attribute in ipairs(attributes_names) do + ngx.log(ngx.INFO, "attribute " .. attribute .. ": \"" .. attributes[attribute] .. 
"\"") + end + + ngx.log(ngx.INFO, "opentelemetry export span") + end +--- request +GET /attributes +--- more_headers +x-my-header-name: william +x-my-header-nick: bill +--- wait: 1 +--- error_code: 404 +--- grep_error_log eval +qr/attribute .+?:.[^,]*/ +--- grep_error_log_out +attribute route: "route_name" +attribute service: "" +attribute x-my-header-name: "william" +attribute x-my-header-nick: "bill" diff --git a/t/plugin/openwhisk.t b/t/plugin/openwhisk.t index 4d89bbe57b9b..0d7e73a96b23 100644 --- a/t/plugin/openwhisk.t +++ b/t/plugin/openwhisk.t @@ -244,7 +244,7 @@ qr/"error":"The requested resource does not exist."/ [[{ "plugins": { "openwhisk": { - "api_host": "http://127.0.0.0:3233", + "api_host": "http://127.0.0.1:1979", "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", "namespace": "guest", "action": "non-existent" diff --git a/t/plugin/plugin.t b/t/plugin/plugin.t index e45f5d5f7814..5baaed8011a6 100644 --- a/t/plugin/plugin.t +++ b/t/plugin/plugin.t @@ -284,7 +284,7 @@ GET /hello error_response = "OK" }}, }) do - local code, body = t('/apisix/admin/global_rules/1', + local code, body = t('/apisix/admin/plugin_configs/1', ngx.HTTP_PUT, { plugins = { @@ -306,3 +306,222 @@ GET /hello {"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: wrong type: expected object, got boolean"} {"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"error_response\" validation failed: value should match only one schema, but matches none"} passed + + + +=== TEST 10: invalid _meta filter vars schema with wrong type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = "arg_k == v" + } + } + } + } + ) + if code 
>= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"filter\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 11: invalid _meta filter schema with wrong expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for _, filter in ipairs({ + {"arg_name", "==", "json"}, + { + {"arg_name", "*=", "json"} + } + }) do + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = filter + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + end + } + } +--- response_body +{"error_msg":"failed to validate the 'vars' expression: rule should be wrapped inside brackets"} +{"error_msg":"failed to validate the 'vars' expression: invalid operator '*='"} + + + +=== TEST 12: proxy-rewrite plugin run with _meta filter vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v2"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v2" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 13: hit route: run proxy-rewrite plugin +--- request +GET /hello?version=v2 +--- response_headers +x-api-version: v2 + + + +=== TEST 14: hit route: not run proxy-rewrite plugin +--- request +GET /hello?version=v1 +--- response_body +hello world + + + +=== TEST 15: different route,same plugin, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v3"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v3" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello1" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 16: hit route: run proxy-rewrite plugin +--- request +GET /hello1?version=v3 +--- response_headers +x-api-version: v3 + + + +=== TEST 17: same plugin, same id between routes and global_rules, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v4"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v4" + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 18: hit route: run proxy-rewrite plugin +--- request +GET /hello1?version=v4 +--- response_headers +x-api-version: v4 diff --git a/t/plugin/prometheus.t b/t/plugin/prometheus.t index 442d1b8b3f5b..3d8b8299f8ea 100644 --- a/t/plugin/prometheus.t +++ b/t/plugin/prometheus.t @@ -611,8 +611,10 @@ qr/apisix_node_info\{hostname=".*"\} 1/ --- yaml_config apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml --- apisix_yaml routes: - diff --git a/t/plugin/prometheus2.t b/t/plugin/prometheus2.t index a2715d6d6d10..178751e1f224 100644 --- a/t/plugin/prometheus2.t +++ b/t/plugin/prometheus2.t @@ -352,9 +352,9 @@ GET /apisix/prometheus/metrics "syslog": { "host": "127.0.0.1", 
"include_req_body": false, - "max_retry_times": 1, + "max_retry_count": 1, "tls": false, - "retry_interval": 1, + "retry_delay": 1, "batch_max_size": 1000, "buffer_duration": 60, "port": 1000, @@ -914,3 +914,14 @@ GET /hello GET /apisix/prometheus/metrics --- response_body eval qr/apisix_bandwidth\{type="egress",route="1",service="service_name",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 50: fetch the prometheus shared dict data +--- http_config +lua_shared_dict test-shared-dict 10m; +--- request +GET /apisix/prometheus/metrics +--- response_body_like +.*apisix_shared_dict_capacity_bytes\{name="test-shared-dict"\} 10485760(?:.|\n)* +apisix_shared_dict_free_space_bytes\{name="test-shared-dict"\} \d+.* diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t new file mode 100644 index 000000000000..93302028e68b --- /dev/null +++ b/t/plugin/prometheus4.t @@ -0,0 +1,140 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/metrics', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request +--- yaml_config +plugin_attr: + prometheus: + metrics: + bandwidth: + extra_labels: + - upstream_addr: $upstream_addr + - upstream_status: $upstream_status +--- request +GET /hello + + + +=== TEST 4: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="10",service="",consumer="",node="127.0.0.1",upstream_addr="127.0.0.1:1980",upstream_status="200"\} \d+/ + + + +=== TEST 5: client request, label with nonexist ngx variable +--- yaml_config +plugin_attr: + prometheus: + metrics: + http_status: + extra_labels: + - dummy: $dummy +--- request +GET /hello 
+ + + +=== TEST 6: fetch the prometheus metric data, with nonexist ngx variable +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="200",route="10",matched_uri="\/hello",matched_host="",service="",consumer="",node="127.0.0.1",dummy=""\} \d+/ diff --git a/t/plugin/proxy-rewrite.t b/t/plugin/proxy-rewrite.t index fbca1b621b51..076f236afba3 100644 --- a/t/plugin/proxy-rewrite.t +++ b/t/plugin/proxy-rewrite.t @@ -41,8 +41,7 @@ __DATA__ local plugin = require("apisix.plugins.proxy-rewrite") local ok, err = plugin.check_schema({ uri = '/apisix/home', - host = 'apisix.iresty.com', - scheme = 'http' + host = 'apisix.iresty.com' }) if not ok then ngx.say(err) @@ -60,34 +59,7 @@ done -=== TEST 2: wrong value of key ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.proxy-rewrite") - local ok, err = plugin.check_schema({ - uri = '/apisix/home', - host = 'apisix.iresty.com', - scheme = 'tcp' - }) - if not ok then - ngx.say(err) - end - - ngx.say("done") - } - } ---- request -GET /t ---- response_body -property "scheme" validation failed: matches none of the enum values -done ---- no_error_log -[error] - - - -=== TEST 3: add plugin +=== TEST 2: add plugin --- config location /t { content_by_lua_block { @@ -98,7 +70,6 @@ done "plugins": { "proxy-rewrite": { "uri": "/test/add", - "scheme": "https", "host": "apisix.iresty.com" } }, @@ -127,7 +98,7 @@ passed -=== TEST 4: update plugin +=== TEST 3: update plugin --- config location /t { content_by_lua_block { @@ -138,7 +109,6 @@ passed "plugins": { "proxy-rewrite": { "uri": "/test/update", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -167,7 +137,7 @@ passed -=== TEST 5: disable plugin +=== TEST 4: disable plugin --- config location /t { content_by_lua_block { @@ -202,7 +172,7 @@ passed -=== TEST 6: set route(rewrite host) +=== TEST 5: set route(rewrite host) --- config location /t { content_by_lua_block { @@ -214,7 +184,6 @@ passed 
"plugins": { "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -243,7 +212,7 @@ passed -=== TEST 7: rewrite host +=== TEST 6: rewrite host --- request GET /hello HTTP/1.1 --- response_body @@ -255,7 +224,7 @@ scheme: http -=== TEST 8: set route(rewrite host + scheme) +=== TEST 7: set route(rewrite host + upstream scheme is https) --- config location /t { content_by_lua_block { @@ -267,11 +236,11 @@ scheme: http "plugins": { "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", - "scheme": "https", "host": "test.com" } }, "upstream": { + "scheme": "https", "nodes": { "127.0.0.1:1983": 1 }, @@ -296,7 +265,7 @@ passed -=== TEST 9: rewrite host + scheme +=== TEST 8: rewrite host + upstream scheme is https --- request GET /hello HTTP/1.1 --- response_body @@ -308,7 +277,7 @@ scheme: https -=== TEST 10: set route(rewrite headers) +=== TEST 9: set route(rewrite headers) --- config location /t { content_by_lua_block { @@ -349,7 +318,7 @@ passed -=== TEST 11: rewrite headers +=== TEST 10: rewrite headers --- request GET /hello HTTP/1.1 --- more_headers @@ -364,7 +333,7 @@ x-real-ip: 127.0.0.1 -=== TEST 12: set route(add headers) +=== TEST 11: set route(add headers) --- config location /t { content_by_lua_block { @@ -405,7 +374,7 @@ passed -=== TEST 13: add headers +=== TEST 12: add headers --- request GET /hello HTTP/1.1 --- response_body @@ -418,7 +387,7 @@ x-real-ip: 127.0.0.1 -=== TEST 14: set route(rewrite empty headers) +=== TEST 13: set route(rewrite empty headers) --- config location /t { content_by_lua_block { @@ -459,7 +428,7 @@ passed -=== TEST 15: rewrite empty headers +=== TEST 14: rewrite empty headers --- request GET /hello HTTP/1.1 --- more_headers @@ -474,7 +443,7 @@ x-real-ip: 127.0.0.1 -=== TEST 16: set route(rewrite uri args) +=== TEST 15: set route(rewrite uri args) --- config location /t { content_by_lua_block { @@ -512,7 +481,7 @@ passed -=== TEST 17: rewrite uri args +=== TEST 16: rewrite uri args 
--- request GET /hello?q=apisix&a=iresty HTTP/1.1 --- response_body @@ -524,7 +493,7 @@ q: apisix -=== TEST 18: set route(rewrite uri empty args) +=== TEST 17: set route(rewrite uri empty args) --- config location /t { content_by_lua_block { @@ -562,7 +531,7 @@ passed -=== TEST 19: rewrite uri empty args +=== TEST 18: rewrite uri empty args --- request GET /hello HTTP/1.1 --- response_body @@ -572,7 +541,7 @@ uri: /plugin_proxy_rewrite_args -=== TEST 20: remove header +=== TEST 19: remove header --- config location /t { content_by_lua_block { @@ -614,7 +583,7 @@ passed -=== TEST 21: remove header +=== TEST 20: remove header --- request GET /hello HTTP/1.1 --- more_headers @@ -630,7 +599,7 @@ x-real-ip: 127.0.0.1 -=== TEST 22: set route(only using regex_uri) +=== TEST 21: set route(only using regex_uri) --- config location /t { content_by_lua_block { @@ -668,7 +637,7 @@ passed -=== TEST 23: hit route(rewrite uri using regex_uri) +=== TEST 22: hit route(rewrite uri using regex_uri) --- request GET /test/plugin/proxy/rewrite HTTP/1.1 --- response_body @@ -680,7 +649,7 @@ scheme: http -=== TEST 24: hit route(404 not found) +=== TEST 23: hit route(404 not found) --- request GET /test/not/found HTTP/1.1 --- error_code: 404 @@ -689,7 +658,7 @@ GET /test/not/found HTTP/1.1 -=== TEST 25: set route(Using both uri and regex_uri) +=== TEST 24: set route(Using both uri and regex_uri) --- config location /t { content_by_lua_block { @@ -728,7 +697,7 @@ passed -=== TEST 26: hit route(rewrite uri using uri & regex_uri property) +=== TEST 25: hit route(rewrite uri using uri & regex_uri property) --- request GET /test/hello HTTP/1.1 --- response_body @@ -738,7 +707,7 @@ hello world -=== TEST 27: set route(invalid regex_uri) +=== TEST 26: set route(invalid regex_uri) --- config location /t { content_by_lua_block { @@ -776,7 +745,7 @@ GET /t -=== TEST 28: set route(invalid regex syntax for the first element) +=== TEST 27: set route(invalid regex syntax for the first element) --- config 
location /t { content_by_lua_block { @@ -816,7 +785,7 @@ qr/invalid regex_uri/ -=== TEST 29: set route(invalid regex syntax for the second element) +=== TEST 28: set route(invalid regex syntax for the second element) --- config location /t { content_by_lua_block { @@ -854,7 +823,7 @@ invalid capturing variable name found -=== TEST 30: set route(invalid uri) +=== TEST 29: set route(invalid uri) --- config location /t { content_by_lua_block { @@ -893,7 +862,7 @@ qr/failed to match pattern/ -=== TEST 31: wrong value of uri +=== TEST 30: wrong value of uri --- config location /t { content_by_lua_block { @@ -918,7 +887,7 @@ property "uri" validation failed: failed to match pattern "^\\/.*" with "home" -=== TEST 32: set route(invalid header field) +=== TEST 31: set route(invalid header field) --- config location /t { content_by_lua_block { @@ -962,7 +931,7 @@ header field: X-Api:Version -=== TEST 33: set route(invalid header value) +=== TEST 32: set route(invalid header value) --- config location /t { content_by_lua_block { @@ -1004,7 +973,7 @@ qr/invalid value character/ -=== TEST 34: set route(rewrite uri with args) +=== TEST 33: set route(rewrite uri with args) --- config location /t { content_by_lua_block { @@ -1042,7 +1011,7 @@ passed -=== TEST 35: rewrite uri with args +=== TEST 34: rewrite uri with args --- request GET /hello?a=iresty --- response_body_like eval @@ -1057,7 +1026,7 @@ q: apisix) -=== TEST 36: print the plugin `conf` in etcd, no dirty data +=== TEST 35: print the plugin `conf` in etcd, no dirty data --- config location /t { content_by_lua_block { @@ -1091,19 +1060,19 @@ q: apisix) end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request GET /t --- response_body -{"proxy-rewrite":{"headers":{"X-Api":"v2"},"uri":"/uri/plugin_proxy_rewrite"}} 
+{"proxy-rewrite":{"headers":{"X-Api":"v2"},"uri":"/uri/plugin_proxy_rewrite","use_real_request_uri_unsafe":false}} --- no_error_log [error] -=== TEST 37: set route(header contains nginx variables) +=== TEST 36: set route(header contains nginx variables) --- config location /t { content_by_lua_block { @@ -1146,7 +1115,7 @@ passed -=== TEST 38: hit route(header supports nginx variables) +=== TEST 37: hit route(header supports nginx variables) --- request GET /hello?name=Bill HTTP/1.1 --- more_headers @@ -1164,7 +1133,7 @@ x-real-ip: 127.0.0.1 -=== TEST 39: set route(nginx variable does not exist) +=== TEST 38: set route(nginx variable does not exist) --- config location /t { content_by_lua_block { @@ -1208,7 +1177,7 @@ passed -=== TEST 40: hit route(get nginx variable is nil) +=== TEST 39: hit route(get nginx variable is nil) --- request GET /hello HTTP/1.1 --- response_body @@ -1221,7 +1190,7 @@ x-real-ip: 127.0.0.1 -=== TEST 41: set route(rewrite uri based on ctx.var) +=== TEST 40: set route(rewrite uri based on ctx.var) --- config location /t { content_by_lua_block { @@ -1259,7 +1228,7 @@ passed -=== TEST 42: hit route(upstream uri: should be /hello) +=== TEST 41: hit route(upstream uri: should be /hello) --- request GET /test?new_uri=hello --- response_body @@ -1269,7 +1238,7 @@ hello world -=== TEST 43: host with port +=== TEST 42: host with port --- config location /t { content_by_lua_block { @@ -1293,7 +1262,7 @@ done -=== TEST 44: set route(rewrite host with port) +=== TEST 43: set route(rewrite host with port) --- config location /t { content_by_lua_block { @@ -1333,7 +1302,7 @@ passed -=== TEST 45: rewrite host with port +=== TEST 44: rewrite host with port --- request GET /hello --- response_body diff --git a/t/plugin/proxy-rewrite2.t b/t/plugin/proxy-rewrite2.t index fcd4011bacec..4ba236f362fc 100644 --- a/t/plugin/proxy-rewrite2.t +++ b/t/plugin/proxy-rewrite2.t @@ -27,8 +27,10 @@ add_block_preprocessor(sub { my $yaml_config = $block->yaml_config // 
<<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); @@ -46,34 +48,7 @@ run_tests; __DATA__ -=== TEST 1: rewrite scheme but the node doesn't have port ---- apisix_yaml -routes: - - - id: 1 - uri: /hello - upstream_id: 1 - plugins: - proxy-rewrite: - scheme: "https" - - - id: 2 - uri: /hello_chunked - upstream_id: 1 -upstreams: - - - id: 1 - nodes: - "127.0.0.1": 1 - type: roundrobin -#END ---- error_code: 503 ---- error_log -Can't detect upstream's scheme - - - -=== TEST 2: access $upstream_uri before proxy-rewrite +=== TEST 1: access $upstream_uri before proxy-rewrite --- apisix_yaml global_rules: - @@ -107,7 +82,7 @@ scheme: http -=== TEST 3: default X-Forwarded-Proto +=== TEST 2: default X-Forwarded-Proto --- apisix_yaml routes: - @@ -128,7 +103,7 @@ X-Forwarded-Proto: http -=== TEST 4: pass X-Forwarded-Proto +=== TEST 3: pass X-Forwarded-Proto --- apisix_yaml routes: - @@ -151,7 +126,7 @@ X-Forwarded-Proto: https -=== TEST 5: customize X-Forwarded-Proto +=== TEST 4: customize X-Forwarded-Proto --- apisix_yaml routes: - @@ -178,7 +153,7 @@ X-Forwarded-Proto: https -=== TEST 6: make sure X-Forwarded-Proto hit the `core.request.header` cache +=== TEST 5: make sure X-Forwarded-Proto hit the `core.request.header` cache --- apisix_yaml routes: - @@ -211,7 +186,7 @@ localhost -=== TEST 7: pass duplicate X-Forwarded-Proto +=== TEST 6: pass duplicate X-Forwarded-Proto --- apisix_yaml routes: - diff --git a/t/plugin/proxy-rewrite3.t b/t/plugin/proxy-rewrite3.t index f98de527fa3f..88e2a9db1cf4 100644 --- a/t/plugin/proxy-rewrite3.t +++ b/t/plugin/proxy-rewrite3.t @@ -51,7 +51,6 @@ __DATA__ "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", "method": "POST", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -97,7 +96,6 @@ plugin_proxy_rewrite get method: POST "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", 
"method": "GET", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -138,8 +136,7 @@ plugin_proxy_rewrite get method: GET local ok, err = plugin.check_schema({ uri = '/apisix/home', method = 'GET1', - host = 'apisix.iresty.com', - scheme = 'http' + host = 'apisix.iresty.com' }) if not ok then ngx.say(err) @@ -167,7 +164,6 @@ done "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", "method": "POST", - "scheme": "http", "host": "apisix.iresty.com", "headers":{ "x-api-version":"v1" @@ -200,3 +196,91 @@ passed GET /hello --- error_log plugin_proxy_rewrite get method: POST + + + +=== TEST 8: set route(unsafe uri not normalized at request) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: unsafe uri not normalized at request +--- request +GET /print%5Furi%5Fdetailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print%5Furi%5Fdetailed + + + +=== TEST 10: set route(safe uri not normalized at request) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: safe uri not normalized at request +--- request +GET 
/print_uri_detailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print_uri_detailed diff --git a/t/plugin/redirect.t b/t/plugin/redirect.t index 3b8d87afd787..4d4d4c5f7846 100644 --- a/t/plugin/redirect.t +++ b/t/plugin/redirect.t @@ -443,12 +443,13 @@ Location: https://foo.com:8443/hello -=== TEST 19: redirect(port using `apisix.ssl.listen_port`) +=== TEST 19: redirect(port using `apisix.ssl.listen`) --- yaml_config apisix: ssl: enable: true - listen_port: 9445 + listen: + - port: 9445 --- request GET /hello --- more_headers @@ -476,8 +477,8 @@ apisix: ssl: enable: true listen: - - 6443 - - 7443 + - port: 6443 + - port: 7443 - port: 8443 - port: 9443 --- request @@ -649,7 +650,7 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -1114,3 +1115,38 @@ X-Forwarded-Proto: http --- error_code: 301 --- response_headers Location: https://foo.com:9443/hello + + + +=== TEST 47: wrong configure, enable http_to_https with append_query_string +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "append_query_string": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/error_msg":"failed to check the configuration of plugin redirect err: only one of `http_to_https` and `append_query_string` can be configured."/ +--- no_error_log +[error] diff --git a/t/plugin/redirect2.t b/t/plugin/redirect2.t index ac840e6d4274..24f6f8ebf3fb 100644 --- a/t/plugin/redirect2.t +++ b/t/plugin/redirect2.t @@ -79,3 
+79,32 @@ GET /test/hello?o=apache --- response_headers Location: http://test.com/hello?q=apisix&o=apache --- error_code: 302 + + + +=== TEST 3: compatible with old version configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "append_query_string": false + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/plugin/response-rewrite.t b/t/plugin/response-rewrite.t index ebde0d0580a6..d9283e3fe7d9 100644 --- a/t/plugin/response-rewrite.t +++ b/t/plugin/response-rewrite.t @@ -471,7 +471,7 @@ invalid base64 content end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request @@ -699,3 +699,50 @@ X-A: 127.0.0.1 X-B: from 127.0.0.1 to 127.0.0.1:1980 --- no_error_log [error] + + + +=== TEST 25: set empty body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 26: hit set empty body +--- request +GET /hello +--- response_body +--- no_error_log +[error] diff --git a/t/plugin/response-rewrite2.t b/t/plugin/response-rewrite2.t index 48401f915308..e3209314632a 100644 --- a/t/plugin/response-rewrite2.t +++ b/t/plugin/response-rewrite2.t @@ -517,3 +517,181 @@ passed GET /hello --- response_body hello world + + + 
+=== TEST 19: schema check for headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {add = { + {"headers:"} + }}, + {remove = { + {"headers:"} + }}, + {set = { + {"headers"} + }}, + {set = { + {[""] = 1} + }}, + {set = { + {["a"] = true} + }}, + }) do + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({headers = case}) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } +} +--- response_body eval +"property \"headers\" validation failed: object matches none of the required\n" x 5 + + + +=== TEST 20: add headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache", + "Cache-Control : max-age=0, must-revalidate" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit +--- request +GET /hello +--- response_headers +Cache-Control: no-cache, max-age=0, must-revalidate + + + +=== TEST 22: set headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: hit +--- request +GET /hello +--- response_headers +Cache-Control: max-age=0, must-revalidate + + + +=== TEST 24: remove headers +--- config + location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Set-Cookie: =; Max-Age=" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + }, + "remove": [ + "Set-Cookie", + "Cache-Control" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: hit +--- request +GET /hello +--- response_headers +Cache-Control: +Set-Cookie: diff --git a/t/plugin/rocketmq-logger2.t b/t/plugin/rocketmq-logger2.t index 286d3cad4fe0..60178612ee5e 100644 --- a/t/plugin/rocketmq-logger2.t +++ b/t/plugin/rocketmq-logger2.t @@ -208,9 +208,8 @@ qr/failed to send data to rocketmq topic: .*, nameserver_list: \{"127.0.0.127":9 content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata/rocketmq-logger', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]]) + ngx.HTTP_DELETE + ) } } --- response_body diff --git a/t/plugin/tencent-cloud-cls.t b/t/plugin/tencent-cloud-cls.t new file mode 100644 index 000000000000..14006bbd7e9f --- /dev/null +++ b/t/plugin/tencent-cloud-cls.t @@ -0,0 +1,330 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 10420; + location /structuredlog { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "tencent-cloud-cls body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "tencent-cloud-cls headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } + } + server { + listen 10421; + location /structuredlog { + content_by_lua_block { + ngx.exit(500) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + secret_key = "secret_key", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: cls config missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "secret_key" is required +done + + + +=== TEST 3: add plugin for incorrect server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10421", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: incorrect server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch 
Processor[tencent-cloud-cls] failed to process entries [1/1]: got wrong status: 500 +--- wait: 0.5 + + + +=== TEST 5: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 7: verify request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_to_cls = function(self, logs) + if (#logs ~= 1) then + ngx.log(ngx.ERR, "unexpected logs length: ", #logs) + return + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 8: verify cls api request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 
then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 9: plugin metadata +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/tencent-cloud-cls', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: log use log_format +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + local has_host, has_timestamp, has_client_ip = false, false, false + for i, tag in ipairs(log.contents) do + if tag.key == "host" then + has_host = true + end + if tag.key == "@timestamp" then + has_timestamp = true + end + if tag.key == "client_ip" then + has_client_ip = true + end + end + if not(has_host and has_timestamp and has_client_ip) then + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- wait: 0.5 diff --git a/t/plugin/traffic-split2.t b/t/plugin/traffic-split2.t index 41bee399d689..c41886283d56 100644 --- a/t/plugin/traffic-split2.t +++ b/t/plugin/traffic-split2.t @@ -744,7 +744,6 @@ qr/1980, 1981, 1982, 1980, 1981, 1982, 1980, 1981, 1982/ ngx.say(body) } } ---- 
skip_nginx: 5: < 1.19.0 --- response_body passed @@ -753,7 +752,6 @@ passed === TEST 19: hit route --- request GET /uri?id=1 ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: 127.0.0.1/ --- error_log diff --git a/t/plugin/ua-restriction.t b/t/plugin/ua-restriction.t index 82e665894655..0e8a9544bd34 100644 --- a/t/plugin/ua-restriction.t +++ b/t/plugin/ua-restriction.t @@ -725,7 +725,9 @@ hello world "denylist": [ "foo" ], - "disable": true + "_meta": { + "disable": true + } } } }]] diff --git a/t/plugin/wolf-rbac.t b/t/plugin/wolf-rbac.t index 954f9c1ca114..6e0fb0d7dd16 100644 --- a/t/plugin/wolf-rbac.t +++ b/t/plugin/wolf-rbac.t @@ -115,12 +115,12 @@ done for _, data in ipairs(data) do local code, body = t(data.url, ngx.HTTP_PUT, data.data) - ngx.say(code..body) + ngx.say(body) end } } --- response_body eval -"201passed\n" x 3 +"passed\n" x 3 @@ -342,17 +342,17 @@ x-rbac-token: V1#invalid-appid#rbac-token === TEST 16: verify: failed --- request GET /hello1 ---- error_code: 401 +--- error_code: 403 --- more_headers x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token --- response_body -{"message":"Invalid user permission"} +{"message":"ERR_ACCESS_DENIED"} --- grep_error_log eval -qr/no permission to access */ +qr/ERR_ACCESS_DENIED */ --- grep_error_log_out -no permission to access -no permission to access -no permission to access +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED @@ -545,3 +545,36 @@ location /t { } --- response_body_like eval qr/success to change password/ + + + +=== TEST 29: verify: failed, server internal error +--- request +GET /hello/500 +--- error_code: 500 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"request to wolf-server failed, status:500"} +--- grep_error_log eval +qr/request to wolf-server failed, status:500 */ +--- grep_error_log_out +request to wolf-server failed, status:500 +request to wolf-server failed, status:500 + + + +=== TEST 30: verify: failed, token is expired 
+--- request +GET /hello/401 +--- error_code: 401 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"ERR_TOKEN_INVALID"} +--- grep_error_log eval +qr/ERR_TOKEN_INVALID */ +--- grep_error_log_out +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID diff --git a/t/plugin/workflow.t b/t/plugin/workflow.t new file mode 100644 index 000000000000..e1bf77a1f26c --- /dev/null +++ b/t/plugin/workflow.t @@ -0,0 +1,689 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + status = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = "403" + } + } + } + } + } + }, + { + rules = { + { + case = { + + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "fake", + { + code = 403 + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +property "rules" validation failed: failed to validate item 1: property "actions" is required +property "rules" validation failed: failed to validate item 1: property "actions" validation failed: failed to validate item 1: expect array to have at least 1 items +failed to validate the 'return' action: property "code" is required +failed to validate the 'return' action: property "code" validation failed: wrong type: expected integer, got string 
+property "rules" validation failed: failed to validate item 1: property "case" validation failed: expect array to have at least 1 items +unsupported action: fake + + + +=== TEST 2: set plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: trigger workflow +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 4: multiple conditions in one case +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"], + ["arg_foo", "==", "bar"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: missing match the only case +--- request +GET /hello?foo=bad + + + +=== TEST 6: trigger workflow +--- request +GET /hello?foo=bar +--- error_code: 403 +--- response_body +{"error_msg":"rejected by workflow"} + + + +=== TEST 7: multiple cases with different actions +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + 
actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello2"} + }, + actions = { + { + "return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: trigger one case +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 9: trigger another case +--- request +GET /hello2 +--- error_code: 401 + + + +=== TEST 10: match case in order +# rules is an array, match in the order of the index of the array, +# when cases are matched, actions are executed and do not continue +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"arg_foo", "==", "bar"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: both case 1&2 matched, trigger the first cases +--- request +GET /hello?foo=bar +--- error_code: 403 + + + +=== TEST 12: case 1 mismatched, trigger the second cases +--- request +GET /hello?foo=bad +--- error_code: 401 + + + +=== TEST 13: all cases mismatched, pass to upstream +--- request +GET /hello1 +--- response_body +hello1 world + + + +=== TEST 14: schema check(limit-count) +--- config + location /t { + content_by_lua_block { + local plugin 
= require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {time_window = 60} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + group = "services_1" + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +failed to validate the 'limit-count' action: property "time_window" is required +failed to validate the 'limit-count' action: property "count" is required +failed to validate the 'limit-count' action: group is not supported + + + +=== TEST 15: set actions as limit-count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "limit-count", + { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 17: the conf in 
actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: cross-hit case 1 and case 2, up limit by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1", +"GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200, 200, 200, 503, 503] diff --git a/t/plugin/workflow2.t b/t/plugin/workflow2.t new file mode 100644 index 000000000000..b30567532832 --- /dev/null +++ b/t/plugin/workflow2.t @@ -0,0 +1,285 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: multiple cases with different actions(return & limit-count) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello1"] +--- error_code 
eval +[403, 200, 503] + + + +=== TEST 3: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: cross-hit case 1 and case 2, up limit by isolation 2 +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: different actions with different limit count conf, up limit by isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + 
} + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: case 1 up limit, case 2 psssed +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 200] diff --git a/t/plugin/zipkin2.t b/t/plugin/zipkin2.t index 3175075d32ed..8423f6f67d63 100644 --- a/t/plugin/zipkin2.t +++ b/t/plugin/zipkin2.t @@ -98,6 +98,7 @@ passed b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 --- response_headers x-b3-sampled: 1 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 --- raw_response_headers_unlike b3: --- error_log @@ -124,6 +125,9 @@ invalid b3 header b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-0-05e3ac9a4f6e3b90 --- response_headers x-b3-sampled: 0 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 +x-b3-parentspanid: 05e3ac9a4f6e3b90 +x-b3-spanid: e457b5a2e4d86bd1 @@ -132,6 +136,9 @@ x-b3-sampled: 0 b3: 0 --- response_headers x-b3-sampled: 0 +x-b3-traceid: +x-b3-parentspanid: +x-b3-spanid: diff --git a/t/router/multi-ssl-certs.t b/t/router/multi-ssl-certs.t index 1c302f9ed134..1bfb4d36bef9 100644 --- a/t/router/multi-ssl-certs.t +++ b/t/router/multi-ssl-certs.t @@ -36,17 +36,14 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "www.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "www.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -183,17 +180,14 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = 
ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/2', + local code, body = t.test('/apisix/admin/ssls/2', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/2" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/2" }]] ) @@ -268,17 +262,14 @@ location /t { local ssl_key = t.read_file("t/certs/apisix_admin_ssl.key") local data = {cert = ssl_cert, key = ssl_key, sni = "apisix.dev"} - local code, body = t.test('/apisix/admin/ssl/3', + local code, body = t.test('/apisix/admin/ssls/3', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "apisix.dev" - }, - "key": "/apisix/ssl/3" + "value": { + "sni": "apisix.dev" }, - "action": "set" + "key": "/apisix/ssls/3" }]] ) @@ -349,9 +340,9 @@ location /t { local core = require("apisix.core") local t = require("lib.test_admin") - t.test('/apisix/admin/ssl/1', ngx.HTTP_DELETE) - t.test('/apisix/admin/ssl/2', ngx.HTTP_DELETE) - t.test('/apisix/admin/ssl/3', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/2', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/3', ngx.HTTP_DELETE) } } diff --git a/t/router/radixtree-host-uri-priority.t b/t/router/radixtree-host-uri-priority.t index a05b619fa065..3fd214668533 100644 --- a/t/router/radixtree-host-uri-priority.t +++ b/t/router/radixtree-host-uri-priority.t @@ -25,11 +25,13 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false router: http: 'radixtree_host_uri' admin_key: null +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ run_tests(); @@ -65,7 +67,7 @@ Host: test.com --- response_body eval qr/1980/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -100,7 +102,7 @@ Host: test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml 
+use config_provider: yaml --- no_error_log [error] @@ -134,7 +136,7 @@ Host: test.com --- response_body eval qr/1980/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -168,6 +170,6 @@ Host: test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] diff --git a/t/router/radixtree-host-uri.t b/t/router/radixtree-host-uri.t index 63e07d4b118d..098a6c23b5af 100644 --- a/t/router/radixtree-host-uri.t +++ b/t/router/radixtree-host-uri.t @@ -27,7 +27,6 @@ apisix: node_listen: 1984 router: http: 'radixtree_host_uri' - admin_key: null _EOC_ run_tests(); diff --git a/t/router/radixtree-host-uri2.t b/t/router/radixtree-host-uri2.t index 313bc6a1444f..4d60072168eb 100644 --- a/t/router/radixtree-host-uri2.t +++ b/t/router/radixtree-host-uri2.t @@ -25,11 +25,13 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - enable_admin: false router: http: 'radixtree_host_uri' admin_key: null +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ run_tests(); @@ -62,7 +64,7 @@ Host: test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -94,7 +96,7 @@ Host: www.test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -126,7 +128,7 @@ Host: www.test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -159,7 +161,7 @@ Host: www.test.com --- response_body eval qr/1980/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] @@ -192,7 +194,7 @@ Host: www.test.com --- response_body eval qr/1981/ --- error_log -use config_center: yaml +use config_provider: yaml --- no_error_log [error] diff --git a/t/router/radixtree-host-uri3.t 
b/t/router/radixtree-host-uri3.t index 9fa14c22fbf8..2db4bb437c1e 100644 --- a/t/router/radixtree-host-uri3.t +++ b/t/router/radixtree-host-uri3.t @@ -21,7 +21,6 @@ apisix: node_listen: 1984 router: http: 'radixtree_host_uri' - admin_key: null _EOC_ add_block_preprocessor(sub { diff --git a/t/router/radixtree-sni.t b/t/router/radixtree-sni.t index b8494d315b03..c1b781473f41 100644 --- a/t/router/radixtree-sni.t +++ b/t/router/radixtree-sni.t @@ -36,19 +36,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "www.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "www.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -79,7 +76,7 @@ passed }, "uri": "/hello" }]] - ) + ) if code >= 300 then ngx.status = code @@ -223,19 +220,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -337,19 +331,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - 
) + ) ngx.status = code ngx.say(body) @@ -451,19 +442,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -579,19 +567,16 @@ location /t { local data = {status = 0} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, core.json.encode(data), [[{ - "node": { - "value": { - "status": 0 - }, - "key": "/apisix/ssl/1" + "value": { + "status": 0 }, - "action": "compareAndSwap" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -660,19 +645,16 @@ location /t { local data = {status = 1} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, core.json.encode(data), [[{ - "node": { - "value": { - "status": 1 - }, - "key": "/apisix/ssl/1" + "value": { + "status": 1 }, - "action": "compareAndSwap" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -744,19 +726,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "snis": ["test2.com", "*.test2.com"] - }, - "key": "/apisix/ssl/1" + "value": { + "snis": ["test2.com", "*.test2.com"] }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -874,7 +853,7 @@ location /t { local ssl_key = 
t.aes_encrypt(t.read_file("t/certs/test2.key")) local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/router/radixtree-sni2.t b/t/router/radixtree-sni2.t index 83d1187dc0b5..b0940a1a1646 100644 --- a/t/router/radixtree-sni2.t +++ b/t/router/radixtree-sni2.t @@ -45,17 +45,14 @@ location /t { sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) ngx.status = code @@ -167,19 +164,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -270,10 +264,10 @@ location /t { key = raw_ssl_key, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) - ) + ) ngx.status = code ngx.print(body) @@ -298,7 +292,7 @@ location /t { local ssl_key = t.read_file("t/certs/incorrect.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -412,7 +406,7 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = 
"*.TesT2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -477,7 +471,7 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, snis = {"TesT2.com", "a.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/router/radixtree-uri-keep-end-slash.t b/t/router/radixtree-uri-keep-end-slash.t index d51ac0755e13..01225e43961e 100644 --- a/t/router/radixtree-uri-keep-end-slash.t +++ b/t/router/radixtree-uri-keep-end-slash.t @@ -26,7 +26,6 @@ our $yaml_config = <<_EOC_; apisix: node_listen: 1984 delete_uri_tail_slash: true - admin_key: null _EOC_ run_tests(); diff --git a/t/router/radixtree-uri-sanity.t b/t/router/radixtree-uri-sanity.t index d49285ff1c93..ac9ab5a1ee1e 100644 --- a/t/router/radixtree-uri-sanity.t +++ b/t/router/radixtree-uri-sanity.t @@ -25,7 +25,6 @@ no_shuffle(); our $servlet_yaml_config = <<_EOC_; apisix: node_listen: 1984 - admin_key: null normalize_uri_like_servlet: true _EOC_ diff --git a/t/router/radixtree-uri-with-parameter.t b/t/router/radixtree-uri-with-parameter.t index d8fa0950c235..00686e996918 100644 --- a/t/router/radixtree-uri-with-parameter.t +++ b/t/router/radixtree-uri-with-parameter.t @@ -25,7 +25,6 @@ no_shuffle(); our $yaml_config = <<_EOC_; apisix: node_listen: 1984 - admin_key: null router: http: 'radixtree_uri_with_parameter' _EOC_ @@ -59,21 +58,18 @@ __DATA__ "uri": "/name/:name/bar" }]], [[{ - "node": { - "value": { - "uri": "/name/:name/bar", - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/name/:name/bar", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) if code >= 300 then ngx.status 
= code @@ -162,21 +158,18 @@ qr/404 Not Found/ "uri": "/:name/foo" }]], [[{ - "node": { - "value": { - "uri": "/:name/foo", - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/:name/foo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) if code >= 300 then ngx.status = code @@ -221,11 +214,11 @@ GET /json/bbb/foo content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "hosts": ["bar.com"] }]] - ) + ) if code >= 300 then ngx.status = code @@ -234,8 +227,8 @@ GET /json/bbb/foo end local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "methods": ["GET"], "upstream": { "nodes": { @@ -249,7 +242,7 @@ GET /json/bbb/foo "service_id": "1", "uri": "/:name/hello" }]] - ) + ) if code >= 300 then ngx.status = code @@ -258,8 +251,8 @@ GET /json/bbb/foo end local code, body = t('/apisix/admin/routes/2', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "methods": ["GET"], "upstream": { "nodes": { @@ -273,7 +266,7 @@ GET /json/bbb/foo "uri": "/:name/hello", "priority": -1 }]] - ) + ) if code >= 300 then ngx.status = code diff --git a/t/stream-node/mtls.t b/t/stream-node/mtls.t index 3caad2c1b7ee..35dcfc4fd626 100644 --- a/t/stream-node/mtls.t +++ b/t/stream-node/mtls.t @@ -100,7 +100,7 @@ __DATA__ depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/stream-node/priority-balancer.t b/t/stream-node/priority-balancer.t index e25bdc3f97d7..00fc91cbbe23 100644 --- a/t/stream-node/priority-balancer.t +++ b/t/stream-node/priority-balancer.t @@ -30,8 +30,10 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml - 
enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/stream-node/sanity.t b/t/stream-node/sanity.t index f42b89ca9832..06e809dfd25a 100644 --- a/t/stream-node/sanity.t +++ b/t/stream-node/sanity.t @@ -373,7 +373,6 @@ GET /t passed --- no_error_log [error] ---- skip_nginx: 5: < 1.19.0 @@ -384,7 +383,6 @@ mmm hello world --- no_error_log [error] ---- skip_nginx: 5: < 1.19.0 diff --git a/t/stream-node/sni.t b/t/stream-node/sni.t index 0d71313640bb..29181527902c 100644 --- a/t/stream-node/sni.t +++ b/t/stream-node/sni.t @@ -46,7 +46,7 @@ __DATA__ cert = ssl_cert, key = ssl_key, sni = "*.test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-node/tls.t b/t/stream-node/tls.t index 2f3016b55830..7e9568c4d94a 100644 --- a/t/stream-node/tls.t +++ b/t/stream-node/tls.t @@ -46,7 +46,7 @@ __DATA__ cert = ssl_cert, key = ssl_key, sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-node/upstream-tls.t b/t/stream-node/upstream-tls.t index a9fce58a4c53..e721d4c354b7 100644 --- a/t/stream-node/upstream-tls.t +++ b/t/stream-node/upstream-tls.t @@ -118,7 +118,7 @@ hello apisix_backend cert = ssl_cert, key = ssl_key, sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-plugin/mqtt-proxy.t b/t/stream-plugin/mqtt-proxy.t index 4a59e376d362..69403f380f85 100644 --- a/t/stream-plugin/mqtt-proxy.t +++ b/t/stream-plugin/mqtt-proxy.t @@ -39,12 +39,19 @@ __DATA__ "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { + "protocol_level": 4 + } + }, + "upstream": { + 
"type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { "host": "127.0.0.1", - "port": 1995 + "port": 1995, + "weight": 1 } - } + ] } }]] ) @@ -132,7 +139,7 @@ match(): not hit any route -=== TEST 6: check schema +=== TEST 6: set route with host --- config location /t { content_by_lua_block { @@ -145,51 +152,22 @@ match(): not hit any route "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "host": "127.0.0.1" - } + "protocol_level": 4 } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.print(body) - } - } ---- request -GET /t ---- error_code: 400 ---- response_body -{"error_msg":"failed to check the configuration of stream plugin [mqtt-proxy]: property \"upstream\" validation failed: value should match only one schema, but matches none"} - - - -=== TEST 7: set route with host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/stream_routes/1', - ngx.HTTP_PUT, - [[{ - "remote_addr": "127.0.0.1", - "server_port": 1985, - "plugins": { - "mqtt-proxy": { - "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { "host": "localhost", - "port": 1995 + "port": 1995, + "weight": 1 } - } + ] } }]] - ) + ) if code >= 300 then ngx.status = code @@ -206,7 +184,7 @@ passed -=== TEST 8: hit route +=== TEST 7: hit route --- stream_request eval "\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" --- stream_response @@ -216,54 +194,7 @@ hello world -=== TEST 9: set route with invalid host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/stream_routes/1', - ngx.HTTP_PUT, - [[{ - "remote_addr": "127.0.0.1", - "server_port": 1985, - "plugins": { - "mqtt-proxy": { - "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "host": "loc", 
- "port": 1995 - } - } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed ---- no_error_log -[error] - - - -=== TEST 10: hit route ---- stream_request eval -"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" ---- error_log -failed to parse domain: loc, error: ---- timeout: 10 - - - -=== TEST 11: set route with upstream +=== TEST 8: set route with upstream --- config location /t { content_by_lua_block { @@ -305,7 +236,7 @@ passed -=== TEST 12: hit route +=== TEST 9: hit route --- stream_request eval "\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" --- stream_response @@ -319,7 +250,7 @@ mqtt client id: foo -=== TEST 13: hit route with empty client id +=== TEST 10: hit route with empty client id --- stream_request eval "\x10\x0c\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x00" --- stream_response @@ -332,7 +263,7 @@ qr/mqtt client id: \w+/ -=== TEST 14: MQTT 5 +=== TEST 11: MQTT 5 --- config location /t { content_by_lua_block { @@ -374,7 +305,7 @@ passed -=== TEST 15: hit route with empty property +=== TEST 12: hit route with empty property --- stream_request eval "\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" --- stream_response @@ -387,7 +318,7 @@ qr/mqtt client id: \w+/ -=== TEST 16: hit route with property +=== TEST 13: hit route with property --- stream_request eval "\x10\x1b\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x05\x11\x00\x00\x0e\x10\x00\x09\x63\x6c\x69\x6e\x74\x2d\x31\x31\x31" --- stream_response @@ -401,7 +332,7 @@ mqtt client id: clint-111 -=== TEST 17: balance with mqtt_client_id +=== TEST 14: balance with mqtt_client_id --- config location /t { content_by_lua_block { @@ -451,7 +382,7 @@ passed -=== TEST 18: hit route with empty id +=== TEST 15: hit route with empty id --- stream_request eval "\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" --- stream_response @@ -465,7 +396,7 @@ proxy request to 
127.0.0.1:1995 -=== TEST 19: hit route with different client id, part 1 +=== TEST 16: hit route with different client id, part 1 --- stream_request eval "\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x66" --- stream_response @@ -480,7 +411,7 @@ proxy request to 0.0.0.0:1995 -=== TEST 20: hit route with different client id, part 2 +=== TEST 17: hit route with different client id, part 2 --- stream_request eval "\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x67" --- stream_response diff --git a/t/stream-plugin/mqtt-proxy2.t b/t/stream-plugin/mqtt-proxy2.t new file mode 100644 index 000000000000..e387b26dce1a --- /dev/null +++ b/t/stream-plugin/mqtt-proxy2.t @@ -0,0 +1,79 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set route with invalid host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "loc", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- error_log +failed to parse domain: loc, error: +--- timeout: 10 diff --git a/t/tars/discovery/tars.t b/t/tars/discovery/tars.t index da85fb3f5cea..9cc1606643f7 100644 --- a/t/tars/discovery/tars.t +++ b/t/tars/discovery/tars.t @@ -28,8 +28,11 @@ add_block_preprocessor(sub { my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: yaml enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml discovery: tars: db_conf: @@ -51,9 +54,8 @@ _EOC_ $block->set_value("apisix_yaml", $apisix_yaml); - my $extra_init_by_lua = <<_EOC_; + my $extra_init_by_lua_start = <<_EOC_; -- reduce incremental_fetch_interval,full_fetch_interval - local core = require("apisix.core") local schema = require("apisix.discovery.tars.schema") schema.properties.incremental_fetch_interval.minimum=1 schema.properties.incremental_fetch_interval.default=1 @@ -61,7 +63,7 @@ _EOC_ schema.properties.full_fetch_interval.default = 3 _EOC_ - $block->set_value("extra_init_by_lua", $extra_init_by_lua); + $block->set_value("extra_init_by_lua_start", 
$extra_init_by_lua_start); my $config = $block->config // <<_EOC_; location /count { diff --git a/t/xds-library/config_xds.t b/t/xds-library/config_xds.t index 8df413aa3082..b82b7d9c5b07 100644 --- a/t/xds-library/config_xds.t +++ b/t/xds-library/config_xds.t @@ -64,8 +64,10 @@ _EOC_ my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: xds - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: xds _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/t/xds-library/config_xds_2.t b/t/xds-library/config_xds_2.t index 67629d4bcbed..bfd9fd0d5f94 100644 --- a/t/xds-library/config_xds_2.t +++ b/t/xds-library/config_xds_2.t @@ -46,8 +46,10 @@ _EOC_ my $yaml_config = <<_EOC_; apisix: node_listen: 1984 - config_center: xds - enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: xds _EOC_ $block->set_value("yaml_config", $yaml_config); diff --git a/utils/create-ssl.py b/utils/create-ssl.py index 93f206819c97..e8a3daa33b73 100755 --- a/utils/create-ssl.py +++ b/utils/create-ssl.py @@ -30,7 +30,7 @@ key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], diff --git a/utils/gen-vote-contents.sh b/utils/gen-vote-contents.sh index 87ddedbc18d8..d644dfd6bedf 100755 --- a/utils/gen-vote-contents.sh +++ b/utils/gen-vote-contents.sh @@ -73,7 +73,7 @@ tar zxvf apache-apisix-$VERSION-src.tgz 4. Build Apache APISIX: -https://github.com/apache/apisix/blob/release/$BLOB_VERSION/docs/en/latest/installation-guide.md#installation-via-source-release-package +https://github.com/apache/apisix/blob/release/$BLOB_VERSION/docs/en/latest/building-apisix.md#building-apisix-from-source The vote will be open for at least 72 hours or until necessary number of votes are reached. 
diff --git a/utils/linux-install-openresty.sh b/utils/linux-install-openresty.sh index 7498da3ab381..c97454e3e26e 100755 --- a/utils/linux-install-openresty.sh +++ b/utils/linux-install-openresty.sh @@ -48,4 +48,4 @@ else openresty="openresty-debug=$OPENRESTY_VERSION*" fi -sudo apt-get install "$openresty" lua5.1 liblua5.1-0-dev openresty-openssl111-debug-dev libldap2-dev +sudo apt-get install "$openresty" openresty-openssl111-debug-dev libldap2-dev