From cb9c34bbd87e2a3c7772b6e5fca0e4479a95d44c Mon Sep 17 00:00:00 2001 From: Yanks Yoon <37652070+yanksyoon@users.noreply.github.com> Date: Wed, 26 Jun 2024 20:40:36 +0900 Subject: [PATCH] Feat/private endpoint (#284) Add support for private endpoint. * Integration test for openstack integration --- .github/workflows/e2e_test.yaml | 5 +- .github/workflows/e2e_test_openstack.yaml | 462 ------------------ .github/workflows/e2e_test_run.yaml | 15 + .github/workflows/integration_test.yaml | 19 +- .github/workflows/manual_test_env.yaml | 24 - .github/workflows/test.yaml | 4 +- pyproject.toml | 1 - scripts/setup-lxd.sh | 8 + src-docs/charm_state.py.md | 36 +- src/charm_state.py | 4 + src/openstack_cloud/openstack_manager.py | 2 +- tests/conftest.py | 9 + tests/integration/conftest.py | 169 +++++-- tests/integration/data/clouds.yaml.tmpl | 10 + tests/integration/helpers/common.py | 3 +- tests/integration/helpers/openstack.py | 53 +- tests/integration/test_charm_base_image.py | 6 +- tests/integration/test_charm_fork_repo.py | 2 - ...one_runner.py => test_charm_lxd_runner.py} | 108 +--- .../integration/test_charm_metrics_failure.py | 43 +- .../integration/test_charm_metrics_success.py | 17 +- tests/integration/test_charm_runner.py | 123 +++++ tests/integration/test_charm_upgrade.py | 2 +- tests/integration/test_e2e.py | 62 +++ .../integration/test_openstack_base_image.py | 57 --- .../integration/test_openstack_one_runner.py | 60 --- tests/integration/test_openstack_runner.py | 168 ------- tests/unit/test_charm.py | 4 +- tests/unit/test_openstack_manager.py | 279 ++++++++++- 29 files changed, 762 insertions(+), 993 deletions(-) delete mode 100644 .github/workflows/e2e_test_openstack.yaml delete mode 100644 .github/workflows/manual_test_env.yaml create mode 100644 scripts/setup-lxd.sh create mode 100644 tests/integration/data/clouds.yaml.tmpl rename tests/integration/{test_charm_one_runner.py => test_charm_lxd_runner.py} (65%) create mode 100644 tests/integration/test_charm_runner.py create mode 100644 tests/integration/test_e2e.py delete mode 100644 tests/integration/test_openstack_base_image.py delete mode 100644 tests/integration/test_openstack_one_runner.py delete mode 100644 tests/integration/test_openstack_runner.py diff --git a/.github/workflows/e2e_test.yaml b/.github/workflows/e2e_test.yaml index 938536335..7b2d68415 100644 --- a/.github/workflows/e2e_test.yaml +++ b/.github/workflows/e2e_test.yaml @@ -1,9 +1,7 @@ name: End-to-End Test on: - # TODO: Re-enable - # pull_request: - workflow_dispatch: + pull_request: jobs: build-charm: @@ -454,6 +452,7 @@ jobs: uses: ./.github/workflows/e2e_test_run.yaml with: runner-tag: "pr-${{ needs.run-id.outputs.run-id }}${{ github.run_attempt}}" + runner-virt-type: "lxd" required_status_checks: name: Required E2E Test Status Checks diff --git a/.github/workflows/e2e_test_openstack.yaml b/.github/workflows/e2e_test_openstack.yaml deleted file mode 100644 index 0c66e2c9c..000000000 --- a/.github/workflows/e2e_test_openstack.yaml +++ /dev/null @@ -1,462 +0,0 @@ -name: End-to-End Openstack Test - -on: - pull_request: - -jobs: - build-charm: - name: Build Charm - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Remove Unnecessary Components - run: | - rm -rf .git - rm -rf .github - - - name: Write lxd-profile.yaml - run: | - cat << EOF > ./lxd-profile.yaml - config: - security.nesting: true - security.privileged: true - raw.lxc: | - lxc.apparmor.profile=unconfined - lxc.mount.auto=proc:rw sys:rw cgroup:rw - lxc.cgroup.devices.allow=a - 
lxc.cap.drop= - devices: - kmsg: - path: /dev/kmsg - source: /dev/kmsg - type: unix-char - EOF - - - name: Cache github-runner Charm - uses: actions/cache@v4 - id: cache-charm - with: - path: github-runner_ubuntu-22.04-amd64.charm - key: github-runner-charm-${{ hashFiles('**/*') }} - - - name: Setup LXD - if: steps.cache-charm.outputs.cache-hit != 'true' - uses: canonical/setup-lxd@main - - - name: Install charmcraft - if: steps.cache-charm.outputs.cache-hit != 'true' - run: sudo snap install charmcraft --classic - - - name: Pack github-runner Charm - if: steps.cache-charm.outputs.cache-hit != 'true' - run: charmcraft pack || ( cat ~/.local/state/charmcraft/log/* && exit 1 ) - - - name: Upload github-runner Charm - uses: actions/upload-artifact@v4 - with: - name: dangerous-test-only-github-runner_ubuntu-22.04-amd64.charm - path: github-runner_ubuntu-22.04-amd64.charm - - run-id: - name: Generate Run ID - runs-on: ubuntu-latest - outputs: - run-id: ${{ steps.run-id.outputs.run-id }} - steps: - - name: Generate Run ID - id: run-id - run: | - echo "run-id=e2e-$(LC_ALL=C tr -dc 'a-z' < /dev/urandom | head -c4)" >> $GITHUB_OUTPUT - - deploy-e2e-test-runner: - name: Deploy End-to-End Test OpenStack Runner (${{ matrix.event.name }}) - runs-on: ["self-hosted", "xlarge", "x64"] - needs: [build-charm, run-id] - strategy: - matrix: - event: - - name: pull_request - abbreviation: pr - - name: workflow_dispatch - abbreviation: wd - - name: push - abbreviation: push - - name: schedule - abbreviation: sd - - name: issues - abbreviation: is - steps: - - name: Install GitHub Cli - run: which gh || sudo apt install gh -y - - name: Check rate limit - env: - GH_TOKEN: ${{ (matrix.event.name == 'issues' || matrix.event.name == 'schedule') && secrets.E2E_TESTING_TOKEN || secrets.GITHUB_TOKEN }} - run: | - # Check rate limit, this check does not count against the primary rate limit: - # https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#checking-the-status-of-your-rate-limit - gh api \ - --method GET \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" --jq ".resources.core" \ - /rate_limit - - - name: Setup Lxd Juju Controller - uses: charmed-kubernetes/actions-operator@main - with: - juju-channel: 3.2/stable - provider: microk8s - microk8s-addons: "dns ingress hostpath-storage" - channel: 1.26-strict/stable - - - uses: actions/checkout@v4.1.1 - - name: Setup microstack - run: bash -xe scripts/setup-microstack.sh - - - name: Create Testing Juju Model - run: juju add-model testing - - name: Set Testing Model Proxy Configuration - run: | - juju model-config juju-http-proxy=$http_proxy - juju model-config juju-https-proxy=$https_proxy - juju model-config juju-no-proxy=$no_proxy - - name: Change Testing Model Logging Level - run: juju model-config logging-config="=INFO;unit=DEBUG" - - - name: Download github-runner Charm - uses: actions/download-artifact@v4 - with: - name: dangerous-test-only-github-runner_ubuntu-22.04-amd64.charm - - name: Copy github-runner Charm - run: | - cp github-runner_ubuntu-22.04-amd64.charm /home/$USER/github-runner_ubuntu-22.04-amd64.charm - - - name: Generate Runner Name - id: runner-name - run: echo name=${{ matrix.event.abbreviation }}-${{ needs.run-id.outputs.run-id }}${{ github.run_attempt }} >> $GITHUB_OUTPUT - - - name: Create Runner OpenStack Flavor - run: | - OS_CLIENT_CONFIG_FILE="clouds.yaml" openstack --os-cloud sunbeam flavor create runner --ram 16384 --disk 20 --vcpus 16 - - - name: Deploy 
github-runner Charm (Pull Request, Workflow Dispatch and Push) - if: matrix.event.name == 'workflow_dispatch' || matrix.event.name == 'push' || matrix.event.name == 'pull_request' - run: | - CLOUDS_YAML="`cat clouds.yaml`" - juju deploy /home/$USER/github-runner_ubuntu-22.04-amd64.charm \ - ${{ steps.runner-name.outputs.name }} \ - --base ubuntu@22.04 \ - --config path=${{ secrets.E2E_TESTING_REPO }} \ - --config token=${{ secrets.E2E_TESTING_TOKEN }} \ - --config virtual-machines=1 \ - --config test-mode=insecure \ - --config experimental-openstack-clouds-yaml="$CLOUDS_YAML" \ - --config experimental-openstack-network=demo-network \ - --config experimental-openstack-flavor=runner - - - name: Checkout branch (Issues, Schedule) - if: matrix.event.name == 'issues' || matrix.event.name == 'schedule' - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - token: ${{ secrets.E2E_TESTING_TOKEN }} - - name: Create temporary orphan branch (Issues, Schedule) - if: matrix.event.name == 'issues' || matrix.event.name == 'schedule' - run: | - # We dont need all content for the test, so create an orphan branch. - git checkout --orphan ${{ steps.runner-name.outputs.name }} - git reset - - WF_FILE=".github/workflows/schedule_issues_test.yaml" - # Replace workflow event in schedule_issues_test.yaml - if [[ ${{ matrix.event.name }} == 'schedule' ]]; then - sed -i "s/workflow_dispatch:/schedule:\n - cron: '*\/5 * * * *'/" $WF_FILE - else - sed -i "s/workflow_dispatch:/issues:\n types: [opened]/" $WF_FILE - fi - git add $WF_FILE - git config user.name github-actions - git config user.email github-actions@github.com - git commit -m"Add ${{matrix.event.name}} workflow" - git push origin ${{ steps.runner-name.outputs.name }} - - name: Deploy github-runner Charm (Issues, Schedule) - if: matrix.event.name == 'issues' || matrix.event.name == 'schedule' - env: - GH_TOKEN: ${{ secrets.E2E_TESTING_TOKEN }} - run: | - # GitHub does not allow to create multiple forks of the same repo under the same user, - # so we need to create a new repository and push the branch to it. - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /user/repos \ - -f name=${{ steps.runner-name.outputs.name }} - - TESTING_REPO=${{ secrets.E2E_TESTING_TOKEN_ORG }}/${{ steps.runner-name.outputs.name }} - - # Create registration token in order to allow listing of runner binaries - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - repos/${TESTING_REPO}/actions/runners/registration-token - - # Push the orphan branch to the newly created repo. 
- git pull origin ${{ steps.runner-name.outputs.name }} - git remote add testing https://github.com/${TESTING_REPO}.git - git push testing ${{ steps.runner-name.outputs.name }}:main - - juju deploy /home/$USER/github-runner_ubuntu-22.04-amd64.charm \ - ${{ steps.runner-name.outputs.name }} - --config path=$TESTING_REPO \ - --config token=${{ secrets.E2E_TESTING_TOKEN }} \ - --config virtual-machines=1 \ - --config test-mode=insecure \ - --config experimental-openstack-clouds-yaml="$CLOUDS_YAML" \ - --config experimental-openstack-network=demo-network \ - --config experimental-openstack-flavor=runner - - - name: Watch github-runner (Pull Request) - if: matrix.event.name == 'pull_request' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - timeout-minutes: 30 - run: | - juju debug-log --replay --tail & - - while :; do - JOBS=$(gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ secrets.E2E_TESTING_REPO }}/actions/runs/$GITHUB_RUN_ID/attempts/$GITHUB_RUN_ATTEMPT/jobs) - CONCLUSION=$(echo $JOBS | jq -r '.jobs[] | select(.name == "End-to-End Test / End-to-End Test Run") | .conclusion') - STATUS=$(echo $JOBS | jq -r '.jobs[] | select(.name == "End-to-End Test / End-to-End Test Run") | .status') - if [[ $STATUS != "queued" && $STATUS != "in_progress" ]]; then - break - fi - sleep 10 - done - if [[ $STATUS != "completed" || $CONCLUSION != "success" ]]; then - echo "test workflow failed with status: $STATUS, conclusion: $CONCLUSION" - kill $(jobs -p) - exit 1 - fi - - - name: Trigger workflow (Workflow Dispatch and Push) - if: matrix.event.name == 'workflow_dispatch' || matrix.event.name == 'push' - env: - # push requires E2E_TESTING_TOKEN, because if GITHUB_TOKEN is used, no workflow is triggered for a push: - # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow - GH_TOKEN: ${{ matrix.event.name == 'workflow_dispatch' && secrets.GITHUB_TOKEN || secrets.E2E_TESTING_TOKEN }} - run: | - # Base any future branches on the current branch - REF_SHA=$(gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ secrets.E2E_TESTING_REPO }}/git/ref/heads/$GITHUB_HEAD_REF \ - --jq .object.sha) - - # Create a temporary reference/branch - # For push, this should trigger the "Push Event Tests" workflow automatically - # because the test is run for branches matching the pattern "push-e2e-*" - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ secrets.E2E_TESTING_REPO }}/git/refs \ - -f ref='refs/heads/${{ steps.runner-name.outputs.name }}' \ - -f sha=$REF_SHA - - # For workflow_dispatch, we need to trigger the "Workflow Dispatch Tests" workflow manually - if ${{ matrix.event.name == 'workflow_dispatch' }}; then - gh workflow run workflow_dispatch_test.yaml \ - -R ${{ secrets.E2E_TESTING_REPO }} \ - --ref ${{ steps.runner-name.outputs.name }} \ - -f runner=${{ steps.runner-name.outputs.name }} - fi - - - name: Watch github-runner (Workflow Dispatch and Push) - if: matrix.event.name == 'workflow_dispatch' || matrix.event.name == 'push' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - juju debug-log --replay --tail & - - get-workflow-status() { - # Search recent workflow runs for the one designated by the run-id ref - output=$(gh run list \ - -R ${{ secrets.E2E_TESTING_REPO }} \ - -L 100 \ - --json headBranch,status \ - --jq '[.[] | select(.headBranch=="${{ 
steps.runner-name.outputs.name }}")]') - - # Workflows that have not started have no status - if [ $(echo "$output" | jq 'length') -eq 0 ] - then - echo "not_started" - else - # Parse output with jq to get the status field of the first object - status=$(echo "$output" | jq -r '.[0].status') - echo "$status" - fi - } - - # Wait for the workflow to start while checking its status - for i in {1..360} - do - status=$(get-workflow-status) - echo "workflow status: $status" - if [[ $status != "not_started" && $status != "queued" && $status != "in_progress" ]]; then - break - fi - sleep 10 - done - - # Make sure the workflow was completed or else consider it failed - conclusion=$(gh run list \ - -R ${{ secrets.E2E_TESTING_REPO }} \ - -L 100 \ - --json headBranch,conclusion \ - --jq '.[] | select(.headBranch=="${{ steps.runner-name.outputs.name }}") | .conclusion') - - if [[ $status != "completed" || $conclusion != "success" ]]; then - echo "test workflow failed with status: $status, conclusion: $conclusion" - kill $(jobs -p) - exit 1 - else - echo "Workflow completed with status: $status, conclusion: $conclusion, run-id: ${{ steps.runner-name.outputs.name }}" - kill $(jobs -p) - fi - - - name: Trigger workflow and watch github-runner (Issues, Schedule) - if: matrix.event.name == 'issues' || matrix.event.name == 'schedule' - env: - GH_TOKEN: ${{ secrets.E2E_TESTING_TOKEN }} - run: | - juju debug-log --replay --tail & - - TESTING_REPO=${{ secrets.E2E_TESTING_TOKEN_ORG }}/${{ steps.runner-name.outputs.name }} - - # For issues, we need to trigger the workflow by opening an issue - if ${{ matrix.event.name == 'issues' }}; then - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${TESTING_REPO}/issues \ - -f title="Test issue ${{ steps.runner-name.outputs.name }}" - fi - - get-workflow-status() { - # Search recent workflow runs for the one designated by the run-id ref - output=$(gh run list \ - -R ${TESTING_REPO} \ - -L 100 \ - --json headBranch,status,createdAt \ - --jq '[.[] | select(.headBranch=="main")] | sort_by(.createdAt)') - - # Workflows that have not started have no status - if [ $(echo "$output" | jq 'length') -eq 0 ] - then - echo "not_started" - else - # Parse output with jq to get the status field of the first object - status=$(echo "$output" | jq -r '.[0].status') - echo "$status" - fi - } - - # Wait for the workflow to start while checking its status - for i in {1..360} - do - status=$(get-workflow-status) - echo "workflow status: $status" - if [[ $status != "not_started" && $status != "queued" && $status != "in_progress" ]]; then - break - fi - sleep 10 - done - - # Make sure the workflow was completed or else consider it failed - runs=$(gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${TESTING_REPO}/actions/runs \ - --jq '[.workflow_runs[] | select(.head_branch=="main")] | sort_by(.created_at)') - conclusion=$(echo $runs | jq -r '.[0].conclusion') - wf_run_id=$(echo $runs | jq -r '.[0].id') - - logs_filename=${{matrix.event.name}}-workflow-logs.zip - # We retrieve the logs because the testing repo is deleted at the end of the test - gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${TESTING_REPO}/actions/runs/${wf_run_id}/logs > ${logs_filename} \ - || (echo "Failed to retrieve logs from schedule tests" && rm ${logs_filename}) - - - if [[ $status != "completed" || $conclusion != "success" ]]; then - 
echo "test workflow failed with status: $status, conclusion: $conclusion" - kill $(jobs -p) - exit 1 - else - echo "Workflow completed with status: $status, conclusion: $conclusion, run-id: ${{ steps.runner-name.outputs.name }}" - kill $(jobs -p) - fi - - name: Upload test logs (Issues, Schedule) - if: always() && (matrix.event.name == 'issues' || matrix.event.name == 'schedule') - uses: actions/upload-artifact@v4 - with: - name: ${{matrix.event.name}}-workflow-logs.zip - path: ${{matrix.event.name}}-workflow-logs.zip - if-no-files-found: ignore - - - name: Show Firewall Rules - run: | - juju ssh ${{ steps.runner-name.outputs.name }}/0 sudo nft list ruleset - - - name: Clean Up (Workflow Dispatch and Push) - if: always() && (matrix.event.name == 'workflow_dispatch' || matrix.event.name == 'push') - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/${{ secrets.E2E_TESTING_REPO }}/git/refs/heads/${{ steps.runner-name.outputs.name }}" - echo "Deleted ref ${{ steps.runner-name.outputs.name }}" - - - name: Clean Up (Issues, Schedule) - if: always() && (matrix.event.name == 'issues' || matrix.event.name == 'schedule') - env: - GH_TOKEN: ${{ secrets.E2E_TESTING_TOKEN }} - run: | - set +e - - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/${{ secrets.E2E_TESTING_REPO }}/git/refs/heads/${{ steps.runner-name.outputs.name }}" \ - && echo "Deleted ref ${{ steps.runner-name.outputs.name }}" - - TESTING_REPO=${{ secrets.E2E_TESTING_TOKEN_ORG }}/${{ steps.runner-name.outputs.name }} - - set -e - - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/${TESTING_REPO}" - - echo "Deleted repo ${TESTING_REPO}" - - e2e-test: - name: End-to-End Test - needs: [build-charm, run-id] - uses: ./.github/workflows/e2e_test_run.yaml - with: - runner-tag: "pr-${{ needs.run-id.outputs.run-id }}${{ github.run_attempt}}" diff --git a/.github/workflows/e2e_test_run.yaml b/.github/workflows/e2e_test_run.yaml index 3d7870803..1ecc238e1 100644 --- a/.github/workflows/e2e_test_run.yaml +++ b/.github/workflows/e2e_test_run.yaml @@ -12,12 +12,25 @@ on: description: The e2e test runner tag to run the workflow on. type: string required: true + runner-virt-type: + description: The e2e test runner virtualization type. E.g. lxd, or openstack. + # workflow_call does not support choice type. + type: string + required: true workflow_dispatch: inputs: runner-tag: description: The e2e test runner tag to run the workflow on. type: string required: true + runner-virt-type: + description: The e2e test runner virtualization type. 
+ type: choice + required: true + options: + - lxd + - openstack + jobs: e2e-test: name: End-to-End Test Run @@ -89,8 +102,10 @@ jobs: - name: test check-jsonschema run: check-jsonschema --version - name: Test Firewall + if: "${{ github.event.inputs.runner-virt-type == 'lxd' }}" run: | HOST_IP=$(ip route | grep default | cut -f 3 -d" ") [ $((ping $HOST_IP -c 5 || :) | grep "Destination Port Unreachable" | wc -l) -eq 5 ] - name: Test sctp support + if: "${{ github.event.inputs.runner-virt-type == 'lxd' }}" run: sudo apt-get install lksctp-tools -yq && checksctp diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 1435a6adf..73100c079 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -1,9 +1,7 @@ name: integration-tests on: - # TODO: Re-enable - # pull_request: - workflow_dispatch: + pull_request: jobs: # test option values defined at test/conftest.py are passed on via repository secret @@ -27,4 +25,17 @@ jobs: pre-run-script: scripts/pre-integration-test.sh provider: lxd test-tox-env: integration-juju3.1 - modules: '["test_charm_base_image", "test_charm_fork_repo", "test_charm_no_runner", "test_charm_scheduled_events", "test_charm_one_runner", "test_charm_metrics_success", "test_charm_metrics_failure", "test_self_hosted_runner", "test_charm_with_proxy", "test_charm_with_juju_storage", "test_debug_ssh", "test_charm_upgrade"]' \ No newline at end of file + modules: '["test_charm_base_image", "test_charm_fork_repo", "test_charm_no_runner", "test_charm_scheduled_events", "test_charm_one_runner", "test_charm_metrics_success", "test_charm_metrics_failure", "test_self_hosted_runner", "test_charm_with_proxy", "test_charm_with_juju_storage", "test_debug_ssh", "test_charm_upgrade"]' + openstack-integration-tests-private-endpoint: + name: Integration test using private-endpoint + uses: canonical/operator-workflows/.github/workflows/integration_test.yaml@main + secrets: inherit + with: + juju-channel: 3.2/stable + pre-run-script: scripts/setup-lxd.sh + provider: lxd + test-tox-env: integration-juju3.2 + modules: '["test_charm_metrics_failure", "test_charm_metrics_success", "test_charm_fork_repo", "test_charm_runner", "test_e2e"]' + extra-arguments: "-m openstack --openstack-flavor-name=builder-cpu4-ram8-disk50 --http-proxy=http://squid.internal:3128 --https-proxy=http://squid.internal:3128 --no-proxy=keystone.ps6.canonical.com,glance.ps6.canonical.com,nova.ps6.canonical.com,neutron.ps6.canonical.com" + self-hosted-runner: true + self-hosted-runner-label: stg-private-endpoint diff --git a/.github/workflows/manual_test_env.yaml b/.github/workflows/manual_test_env.yaml deleted file mode 100644 index 9518e887b..000000000 --- a/.github/workflows/manual_test_env.yaml +++ /dev/null @@ -1,24 +0,0 @@ -name: Manual test - -on: - # TODO: Re-enable - # pull_request: - workflow_dispatch: - -jobs: - integration-tests-microstack: - name: Integration test using microstack - uses: canonical/operator-workflows/.github/workflows/integration_test.yaml@main - secrets: inherit - with: - juju-channel: 3.2/stable - pre-run-script: scripts/setup-microstack.sh - provider: microk8s - channel: 1.26-strict/stable - microk8s-addons: "dns ingress hostpath-storage" - test-tox-env: integration-juju3.2 - modules: '["test_openstack_runner"]' - self-hosted-runner: true - self-hosted-runner-label: xlarge - tmate-debug: true - tmate-timeout: 2400 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 
2044639a0..99e540d31 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,9 +1,7 @@ name: Tests on: - # TODO: Re-enable - # pull_request: - workflow_dispatch: + pull_request: jobs: unit-tests: diff --git a/pyproject.toml b/pyproject.toml index 132be1edc..94572081c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,6 @@ omit = [ "src/repo_policy_compliance_client.py", # 2024/04/17: These files are pending a major refactor. The refactor includes a RunnerManager # interface class which will include a complete re-organization of the code in these files. - "src/openstack_cloud/openstack_manager.py", "src/runner.py", "src/runner_manager.py", ] diff --git a/scripts/setup-lxd.sh b/scripts/setup-lxd.sh new file mode 100644 index 000000000..5b2a5a6cb --- /dev/null +++ b/scripts/setup-lxd.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Script to setup LXD for testing + +juju bootstrap localhost lxd diff --git a/src-docs/charm_state.py.md b/src-docs/charm_state.py.md index 6f560f251..31615fa2f 100644 --- a/src-docs/charm_state.py.md +++ b/src-docs/charm_state.py.md @@ -36,7 +36,7 @@ State of the Charm. --- - + ## function `parse_github_path` @@ -137,7 +137,7 @@ Some charm configurations are grouped into other configuration models. --- - + ### classmethod `check_reconcile_interval` @@ -166,7 +166,7 @@ Validate the general charm configuration. --- - + ### classmethod `from_charm` @@ -205,7 +205,7 @@ Raised when charm config is invalid. - `msg`: Explanation of the error. - + ### function `__init__` @@ -247,7 +247,7 @@ The charm state. --- - + ### classmethod `from_charm` @@ -292,7 +292,7 @@ Charm configuration related to GitHub. --- - + ### classmethod `from_charm` @@ -337,7 +337,7 @@ Represent GitHub organization. --- - + ### function `path` @@ -370,7 +370,7 @@ Represent GitHub repository. --- - + ### function `path` @@ -391,7 +391,7 @@ Return a string representing the path. ## class `ImmutableConfigChangedError` Represents an error when changing immutable charm state. - + ### function `__init__` @@ -446,7 +446,7 @@ Runner configurations for local LXD instances. --- - + ### classmethod `check_virtual_machine_resources` @@ -477,7 +477,7 @@ Validate the virtual_machine_resources field values. --- - + ### classmethod `check_virtual_machines` @@ -506,7 +506,7 @@ Validate the virtual machines configuration value. --- - + ### classmethod `from_charm` @@ -553,7 +553,7 @@ Runner configuration for OpenStack Instances. --- - + ### classmethod `from_charm` @@ -607,7 +607,7 @@ Return the aproxy address. --- - + ### classmethod `check_use_aproxy` @@ -637,7 +637,7 @@ Validate the proxy configuration. --- - + ### classmethod `from_charm` @@ -676,7 +676,7 @@ Configuration for the repo policy compliance service. --- - + ### classmethod `from_charm` @@ -739,7 +739,7 @@ SSH connection information for debug workflow. --- - + ### classmethod `from_charm` @@ -772,7 +772,7 @@ Raised when given machine charm architecture is unsupported. - `arch`: The current machine architecture. - + ### function `__init__` diff --git a/src/charm_state.py b/src/charm_state.py index c896a5db5..11888c25e 100644 --- a/src/charm_state.py +++ b/src/charm_state.py @@ -1,6 +1,10 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +# 2024/06/26 The charm contains a lot of states and configuration. The upcoming refactor will +# split each/related class to a file. 
+# pylint: disable=too-many-lines + """State of the Charm.""" import dataclasses diff --git a/src/openstack_cloud/openstack_manager.py b/src/openstack_cloud/openstack_manager.py index cf4b1e07b..44b4a6867 100644 --- a/src/openstack_cloud/openstack_manager.py +++ b/src/openstack_cloud/openstack_manager.py @@ -761,7 +761,7 @@ def _get_ssh_connection( An SSH connection to OpenStack server instance. """ server: Server | None = conn.get_server(name_or_id=server_name) - if not server: + if server is None: raise _SSHError(f"Server gone while trying to get SSH connection: {server_name}.") if not server.key_name: raise _SSHError( diff --git a/tests/conftest.py b/tests/conftest.py index 744f671f5..cf04487fd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -71,44 +71,53 @@ def pytest_addoption(parser: Parser): "--openstack-network-name", action="store", help="The Openstack network to create testing instances under.", + default=None, ) parser.addoption( "--openstack-flavor-name", action="store", help="The Openstack flavor to create testing instances with.", + default=None, ) parser.addoption( "--openstack-auth-url", action="store", help="The URL to Openstack authentication service, i.e. keystone.", + default=None, ) parser.addoption( "--openstack-password", action="store", help="The password to authenticate to Openstack service.", + default=None, ) parser.addoption( "--openstack-project-domain-name", action="store", help="The Openstack project domain name to use.", + default=None, ) parser.addoption( "--openstack-project-name", action="store", help="The Openstack project name to use.", + default=None, ) parser.addoption( "--openstack-user-domain-name", action="store", help="The Openstack user domain name to use.", + default=None, ) parser.addoption( "--openstack-username", action="store", help="The Openstack user to authenticate as.", + default=None, ) parser.addoption( "--openstack-region-name", action="store", help="The Openstack region to authenticate to.", + default=None, ) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 452759a52..3177d3012 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -5,13 +5,13 @@ import logging import random import secrets +import string from pathlib import Path from time import sleep from typing import Any, AsyncIterator, Generator, Iterator, Optional import nest_asyncio import openstack -import openstack.connection import pytest import pytest_asyncio import yaml @@ -22,7 +22,7 @@ from juju.application import Application from juju.client._definitions import FullStatus, UnitStatus from juju.model import Model -from openstack.exceptions import ConflictException +from openstack.connection import Connection from pytest_operator.plugin import OpsTest from charm_state import ( @@ -33,6 +33,7 @@ PATH_CONFIG_NAME, USE_APROXY_CONFIG_NAME, VIRTUAL_MACHINES_CONFIG_NAME, + InstanceType, ) from github_client import GithubClient from tests.integration.helpers.common import ( @@ -51,6 +52,20 @@ nest_asyncio.apply() +@pytest_asyncio.fixture(scope="module", name="instance_type") +async def instance_type_fixture( + request: pytest.FixtureRequest, pytestconfig: pytest.Config +) -> InstanceType: + # Due to scope being module we cannot use request.node.get_closes_marker as openstack + # mark is not available in this scope. 
+ openstack_marker = pytestconfig.getoption("-m") == "openstack" + + if openstack_marker: + return InstanceType.OPENSTACK + else: + return InstanceType.LOCAL_LXD + + @pytest.fixture(scope="module") def metadata() -> dict[str, Any]: """Metadata information of the charm.""" @@ -72,6 +87,12 @@ def app_name(existing_app: Optional[str]) -> str: return existing_app or f"integration-id{secrets.token_hex(2)}" +@pytest.fixture(scope="module", name="openstack_clouds_yaml") +def openstack_clouds_yaml_fixture(pytestconfig: pytest.Config) -> str | None: + """The openstack clouds yaml config.""" + return pytestconfig.getoption("--openstack-clouds-yaml") + + @pytest.fixture(scope="module") def charm_file( pytestconfig: pytest.Config, loop_device: Optional[str], openstack_clouds_yaml: Optional[str] @@ -148,42 +169,100 @@ def loop_device(pytestconfig: pytest.Config) -> Optional[str]: return pytestconfig.getoption("--loop-device") -@pytest.fixture(scope="module") -def openstack_clouds_yaml(pytestconfig: pytest.Config) -> Optional[str]: - """Configured clouds-yaml setting.""" - clouds_yaml = pytestconfig.getoption("--openstack-clouds-yaml") - return Path(clouds_yaml).read_text(encoding="utf-8") if clouds_yaml else None +@pytest.fixture(scope="module", name="private_endpoint_clouds_yaml") +def private_endpoint_clouds_yaml_fixture(pytestconfig: pytest.Config) -> Optional[str]: + """The openstack private endpoint clouds yaml.""" + auth_url = pytestconfig.getoption("--openstack-auth-url") + password = pytestconfig.getoption("--openstack-password") + project_domain_name = pytestconfig.getoption("--openstack-project-domain-name") + project_name = pytestconfig.getoption("--openstack-project-name") + user_domain_name = pytestconfig.getoption("--openstack-user-domain-name") + user_name = pytestconfig.getoption("--openstack-username") + region_name = pytestconfig.getoption("--openstack-region-name") + if any( + not val + for val in ( + auth_url, + password, + project_domain_name, + project_name, + user_domain_name, + user_name, + region_name, + ) + ): + return None + return string.Template( + Path("tests/integration/data/clouds.yaml.tmpl").read_text(encoding="utf-8") + ).substitute( + { + "auth_url": auth_url, + "password": password, + "project_domain_name": project_domain_name, + "project_name": project_name, + "user_domain_name": user_domain_name, + "username": user_name, + "region_name": region_name, + } + ) -@pytest.fixture(scope="module", name="openstack_connection") -def openstack_connection_fixture( - openstack_clouds_yaml: Optional[str], -) -> Generator[openstack.connection.Connection, None, None]: - """The openstack connection instance.""" - assert openstack_clouds_yaml, "Openstack clouds yaml was not provided." 
+@pytest.fixture(scope="module", name="clouds_yaml_contents")
+def clouds_yaml_contents_fixture(
+    openstack_clouds_yaml: Optional[str], private_endpoint_clouds_yaml: Optional[str]
+):
+    """The Openstack clouds yaml or private endpoint clouds yaml contents."""
+    clouds_yaml_contents = openstack_clouds_yaml or private_endpoint_clouds_yaml
+    assert clouds_yaml_contents, (
+        "Please specify --openstack-clouds-yaml or all of the private endpoint arguments "
+        "(--openstack-auth-url, --openstack-password, --openstack-project-domain-name, "
+        "--openstack-project-name, --openstack-user-domain-name, --openstack-username, "
+        "--openstack-region-name)"
+    )
+    return clouds_yaml_contents
 
-    openstack_clouds_yaml_yaml = yaml.safe_load(openstack_clouds_yaml)
-    clouds_yaml_path = Path.cwd() / "clouds.yaml"
-    clouds_yaml_path.write_text(data=openstack_clouds_yaml, encoding="utf-8")
-    first_cloud = next(iter(openstack_clouds_yaml_yaml["clouds"].keys()))
-    with openstack.connect(first_cloud) as conn:
-        yield conn
 
+@pytest.fixture(scope="module", name="network_name")
+def network_name_fixture(pytestconfig: pytest.Config) -> str:
+    """Network to use to spawn test instances under."""
+    network_name = pytestconfig.getoption("--openstack-network-name")
+    assert network_name, "Please specify the --openstack-network-name command line option"
+    return network_name
 
-@pytest.fixture(scope="module", name="openstack_flavor")
-def openstack_flavor_fixture(
-    openstack_connection: openstack.connection.Connection,
-) -> str:
-    """Name of the openstack flavor for runner."""
-    flavor_name = "runner"
-    try:
-        openstack_connection.create_flavor(flavor_name, 4096, 2, 10)
-    except ConflictException:
-        # Do nothing if flavor already exists.
-        pass
+
+@pytest.fixture(scope="module", name="flavor_name")
+def flavor_name_fixture(pytestconfig: pytest.Config) -> str:
+    """Flavor to create testing instances with."""
+    flavor_name = pytestconfig.getoption("--openstack-flavor-name")
+    assert flavor_name, "Please specify the --openstack-flavor-name command line option"
     return flavor_name
 
 
+@pytest.fixture(scope="module", name="openstack_connection")
+def openstack_connection_fixture(
+    clouds_yaml_contents: str, app_name: str
+) -> Generator[Connection, None, None]:
+    """The openstack connection instance."""
+    clouds_yaml = yaml.safe_load(clouds_yaml_contents)
+    clouds_yaml_path = Path.cwd() / "clouds.yaml"
+    clouds_yaml_path.write_text(data=clouds_yaml_contents, encoding="utf-8")
+    first_cloud = next(iter(clouds_yaml["clouds"].keys()))
+    with openstack.connect(first_cloud) as connection:
+        yield connection
+
+    # servers, keys, security groups, security rules, images are created by the charm.
+    # don't remove security groups & rules since they are single instances.
+ # don't remove images since it will be moved to image-builder + for server in connection.list_servers(): + server_name: str = server.name + if server_name.startswith(app_name): + connection.delete_server(server_name) + for key in connection.list_keypairs(): + key_name: str = key.name + if key_name.startswith(app_name): + connection.delete_keypair(key_name) + + @pytest.fixture(scope="module") def model(ops_test: OpsTest) -> Model: """Juju model used in the test.""" @@ -234,8 +313,9 @@ async def app_openstack_runner_fixture( http_proxy: str, https_proxy: str, no_proxy: str, - openstack_clouds_yaml: str, - openstack_flavor: str, + clouds_yaml_contents: str, + network_name: str, + flavor_name: str, existing_app: Optional[str], ) -> AsyncIterator[Application]: """Application launching VMs and no runners.""" @@ -255,15 +335,13 @@ async def app_openstack_runner_fixture( reconcile_interval=60, constraints={ "root-disk": 50 * 1024, - "cores": 4, "mem": 16 * 1024, - "arch": "arm64", + # "arch": "arm64", }, config={ - OPENSTACK_CLOUDS_YAML_CONFIG_NAME: openstack_clouds_yaml, - # this is set by microstack sunbeam, see scripts/setup-microstack.sh - OPENSTACK_NETWORK_CONFIG_NAME: "demo-network", - OPENSTACK_FLAVOR_CONFIG_NAME: openstack_flavor, + OPENSTACK_CLOUDS_YAML_CONFIG_NAME: clouds_yaml_contents, + OPENSTACK_NETWORK_CONFIG_NAME: network_name, + OPENSTACK_FLAVOR_CONFIG_NAME: flavor_name, USE_APROXY_CONFIG_NAME: "true", LABELS_CONFIG_NAME: app_name, }, @@ -580,14 +658,10 @@ async def app_with_grafana_agent_integrated_fixture( @pytest_asyncio.fixture(scope="module", name="basic_app") async def basic_app_fixture( - request: pytest.FixtureRequest, pytestconfig: pytest.Config + request: pytest.FixtureRequest, instance_type: InstanceType ) -> Application: """Setup the charm with the basic configuration.""" - # Due to scope being module we cannot use request.node.get_closes_marker as openstack - # mark is not available in this scope. 
- openstack_marker = pytestconfig.getoption("-m") == "openstack" - - if openstack_marker: + if instance_type == InstanceType.OPENSTACK: app = request.getfixturevalue("app_openstack_runner") else: app = request.getfixturevalue("app_no_runner") @@ -595,11 +669,12 @@ async def basic_app_fixture( @pytest_asyncio.fixture(scope="function", name="instance_helper") -async def instance_helper_fixture(request: pytest.FixtureRequest) -> InstanceHelper: +async def instance_helper_fixture( + request: pytest.FixtureRequest, instance_type: InstanceType +) -> InstanceHelper: """Instance helper fixture.""" - openstack_marker = request.node.get_closest_marker("openstack") helper: InstanceHelper - if openstack_marker: + if instance_type == InstanceType.OPENSTACK: openstack_connection = request.getfixturevalue("openstack_connection") helper = OpenStackInstanceHelper(openstack_connection=openstack_connection) else: diff --git a/tests/integration/data/clouds.yaml.tmpl b/tests/integration/data/clouds.yaml.tmpl new file mode 100644 index 000000000..3b6cc91bc --- /dev/null +++ b/tests/integration/data/clouds.yaml.tmpl @@ -0,0 +1,10 @@ +clouds: + testcloud: + auth: + auth_url: ${auth_url} + password: ${password} + project_domain_name: ${project_domain_name} + project_name: ${project_name} + user_domain_name: ${user_domain_name} + username: ${username} + region_name: ${region_name} diff --git a/tests/integration/helpers/common.py b/tests/integration/helpers/common.py index e433794b3..8f73bd081 100644 --- a/tests/integration/helpers/common.py +++ b/tests/integration/helpers/common.py @@ -44,6 +44,7 @@ DISPATCH_FAILURE_TEST_WORKFLOW_FILENAME = "workflow_dispatch_failure_test.yaml" DISPATCH_WAIT_TEST_WORKFLOW_FILENAME = "workflow_dispatch_wait_test.yaml" DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME = "e2e_test_run.yaml" +DISPATCH_E2E_TEST_RUN_OPENSTACK_WORKFLOW_FILENAME = "e2e_test_run_openstack.yaml" DEFAULT_RUNNER_CONSTRAINTS = {"root-disk": 15} @@ -284,7 +285,7 @@ async def deploy_github_runner_charm( application = await model.deploy( charm_file, application_name=app_name, - series="jammy", + base="ubuntu@22.04", config=default_config, constraints=constraints or DEFAULT_RUNNER_CONSTRAINTS, storage=storage, diff --git a/tests/integration/helpers/openstack.py b/tests/integration/helpers/openstack.py index 351e8dfae..7e62e1855 100644 --- a/tests/integration/helpers/openstack.py +++ b/tests/integration/helpers/openstack.py @@ -26,11 +26,46 @@ def __init__(self, openstack_connection: openstack.connection.Connection): """ self.openstack_connection = openstack_connection + async def expose_to_instance( + self, + unit: Unit, + port: int, + ) -> None: + """Expose a port on the juju machine to the OpenStack instance. + + Uses SSH remote port forwarding from the juju machine to the OpenStack instance containing + the runner. + + Args: + unit: The juju unit of the github-runner charm. + port: The port on the juju machine to expose to the runner. 
+ """ + runner = self._get_runner(unit=unit) + assert runner, f"Runner not found for unit {unit.name}" + network_address_list = runner.addresses.values() + logger.warning(network_address_list) + assert ( + network_address_list + ), f"No addresses to connect to for OpenStack server {runner.name}" + + ip = None + for network_addresses in network_address_list: + for address in network_addresses: + ip = address["addr"] + break + assert ip, f"Failed to get IP address for OpenStack server {runner.name}" + + ssh_cmd = f'ssh -fNT -R {port}:localhost:{port} -i /home/ubuntu/.ssh/runner-{runner.name}.key -o "StrictHostKeyChecking no" -o "ControlPersist yes" ubuntu@{ip} &' + exit_code, _, stderr = await run_in_unit(unit, ssh_cmd) + assert exit_code == 0, f"Error in SSH remote forwarding of port {port}: {stderr}" + async def run_in_instance( self, unit: Unit, command: str, timeout: int | None = None, + assert_on_failure: bool = False, + assert_msg: str | None = None, ) -> tuple[int, str | None, str | None]: """Run command in OpenStack instance. @@ -38,6 +73,8 @@ async def run_in_instance( unit: Juju unit to execute the command in. command: Command to execute. timeout: Amount of time to wait for the execution. + assert_on_failure: Perform assertion on non-zero exit code. + assert_msg: Message for the failure assertion. Returns: Tuple of return code, stdout and stderr. @@ -60,7 +97,17 @@ async def run_in_instance( ssh_cmd = f'ssh -i /home/ubuntu/.ssh/runner-{runner.name}.key -o "StrictHostKeyChecking no" ubuntu@{ip} {command}' ssh_cmd_as_ubuntu_user = f"su - ubuntu -c '{ssh_cmd}'" logging.warning("ssh_cmd: %s", ssh_cmd_as_ubuntu_user) - return await run_in_unit(unit, ssh_cmd, timeout) + exit_code, stdout, stderr = await run_in_unit(unit, ssh_cmd, timeout) + logger.debug( + "Run command '%s' in runner with result %s: '%s' '%s'", + command, + exit_code, + stdout, + stderr, + ) + if assert_on_failure: + assert exit_code == 0, assert_msg + return exit_code, stdout, stderr async def ensure_charm_has_runner(self, app: Application) -> None: """Reconcile the charm to contain one runner. 
@@ -150,7 +197,6 @@ async def setup_repo_policy( ) instance_helper = OpenStackInstanceHelper(openstack_connection) - unit_address = await unit.get_public_address() await app.expose() unit_name_without_slash = unit.name.replace("/", "-") await run_in_unit( @@ -162,11 +208,12 @@ async def setup_repo_policy( await app.set_config( { "repo-policy-compliance-token": charm_token, - "repo-policy-compliance-url": f"http://{unit_address}:8080", + "repo-policy-compliance-url": "http://localhost:8080", } ) await instance_helper.ensure_charm_has_runner(app=app) + await instance_helper.expose_to_instance(unit, 8080) async def _install_repo_policy( diff --git a/tests/integration/test_charm_base_image.py b/tests/integration/test_charm_base_image.py index b64796bc6..f4f76e03f 100644 --- a/tests/integration/test_charm_base_image.py +++ b/tests/integration/test_charm_base_image.py @@ -9,13 +9,15 @@ from juju.model import Model from charm_state import BASE_IMAGE_CONFIG_NAME -from tests.integration.helpers import ( +from tests.integration.helpers.common import ( DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, dispatch_workflow, + wait_for, +) +from tests.integration.helpers.lxd import ( ensure_charm_has_runner, get_runner_name, run_in_lxd_instance, - wait_for, ) diff --git a/tests/integration/test_charm_fork_repo.py b/tests/integration/test_charm_fork_repo.py index 04e9f7754..00e600b81 100644 --- a/tests/integration/test_charm_fork_repo.py +++ b/tests/integration/test_charm_fork_repo.py @@ -55,8 +55,6 @@ async def test_dispatch_workflow_failure( https_proxy=https_proxy, ) - await instance_helper.ensure_charm_has_runner(app_with_forked_repo) - workflow = forked_github_repository.get_workflow( id_or_file_name=DISPATCH_FAILURE_TEST_WORKFLOW_FILENAME ) diff --git a/tests/integration/test_charm_one_runner.py b/tests/integration/test_charm_lxd_runner.py similarity index 65% rename from tests/integration/test_charm_one_runner.py rename to tests/integration/test_charm_lxd_runner.py index c8e537d85..c656f724d 100644 --- a/tests/integration/test_charm_one_runner.py +++ b/tests/integration/test_charm_lxd_runner.py @@ -11,16 +11,8 @@ from juju.model import Model from charm import GithubRunnerCharm -from charm_state import ( - RUNNER_STORAGE_CONFIG_NAME, - TOKEN_CONFIG_NAME, - VIRTUAL_MACHINES_CONFIG_NAME, - VM_CPU_CONFIG_NAME, - VM_DISK_CONFIG_NAME, - VM_MEMORY_CONFIG_NAME, -) +from charm_state import RUNNER_STORAGE_CONFIG_NAME, TOKEN_CONFIG_NAME, VIRTUAL_MACHINES_CONFIG_NAME from tests.integration.helpers.lxd import ( - assert_resource_lxd_profile, ensure_charm_has_runner, get_runner_names, reconcile, @@ -74,88 +66,6 @@ async def test_network_access(app: Application) -> None: assert stdout is None -@pytest.mark.asyncio -@pytest.mark.abort_on_fail -async def test_flush_runner_and_resource_config(app: Application) -> None: - """ - arrange: A working application with one runner. - act: - 1. Run Check_runner action. Record the runner name for later. - 2. Nothing. - 3. Change the virtual machine resource configuration. - 4. Run flush_runner action. - - assert: - 1. One runner exists. - 2. LXD profile of matching resource config exists. - 3. Nothing. - 4. a. The runner name should be different to the runner prior running - the action. - b. LXD profile matching virtual machine resources of step 2 exists. - - Test are combined to reduce number of runner spawned. - """ - unit = app.units[0] - - # 1. 
- action = await app.units[0].run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "1" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - runner_names = action.results["runners"].split(", ") - assert len(runner_names) == 1 - - # 2. - configs = await app.get_config() - await assert_resource_lxd_profile(unit, configs) - - # 3. - await app.set_config( - {VM_CPU_CONFIG_NAME: "1", VM_MEMORY_CONFIG_NAME: "3GiB", VM_DISK_CONFIG_NAME: "8GiB"} - ) - - # 4. - action = await app.units[0].run_action("flush-runners") - await action.wait() - - configs = await app.get_config() - await assert_resource_lxd_profile(unit, configs) - await wait_till_num_of_runners(unit, 1) - - action = await app.units[0].run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "1" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - new_runner_names = action.results["runners"].split(", ") - assert len(new_runner_names) == 1 - assert new_runner_names[0] != runner_names[0] - - -@pytest.mark.asyncio -@pytest.mark.abort_on_fail -async def test_check_runner(app: Application) -> None: - """ - arrange: A working application with one runner. - act: Run check_runner action. - assert: Action returns result with one runner. - """ - action = await app.units[0].run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "1" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - @pytest.mark.asyncio @pytest.mark.abort_on_fail async def test_token_config_changed(model: Model, app: Application, token_alt: str) -> None: @@ -289,19 +199,3 @@ async def test_disabled_apt_daily_upgrades(model: Model, app: Application) -> No assert "apt-daily" not in stdout # this also checks for apt-daily-upgrade service assert "unattended-upgrades" not in stdout - - -async def test_token_config_changed_insufficient_perms( - model: Model, app: Application, token: str -) -> None: - """ - arrange: A working application with one runner. - act: Change the token to be invalid and set the number of runners to zero. - assert: The active runner should be removed, regardless of the invalid new token. - """ - unit = app.units[0] - - await app.set_config({TOKEN_CONFIG_NAME: "invalid-token", VIRTUAL_MACHINES_CONFIG_NAME: "0"}) - await model.wait_for_idle() - - await wait_till_num_of_runners(unit, num=0) diff --git a/tests/integration/test_charm_metrics_failure.py b/tests/integration/test_charm_metrics_failure.py index 860c44573..832f1791a 100644 --- a/tests/integration/test_charm_metrics_failure.py +++ b/tests/integration/test_charm_metrics_failure.py @@ -3,6 +3,7 @@ """Integration tests for metrics/logs assuming Github workflow failures or a runner crash.""" import time +from asyncio import sleep from typing import AsyncIterator import pytest @@ -41,16 +42,16 @@ @pytest_asyncio.fixture(scope="function", name="app") async def app_fixture( - model: Model, app_with_grafana_agent: Application, loop_device: str + model: Model, basic_app: Application, loop_device: str ) -> AsyncIterator[Application]: """Setup and teardown the charm after each test. Clear the metrics log before each test. 
""" - unit = app_with_grafana_agent.units[0] + unit = basic_app.units[0] await clear_metrics_log(unit) await print_loop_device_info(unit, loop_device) - yield app_with_grafana_agent + yield basic_app @pytest.mark.openstack @@ -71,6 +72,8 @@ async def test_charm_issues_metrics_for_failed_repo_policy( assert: The RunnerStart, RunnerStop and Reconciliation metric is logged. The Reconciliation metric has the post job status set to failure. """ + await app.set_config({PATH_CONFIG_NAME: forked_github_repository.full_name}) + if isinstance(instance_helper, OpenStackInstanceHelper): await setup_repo_policy( app=app, @@ -91,7 +94,13 @@ async def test_charm_issues_metrics_for_failed_repo_policy( ) # Set the number of virtual machines to 0 to speedup reconciliation - await app.set_config({VIRTUAL_MACHINES_CONFIG_NAME: "0"}) + await app.set_config( + { + VIRTUAL_MACHINES_CONFIG_NAME: "0", + "repo-policy-compliance-token": "", + "repo-policy-compliance-url": "", + } + ) await reconcile(app=app, model=model) await assert_events_after_reconciliation( @@ -107,8 +116,8 @@ async def test_charm_issues_metrics_for_failed_repo_policy( async def test_charm_issues_metrics_for_abnormal_termination( model: Model, app: Application, - forked_github_repository: Repository, - forked_github_branch: Branch, + github_repository: Repository, + test_github_branch: Branch, instance_helper: InstanceHelper, ): """ @@ -117,37 +126,41 @@ async def test_charm_issues_metrics_for_abnormal_termination( assert: The RunnerStart, RunnerStop and Reconciliation metric is logged. The Reconciliation metric has the post job status set to Abnormal. """ - await app.set_config({PATH_CONFIG_NAME: forked_github_repository.full_name}) + await app.set_config({PATH_CONFIG_NAME: github_repository.full_name}) + await app.set_config({VIRTUAL_MACHINES_CONFIG_NAME: "1"}) await instance_helper.ensure_charm_has_runner(app) unit = app.units[0] - workflow = forked_github_repository.get_workflow( + workflow = github_repository.get_workflow( id_or_file_name=DISPATCH_CRASH_TEST_WORKFLOW_FILENAME ) dispatch_time = time.time() - assert workflow.create_dispatch(forked_github_branch, {"runner": app.name}) + assert workflow.create_dispatch(test_github_branch, {"runner": app.name}) await wait_for_workflow_to_start( unit, workflow, - branch=forked_github_branch, + branch=test_github_branch, started_time=dispatch_time, instance_helper=instance_helper, ) + # Wait a bit to ensure pre-job script has been executed. + await sleep(10) + # Make the runner terminate abnormally by killing run.sh runner_name = await instance_helper.get_runner_name(unit) kill_run_sh_cmd = "pkill -9 run.sh" - ret_code, _, stderr = await instance_helper.run_in_instance(unit, kill_run_sh_cmd) - assert ret_code == 0, f"Failed to kill run.sh: {stderr}" + ret_code, stdout, stderr = await instance_helper.run_in_instance(unit, kill_run_sh_cmd) + assert ret_code == 0, f"Failed to kill run.sh with code {ret_code}: {stderr}" # Cancel workflow and wait that the runner is marked offline # to avoid errors during reconciliation. 
await cancel_workflow_run( - unit, workflow, branch=forked_github_branch, instance_helper=instance_helper + unit, workflow, branch=test_github_branch, instance_helper=instance_helper ) - await wait_for_runner_to_be_marked_offline(forked_github_repository, runner_name) + await wait_for_runner_to_be_marked_offline(github_repository, runner_name) # Set the number of virtual machines to 0 to speedup reconciliation await app.set_config({VIRTUAL_MACHINES_CONFIG_NAME: "0"}) @@ -155,7 +168,7 @@ async def test_charm_issues_metrics_for_abnormal_termination( await assert_events_after_reconciliation( app=app, - github_repository=forked_github_repository, + github_repository=github_repository, post_job_status=PostJobStatus.ABNORMAL, ) diff --git a/tests/integration/test_charm_metrics_success.py b/tests/integration/test_charm_metrics_success.py index ffd18d2d1..f757f9a7e 100644 --- a/tests/integration/test_charm_metrics_success.py +++ b/tests/integration/test_charm_metrics_success.py @@ -33,16 +33,16 @@ @pytest_asyncio.fixture(scope="function", name="app") async def app_fixture( - model: Model, app_with_grafana_agent: Application, loop_device: str + model: Model, basic_app: Application, loop_device: str ) -> AsyncIterator[Application]: """Setup and teardown the charm after each test. Clear the metrics log before each test. """ - unit = app_with_grafana_agent.units[0] + unit = basic_app.units[0] await clear_metrics_log(unit) await print_loop_device_info(unit, loop_device) - yield app_with_grafana_agent + yield basic_app @pytest.mark.openstack @@ -76,8 +76,8 @@ async def test_charm_issues_runner_installed_metric( async def test_charm_issues_metrics_after_reconciliation( model: Model, app: Application, - forked_github_repository: Repository, - forked_github_branch: Branch, + github_repository: Repository, + test_github_branch: Branch, instance_helper: InstanceHelper, ): """ @@ -86,7 +86,6 @@ async def test_charm_issues_metrics_after_reconciliation( assert: The RunnerStart, RunnerStop and Reconciliation metric is logged. The Reconciliation metric has the post job status set to normal. """ - await app.set_config({PATH_CONFIG_NAME: forked_github_repository.full_name}) await instance_helper.ensure_charm_has_runner(app) # Clear metrics log to make reconciliation event more predictable @@ -94,8 +93,8 @@ async def test_charm_issues_metrics_after_reconciliation( await clear_metrics_log(unit) await dispatch_workflow( app=app, - branch=forked_github_branch, - github_repository=forked_github_repository, + branch=test_github_branch, + github_repository=github_repository, conclusion="success", workflow_id_or_name=DISPATCH_TEST_WORKFLOW_FILENAME, ) @@ -105,7 +104,7 @@ async def test_charm_issues_metrics_after_reconciliation( await reconcile(app=app, model=model) await assert_events_after_reconciliation( - app=app, github_repository=forked_github_repository, post_job_status=PostJobStatus.NORMAL + app=app, github_repository=github_repository, post_job_status=PostJobStatus.NORMAL ) diff --git a/tests/integration/test_charm_runner.py b/tests/integration/test_charm_runner.py new file mode 100644 index 000000000..5f44f27d5 --- /dev/null +++ b/tests/integration/test_charm_runner.py @@ -0,0 +1,123 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Integration tests for github-runner charm containing one runner.""" +from typing import AsyncIterator + +import pytest +import pytest_asyncio +from juju.application import Application +from juju.model import Model + +from charm_state import ( + VM_CPU_CONFIG_NAME, + VM_DISK_CONFIG_NAME, + VM_MEMORY_CONFIG_NAME, + InstanceType, +) +from tests.integration.helpers import lxd +from tests.integration.helpers.common import InstanceHelper + + +@pytest_asyncio.fixture(scope="function", name="app") +async def app_fixture( + model: Model, + basic_app: Application, + instance_helper: InstanceHelper, +) -> AsyncIterator[Application]: + """Setup and teardown the charm after each test. + + Ensure the charm has one runner before starting a test. + """ + await instance_helper.ensure_charm_has_runner(basic_app) + yield basic_app + + +@pytest.mark.openstack +@pytest.mark.asyncio +@pytest.mark.abort_on_fail +async def test_check_runner(app: Application) -> None: + """ + arrange: A working application with one runner. + act: Run check_runner action. + assert: Action returns result with one runner. + """ + action = await app.units[0].run_action("check-runners") + await action.wait() + + assert action.status == "completed" + assert action.results["online"] == "1" + assert action.results["offline"] == "0" + assert action.results["unknown"] == "0" + + +@pytest.mark.openstack +@pytest.mark.asyncio +@pytest.mark.abort_on_fail +async def test_flush_runner_and_resource_config( + app: Application, instance_type: InstanceType +) -> None: + """ + arrange: A working application with one runner. + act: + 1. Run Check_runner action. Record the runner name for later. + 2. Nothing. + 3. Change the virtual machine resource configuration. + 4. Run flush_runner action. + + assert: + 1. One runner exists. + 2. Check the resource matches the configuration. + 3. Nothing. + 4. a. The runner name should be different to the runner prior running + the action. + b. LXD profile matching virtual machine resources of step 2 exists. + + Test are combined to reduce number of runner spawned. + """ + unit = app.units[0] + + # 1. + action = await app.units[0].run_action("check-runners") + await action.wait() + + assert action.status == "completed" + assert action.results["online"] == "1" + assert action.results["offline"] == "0" + assert action.results["unknown"] == "0" + + runner_names = action.results["runners"].split(", ") + assert len(runner_names) == 1 + + # 2. + # Check if the LXD profile is checked by the charm. Only for local LXD. + configs = await app.get_config() + if instance_type == InstanceType.LOCAL_LXD: + await lxd.assert_resource_lxd_profile(unit, configs) + # OpenStack flavor is not managed by the charm. The charm takes it as a config option. + # Therefore no need to check it. + + # 3. + await app.set_config( + {VM_CPU_CONFIG_NAME: "1", VM_MEMORY_CONFIG_NAME: "3GiB", VM_DISK_CONFIG_NAME: "8GiB"} + ) + + # 4. 
+    action = await app.units[0].run_action("flush-runners")
+    await action.wait()
+
+    configs = await app.get_config()
+    if instance_type == InstanceType.LOCAL_LXD:
+        await lxd.assert_resource_lxd_profile(unit, configs)
+
+    action = await app.units[0].run_action("check-runners")
+    await action.wait()
+
+    assert action.status == "completed"
+    assert action.results["online"] == "1"
+    assert action.results["offline"] == "0"
+    assert action.results["unknown"] == "0"
+
+    new_runner_names = action.results["runners"].split(", ")
+    assert len(new_runner_names) == 1
+    assert new_runner_names[0] != runner_names[0]
diff --git a/tests/integration/test_charm_upgrade.py b/tests/integration/test_charm_upgrade.py
index ee0e1b3f8..73011d0ee 100644
--- a/tests/integration/test_charm_upgrade.py
+++ b/tests/integration/test_charm_upgrade.py
@@ -12,7 +12,7 @@
 from pytest_operator.plugin import OpsTest
 
 from charm_state import VIRTUAL_MACHINES_CONFIG_NAME
-from tests.integration.helpers import (
+from tests.integration.helpers.common import (
     deploy_github_runner_charm,
     inject_lxd_profile,
     is_upgrade_charm_event_emitted,
diff --git a/tests/integration/test_e2e.py b/tests/integration/test_e2e.py
new file mode 100644
index 000000000..b3fb311ed
--- /dev/null
+++ b/tests/integration/test_e2e.py
@@ -0,0 +1,62 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+from typing import AsyncIterator
+
+import pytest
+import pytest_asyncio
+from github.Branch import Branch
+from github.Repository import Repository
+from juju.application import Application
+from juju.model import Model
+
+from charm_state import InstanceType
+from tests.integration.helpers.common import (
+    DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME,
+    InstanceHelper,
+    dispatch_workflow,
+)
+
+
+@pytest_asyncio.fixture(scope="function", name="app")
+async def app_fixture(
+    model: Model,
+    basic_app: Application,
+    instance_helper: InstanceHelper,
+) -> AsyncIterator[Application]:
+    """Setup and teardown the charm after each test.
+
+    Ensure the charm has one runner before starting a test.
+    """
+    await instance_helper.ensure_charm_has_runner(basic_app)
+    yield basic_app
+
+
+@pytest.mark.openstack
+@pytest.mark.asyncio
+@pytest.mark.abort_on_fail
+async def test_e2e_workflow(
+    model: Model,
+    app: Application,
+    github_repository: Repository,
+    test_github_branch: Branch,
+    instance_type: InstanceType,
+):
+    """
+    arrange: A working application with one runner.
+    act: Run e2e test workflow.
+    assert: The e2e test workflow run completes successfully.
+    """
+    virt_type: str
+    if instance_type == InstanceType.OPENSTACK:
+        virt_type = "openstack"
+    else:
+        virt_type = "lxd"
+
+    await dispatch_workflow(
+        app=app,
+        branch=test_github_branch,
+        github_repository=github_repository,
+        conclusion="success",
+        workflow_id_or_name=DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME,
+        dispatch_input={"runner-tag": app.name, "runner-virt-type": virt_type},
+    )
diff --git a/tests/integration/test_openstack_base_image.py b/tests/integration/test_openstack_base_image.py
deleted file mode 100644
index c85e21a3d..000000000
--- a/tests/integration/test_openstack_base_image.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2024 Canonical Ltd.
-# See LICENSE file for licensing details.
- -"""Integration tests for OpenStack integration.""" - -import openstack.connection -import pytest -from github.Branch import Branch -from github.Repository import Repository -from juju.application import Application -from juju.model import Model -from openstack.compute.v2.server import Server - -from charm_state import BASE_IMAGE_CONFIG_NAME -from tests.integration.helpers import ( - DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, - dispatch_workflow, - wait_for, -) - - -@pytest.mark.xfail -async def test_noble_base_image( - model: Model, - app_openstack_runner: Application, - openstack_connection: openstack.connection.Connection, - github_repository: Repository, - test_github_branch: Branch, -) -> None: - """ - arrange: A runner with noble as base image. - act: Dispatch a workflow. - assert: A server with noble image base is created and the workflow runs successfully. - """ - await app_openstack_runner.set_config( - { - BASE_IMAGE_CONFIG_NAME: "noble", - } - ) - await model.wait_for_idle(apps=[app_openstack_runner.name], status="blocked", timeout=70 * 60) - - # Server with noble base image is created - servers = openstack_connection.list_servers(detailed=True) - assert len(servers) == 1, f"Unexpected number of servers: {len(servers)}" - server: Server = servers[0] - assert "noble" in openstack_connection.get_image(server.image["id"]).name - - # Workflow completes successfully - workflow = await dispatch_workflow( - app=app_openstack_runner, - branch=test_github_branch, - github_repository=github_repository, - conclusion="success", - workflow_id_or_name=DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, - dispatch_input={"runner-tag": app_openstack_runner.name}, - ) - await wait_for(lambda: workflow.get_runs()[0].status == "completed") diff --git a/tests/integration/test_openstack_one_runner.py b/tests/integration/test_openstack_one_runner.py deleted file mode 100644 index 2cb0100b4..000000000 --- a/tests/integration/test_openstack_one_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Integration tests for OpenStack integration.""" - -import openstack.connection -import pytest -from github.Branch import Branch -from github.Repository import Repository -from github.WorkflowRun import WorkflowRun -from juju.application import Application -from juju.model import Model -from openstack.compute.v2.server import Server - -from tests.integration.helpers.common import ( - DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, - dispatch_workflow, -) - - -# 2024/03/19 - The firewall configuration on openstack will be implemented by follow up PR on -# launching openstack instances. -@pytest.mark.xfail(reason="Firewall to be implemented") -async def test_openstack_integration( - model: Model, - app_openstack_runner: Application, - openstack_connection: openstack.connection.Connection, - github_repository: Repository, - test_github_branch: Branch, -): - """ - arrange: given a runner with openstack cloud configured. - act: - 1. when the e2e_test_run workflow is created. - 2. when the servers are listed. - assert: - 1. the workflow run completes successfully. - 2. a server with image name jammy is created. - """ - await model.wait_for_idle(apps=[app_openstack_runner.name], status="blocked", timeout=40 * 60) - - # 1. when the e2e_test_run workflow is created. 
- workflow = await dispatch_workflow( - app=app_openstack_runner, - branch=test_github_branch, - github_repository=github_repository, - conclusion="success", - workflow_id_or_name=DISPATCH_E2E_TEST_RUN_WORKFLOW_FILENAME, - dispatch_input={"runner-tag": app_openstack_runner.name}, - ) - # 1. the workflow run completes successfully. - workflow_run: WorkflowRun = workflow.get_runs()[0] - assert workflow_run.status == "success" - - # 2. when the servers are listed. - servers = openstack_connection.list_servers(detailed=True) - assert len(servers) == 1, f"Unexpected number of servers: {len(servers)}" - server: Server = servers[0] - # 2. a server with image name jammy is created. - assert server.image.name == "jammy" diff --git a/tests/integration/test_openstack_runner.py b/tests/integration/test_openstack_runner.py deleted file mode 100644 index 148c33748..000000000 --- a/tests/integration/test_openstack_runner.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Integration tests for OpenStack integration.""" - -import pytest -from github.Branch import Branch -from github.Repository import Repository -from juju.application import Application -from juju.model import Model -from openstack.compute.v2.server import Server -from openstack.connection import Connection as OpenstackConnection - -from charm_state import TOKEN_CONFIG_NAME -from tests.integration.helpers.common import ( - ACTIVE, - DISPATCH_TEST_WORKFLOW_FILENAME, - dispatch_workflow, - reconcile, -) -from tests.integration.helpers.openstack import setup_repo_policy - - -async def test_openstack_check_runner( - app_openstack_runner: Application, -): - """ - arrange: An app connected to an OpenStack cloud with no runners. - act: Run check-runners action. - assert: No runners exists. - """ - unit = app_openstack_runner.units[0] - - action = await unit.run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "0" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - -async def test_openstack_reconcile_one_runner( - model: Model, - app_openstack_runner: Application, - openstack_connection: OpenstackConnection, -): - """ - arrange: An app connected to an OpenStack cloud with no runners. - act: - 1. Change number of runners to one and reconcile and run check-runners action. - 2. Change number of runners to zero and run check-runners action. - assert: - 1. One runner is spawned. - 2. No runners exist and no servers exist on openstack. - """ - # 1. - # Waits until one runner is spawned. - await app_openstack_runner.set_config({"virtual-machines": "1"}) - await reconcile(app=app_openstack_runner, model=model) - - unit = app_openstack_runner.units[0] - action = await unit.run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "1" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - # 2. 
- await app_openstack_runner.set_config({"virtual-machines": "0"}) - await reconcile(app=app_openstack_runner, model=model) - - action = await unit.run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["online"] == "0" - assert action.results["offline"] == "0" - assert action.results["unknown"] == "0" - - assert len(await openstack_connection.list_servers()) == 0, "Openstack runners not cleaned up" - - -async def test_openstack_flush_runners( - model: Model, - app_openstack_runner: Application, - openstack_connection: OpenstackConnection, -): - """ - arrange: An app with runners. - act: Call flush runners action. - assert: Runners are flushed and no servers exist on openstack. - """ - # Waits until one runner is spawned. - await app_openstack_runner.set_config({"virtual-machines": "1"}) - await reconcile(app=app_openstack_runner, model=model) - - unit = app_openstack_runner.units[0] - action = await unit.run_action("check-runners") - await action.wait() - - assert action.status == "completed" - assert action.results["delta"]["virtual-machines"] == "1" - - assert len(await openstack_connection.list_servers()) == 0, "Openstack runners not cleaned up" - - -@pytest.mark.asyncio -@pytest.mark.abort_on_fail -async def test_token_config_changed( - model: Model, - app_openstack_runner: Application, - openstack_connection: OpenstackConnection, - token_alt: str, -) -> None: - """ - arrange: A working application with one runner. - act: Change the token configuration. - assert: New runner is spawned. - """ - # Waits until one runner is spawned. - await app_openstack_runner.set_config({"virtual-machines": "1"}) - await reconcile(app=app_openstack_runner, model=model) - servers: list[Server] = openstack_connection.list_servers() - assert len(servers) == 1, f"Invalid number of servers found, expected 1, got {len(servers)}" - server_id = servers[0].id - - await app_openstack_runner.set_config({TOKEN_CONFIG_NAME: token_alt}) - await model.wait_for_idle(status=ACTIVE, timeout=30 * 60) - - servers = openstack_connection.list_servers() - assert len(servers) == 1, f"Invalid number of servers found, expected 1, got {len(servers)}" - assert ( - server_id != servers[0].id - ), f"Expected new runner spawned, same server id found {server_id}" - - -@pytest.mark.asyncio -@pytest.mark.abort_on_fail -async def test_repo_policy_enabled( - app_openstack_runner: Application, - openstack_connection: OpenstackConnection, - forked_github_repository: Repository, - forked_github_branch: Branch, - token: str, - https_proxy: str, -) -> None: - """ - arrange: A working application with one runner and repo policy enabled. - act: Dispatch a workflow. - assert: Run has successfully passed. 
- """ - await setup_repo_policy( - app=app_openstack_runner, - openstack_connection=openstack_connection, - token=token, - https_proxy=https_proxy, - ) - - await dispatch_workflow( - app=app_openstack_runner, - branch=forked_github_branch, - github_repository=forked_github_repository, - conclusion="success", - workflow_id_or_name=DISPATCH_TEST_WORKFLOW_FILENAME, - ) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index aab75c839..7e748d52c 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -11,7 +11,7 @@ import pytest import yaml -from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus +from ops.model import BlockedStatus, MaintenanceStatus from ops.testing import Harness from charm import GithubRunnerCharm @@ -615,7 +615,7 @@ def test_on_config_changed_openstack_clouds_yaml(self, run, wt, mkdir, orm, rm): """ arrange: Setup mocked charm. act: Fire config changed event to use openstack-clouds-yaml. - assert: Charm is in blocked state. + assert: Charm is in maintenance state. """ harness = Harness(GithubRunnerCharm) cloud_yaml = { diff --git a/tests/unit/test_openstack_manager.py b/tests/unit/test_openstack_manager.py index 64fef1f84..ed19e4718 100644 --- a/tests/unit/test_openstack_manager.py +++ b/tests/unit/test_openstack_manager.py @@ -7,14 +7,17 @@ from unittest.mock import MagicMock, call import jinja2 +import openstack.connection import openstack.exceptions import pytest +from fabric.connection import Connection as SshConnection from invoke import Result from openstack.compute.v2.keypair import Keypair +from openstack.compute.v2.server import Server import metrics.storage from charm_state import CharmState, ProxyConfig, RepoPolicyComplianceConfig -from errors import OpenStackError +from errors import OpenStackError, RunnerStartError from github_type import GitHubRunnerStatus, SelfHostedRunner from metrics import events as metric_events from metrics.runner import RUNNER_INSTALLED_TS_FILE_NAME @@ -32,10 +35,79 @@ def mock_openstack_connect_fixture(monkeypatch: pytest.MonkeyPatch) -> MagicMock """Mock openstack.connect.""" mock_connect = MagicMock(spec=openstack_manager.openstack.connect) monkeypatch.setattr("openstack_cloud.openstack_manager.openstack.connect", mock_connect) - return mock_connect +@pytest.fixture(name="mock_server") +def mock_server_fixture() -> MagicMock: + """Mock OpenStack Server object.""" + mock_server = MagicMock(spec=Server) + mock_server.key_name = "mock_key" + mock_server.addresses.values = MagicMock(return_value=[[{"addr": "10.0.0.1"}]]) + return mock_server + + +@pytest.fixture(name="patch_get_ssh_connection_health_check") +def patch_get_ssh_connection_health_check_fixture(monkeypatch: pytest.MonkeyPatch): + """Patch SSH connection to a MagicMock instance for get_ssh_connection health check.""" + mock_get_ssh_connection = MagicMock( + spec=openstack_manager.OpenstackRunnerManager._get_ssh_connection + ) + mock_ssh_connection = MagicMock(spec=SshConnection) + mock_ssh_connection.host = "test host IP" + mock_result = MagicMock(spec=Result) + mock_result.ok = True + mock_result.stderr = "" + mock_result.stdout = "hello world" + mock_ssh_connection.run.return_value = mock_result + mock_get_ssh_connection.return_value = [mock_ssh_connection] + + monkeypatch.setattr( + openstack_manager.OpenstackRunnerManager, + "_get_ssh_connection", + mock_get_ssh_connection, + ) + + +@pytest.fixture(name="ssh_connection_health_check") +def ssh_connection_health_check_fixture(monkeypatch: pytest.MonkeyPatch): + """SSH connection 
to a MagicMock instance for health check.""" + mock_get_ssh_connection = MagicMock( + spec=openstack_manager.OpenstackRunnerManager._get_ssh_connection + ) + mock_ssh_connection = MagicMock(spec=SshConnection) + mock_ssh_connection.host = "test host IP" + mock_result = MagicMock(spec=Result) + mock_result.ok = True + mock_result.stderr = "" + mock_result.stdout = "-- Test output: /bin/bash /home/ubuntu/actions-runner/run.sh --" + mock_ssh_connection.run.return_value = mock_result + mock_get_ssh_connection.return_value = mock_ssh_connection + + return mock_get_ssh_connection + + +@pytest.fixture(name="patch_ssh_connection_error") +def patch_ssh_connection_error_fixture(monkeypatch: pytest.MonkeyPatch): + """Patch SSH connection to a MagicMock instance with error on run.""" + mock_get_ssh_connection = MagicMock( + spec=openstack_manager.OpenstackRunnerManager._get_ssh_connection + ) + mock_ssh_connection = MagicMock(spec=SshConnection) + mock_result = MagicMock(spec=Result) + mock_result.ok = False + mock_result.stdout = "Mock stdout" + mock_result.stderr = "Mock stderr" + mock_ssh_connection.run.return_value = mock_result + mock_get_ssh_connection.return_value = mock_ssh_connection + + monkeypatch.setattr( + openstack_manager.OpenstackRunnerManager, + "_get_ssh_connection", + mock_get_ssh_connection, + ) + + @pytest.fixture(name="mock_github_client") def mock_github_client_fixture() -> MagicMock: """Mocked github client that returns runner application.""" @@ -536,7 +608,10 @@ def test_build_image_create_image_error( @pytest.mark.usefixtures("patch_execute_command") -def test_build_image(patched_create_connection_context: MagicMock, mock_github_client: MagicMock): +def test_build_image( + patched_create_connection_context: MagicMock, + mock_github_client: MagicMock, +): """ arrange: given monkeypatched execute_command and mocked openstack connection. act: when build_image is called. @@ -555,6 +630,98 @@ def test_build_image(patched_create_connection_context: MagicMock, mock_github_c ) +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image_on_arm64( + patched_create_connection_context: MagicMock, mock_github_client: MagicMock +): + """ + arrange: given monkeypatched execute_command and mocked openstack connection. + act: when build_image is called on arm64. + assert: Openstack image is successfully created. + """ + patched_create_connection_context.search_images.return_value = ( + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + ) + + openstack_manager.build_image( + arch=openstack_manager.Arch.ARM64, + cloud_config=MagicMock(), + github_client=mock_github_client, + path=MagicMock(), + ) + + +@pytest.mark.usefixtures("patch_execute_command") +def test_build_image_on_unsupported_arch( + patched_create_connection_context: MagicMock, mock_github_client: MagicMock +): + """ + arrange: given monkeypatched execute_command and mocked openstack connection. + act: when build_image is called on unknown architecture. + assert: UnsupportedArchitectureError is raised. + """ + patched_create_connection_context.search_images.return_value = ( + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + MagicMock(spec=openstack_manager.openstack.image.v2.image.Image), + ) + + with pytest.raises(openstack_manager.OpenstackImageBuildError) as exc: + openstack_manager.build_image( + # Use mock to represent unknown architecture. 
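+            # A MagicMock compares unequal to every Arch member, so build_image
+            # treats it as an unsupported architecture.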
+            arch=MagicMock(),
+            cloud_config=MagicMock(),
+            github_client=mock_github_client,
+            path=MagicMock(),
+        )
+    assert "Unsupported architecture" in str(exc.value)
+
+
+@pytest.mark.usefixtures("patch_execute_command")
+def test_build_image_with_proxy_config(
+    patched_create_connection_context: MagicMock, mock_github_client: MagicMock
+):
+    """
+    arrange: given monkeypatched execute_command and mocked openstack connection.
+    act: when build_image is called with various valid ProxyConfig objects.
+    assert: Openstack image is successfully created.
+    """
+    patched_create_connection_context.search_images.return_value = (
+        MagicMock(spec=openstack_manager.openstack.image.v2.image.Image),
+        MagicMock(spec=openstack_manager.openstack.image.v2.image.Image),
+    )
+
+    test_proxy_config = openstack_manager.ProxyConfig(
+        http=None,
+        https=None,
+        no_proxy=None,
+        use_aproxy=False,
+    )
+
+    openstack_manager.build_image(
+        arch=openstack_manager.Arch.ARM64,
+        cloud_config=MagicMock(),
+        github_client=mock_github_client,
+        path=MagicMock(),
+        proxies=test_proxy_config,
+    )
+
+    test_proxy_config = openstack_manager.ProxyConfig(
+        http="http://proxy.test",
+        https="https://proxy.test",
+        no_proxy="http://no.proxy",
+        use_aproxy=False,
+    )
+
+    openstack_manager.build_image(
+        arch=openstack_manager.Arch.ARM64,
+        cloud_config=MagicMock(),
+        github_client=mock_github_client,
+        path=MagicMock(),
+        proxies=test_proxy_config,
+    )
+
+
 def test_reconcile_issues_runner_installed_event(
     openstack_manager_for_reconcile: openstack_manager.OpenstackRunnerManager,
 ):
@@ -860,6 +1027,112 @@ def test_repo_policy_config(
     assert test_url in cloud_init_data_str
 
 
+def test__ensure_security_group_with_existing_rules():
+    """
+    arrange: Mock OpenStack connection with the security rules created.
+    act: Run `_ensure_security_group`.
+    assert: The security rules are not created again.
+    """
+    connection_mock = MagicMock(spec=openstack.connection.Connection)
+    connection_mock.get_security_group.return_value = {
+        "security_group_rules": [
+            {"protocol": "icmp"},
+            {"protocol": "tcp", "port_range_min": 22, "port_range_max": 22},
+            {"protocol": "tcp", "port_range_min": 10022, "port_range_max": 10022},
+        ]
+    }
+
+    openstack_manager.OpenstackRunnerManager._ensure_security_group(connection_mock)
+    connection_mock.create_security_group_rule.assert_not_called()
+
+
+def test__get_ssh_connection(
+    monkeypatch: pytest.MonkeyPatch,
+    patch_get_ssh_connection_health_check,
+    mock_server: MagicMock,
+):
+    """
+    arrange: A server with SSH set up correctly.
+    act: Get the SSH connection.
+    assert: An SSH connection is returned.
+    """
+    # Patch `_get_key_path` to bypass the keyfile checks.
+    mock__get_key_path = MagicMock(spec=openstack_manager.OpenstackRunnerManager._get_key_path)
+    mock_key_path = MagicMock(spec=Path)
+    mock_key_path.exists.return_value = True
+    mock__get_key_path.return_value = mock_key_path
+    monkeypatch.setattr(
+        openstack_manager.OpenstackRunnerManager, "_get_key_path", mock__get_key_path
+    )
+    mock_connection = MagicMock(spec=openstack.connection.Connection)
+    mock_connection.get_server.return_value = mock_server
+
+    conn = openstack_manager.OpenstackRunnerManager._get_ssh_connection(
+        mock_connection, mock_server.name
+    )
+    assert conn is not None
+
+
+def test__ssh_health_check_success(
+    mock_server: MagicMock,
+):
+    """
+    arrange: A server with SSH correctly set up.
+    act: Run health check on the server.
+    assert: The health check passes.
+    """
+    mock_connection = MagicMock(spec=openstack.connection.Connection)
+    mock_connection.get_server.return_value = mock_server
+    assert openstack_manager.OpenstackRunnerManager._ssh_health_check(
+        mock_connection, mock_server.name, False
+    )
+
+
+def test__ssh_health_check_no_key(mock_server: MagicMock):
+    """
+    arrange: A server with no key available.
+    act: Run health check on the server.
+    assert: The health check fails.
+    """
+    # Remove the mock SSH key.
+    mock_server.key_name = None
+
+    mock_connection = MagicMock(spec=openstack.connection.Connection)
+    mock_connection.get_server.return_value = mock_server
+
+    assert not openstack_manager.OpenstackRunnerManager._ssh_health_check(
+        mock_connection, mock_server.name, False
+    )
+
+
+def test__ssh_health_check_error(mock_server: MagicMock, patch_ssh_connection_error):
+    """
+    arrange: A server with an error on SSH run.
+    act: Run health check on the server.
+    assert: The health check fails.
+    """
+    mock_connection = MagicMock(spec=openstack.connection.Connection)
+    mock_connection.get_server.return_value = mock_server
+    assert not openstack_manager.OpenstackRunnerManager._ssh_health_check(
+        mock_connection, mock_server.name, False
+    )
+
+
+def test__wait_until_runner_process_running_no_server():
+    """
+    arrange: No server exists on the OpenStack connection.
+    act: Check if the runner process is running.
+    assert: RunnerStartError is raised.
+    """
+    mock_connection = MagicMock(spec=openstack.connection.Connection)
+    mock_connection.get_server.return_value = None
+
+    with pytest.raises(RunnerStartError):
+        openstack_manager.OpenstackRunnerManager._wait_until_runner_process_running(
+            mock_connection, "Non-existing-server"
+        )
+
+
 @pytest.mark.parametrize(
     "server",
     [