diff --git a/.github/workflows/comment_contributing.yaml b/.github/workflows/comment_contributing.yaml
new file mode 100644
index 00000000..c51f8731
--- /dev/null
+++ b/.github/workflows/comment_contributing.yaml
@@ -0,0 +1,13 @@
+name: Comment on the pull request
+
+on:
+  pull_request:
+    types:
+      - opened
+    branches:
+      - 'track/**'
+
+jobs:
+  comment-on-pr:
+    uses: canonical/operator-workflows/.github/workflows/comment_contributing.yaml@main
+    secrets: inherit
diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml
index 7391b6ab..f7ea0a7e 100644
--- a/.github/workflows/integration_test.yaml
+++ b/.github/workflows/integration_test.yaml
@@ -11,8 +11,6 @@ jobs:
       extra-arguments: -x --localstack-address 172.17.0.1
       pre-run-script: localstack-installation.sh
       trivy-image-config: "trivy.yaml"
-      juju-channel: 3.1/stable
+      juju-channel: 3.4/stable
       channel: 1.28-strict/stable
-      modules: '["test_charm", "test_nginx", "test_s3", "test_scaling"]'
-      self-hosted-runner: false
-      self-hosted-runner-label: "edge"
+      modules: '["test_charm", "test_nginx", "test_s3", "test_scaling", "test_matrix_auth"]'
diff --git a/.github/workflows/promote_charm.yaml b/.github/workflows/promote_charm.yaml
index ba84ced9..2faa9c80 100644
--- a/.github/workflows/promote_charm.yaml
+++ b/.github/workflows/promote_charm.yaml
@@ -7,12 +7,12 @@ on:
         type: choice
         description: 'Origin Channel'
         options:
-          - latest/edge
+          - 1/edge
       destination-channel:
         type: choice
         description: 'Destination Channel'
         options:
-          - latest/stable
+          - 1/stable
   secrets:
     CHARMHUB_TOKEN:
       required: true
diff --git a/.github/workflows/publish_charm.yaml b/.github/workflows/publish_charm.yaml
index a84cba1f..aea94a3e 100644
--- a/.github/workflows/publish_charm.yaml
+++ b/.github/workflows/publish_charm.yaml
@@ -3,10 +3,11 @@ name: Publish to edge
 on:
   push:
     branches:
-      - main
-      - track/*
+      - track/1
 
 jobs:
   publish-to-edge:
     uses: canonical/operator-workflows/.github/workflows/publish_charm.yaml@main
     secrets: inherit
+    with:
+      channel: 1/edge
diff --git a/.trivyignore b/.trivyignore
new file mode 100644
index 00000000..2d3b1179
--- /dev/null
+++ b/.trivyignore
@@ -0,0 +1,62 @@
+# Vulnerabilities related to: Pebble, Node.js and gosu
+CVE-2021-39293
+CVE-2021-41771
+CVE-2021-41772
+CVE-2021-44716
+CVE-2022-23772
+CVE-2022-23806
+CVE-2022-24675
+CVE-2022-24921
+CVE-2022-25883
+CVE-2022-27664
+CVE-2022-28131
+CVE-2022-28327
+CVE-2022-2879
+CVE-2022-2880
+CVE-2022-29804
+CVE-2022-30580
+CVE-2022-30630
+CVE-2022-30631
+CVE-2022-30632
+CVE-2022-30633
+CVE-2022-30634
+CVE-2022-30635
+CVE-2022-32189
+CVE-2022-41715
+CVE-2022-41716
+CVE-2022-41720
+CVE-2022-41722
+CVE-2022-41723
+CVE-2022-41724
+CVE-2022-41725
+CVE-2022-46175
+CVE-2023-24534
+CVE-2023-24536
+CVE-2023-24537
+CVE-2023-24538
+CVE-2023-24539
+CVE-2023-24540
+CVE-2023-29400
+CVE-2023-29403
+CVE-2023-39325
+CVE-2023-45283
+CVE-2023-45287
+CVE-2023-45288
+CVE-2024-24790
+CVE-2024-29415
+CVE-2024-34156
+CVE-2024-21538
+CVE-2024-24788
+# This should be removed once the following PR is merged.
+# https://github.com/element-hq/synapse/pull/17955
+CVE-2024-52804
+# Fix ongoing:
+# https://github.com/element-hq/synapse/pull/17985
+CVE-2024-53981
+# The 3 following CVEs will be fixed by Synapse 1.120.2
+CVE-2024-52805
+CVE-2024-52815
+CVE-2024-53863
+# This should be removed once pebble releases a new version.
+# https://github.com/canonical/pebble/commit/0c134f8e0d80f4bd8f42011279c8f0737b59a673 +CVE-2024-45338 diff --git a/LICENSE b/LICENSE index c4a371b8..4b8b005b 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2024 Canonical Ltd. + Copyright 2025 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index c24118ac..f99e904a 100644 --- a/README.md +++ b/README.md @@ -23,16 +23,49 @@ For DevOps or SRE teams this charm will make operating Synapse simple and straightforward through Juju's clean interface. It will allow easy deployment into multiple environments for testing of changes. -## Project and community +## Get started + +To begin, refer to the [Getting Started](https://charmhub.io/synapse/docs/tutorial-getting-started) +tutorial for step-by-step instructions. + +### Basic operations + +#### Configure a server name + +The configuration `server_name` sets the public-facing domain of the server and +refers to [`server_name`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#server_name) Synapse configuration. + +To change it to `tutorial-synapse.juju.local`, for example, run the following +command: + +``` +juju config synapse server_name=tutorial-synapse.juju.local +``` + +#### Create a user -The Synapse Operator is a member of the Ubuntu family. It's an open source -project that warmly welcomes community projects, contributions, suggestions, -fixes and constructive feedback. -* [Code of conduct](https://ubuntu.com/community/code-of-conduct) -* [Get support](https://discourse.charmhub.io/) -* [Join our online chat](https://matrix.to/#/#charmhub-charmdev:ubuntu.com) -* [Contribute](https://charmhub.io/synapse/docs/contributing) -* [Getting Started](https://charmhub.io/synapse/docs/getting-started) -Thinking about using the Synapse Operator for your next project? [Get in touch](https://matrix.to/#/#charmhub-charmdev:ubuntu.com)! - ---- +The following command creates a local user named `alice`. + +``` +juju run synapse/0 register-user username=alice password= admin=no +``` + +#### Promote user to admin + +The following command can be used to promote an existing user to admin. + +``` +juju run synapse/0 promote-user-admin username=alice +``` + +## Learn more +* [Read more](https://charmhub.io/synapse) +* [Developer documentation](https://element-hq.github.io/synapse/latest/development/contributing_guide.html) +* [Official webpage](https://github.com/element-hq/synapse) +* [Troubleshooting](https://element-hq.github.io/synapse/latest/usage/administration/admin_faq.html) + + +## Project and community +* [Issues](https://github.com/canonical/synapse-operator/issues) +* [Contributing](https://charmhub.io/synapse/docs/contributing) +* [Matrix](https://matrix.to/#/#charmhub-charmdev:ubuntu.com) diff --git a/actions.yaml b/actions.yaml index 97febaaf..f9fe0d2e 100644 --- a/actions.yaml +++ b/actions.yaml @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. anonymize-user: @@ -11,11 +11,6 @@ anonymize-user: type: string required: - username -reset-instance: - description: | - Set a new server_name before running this action. - Once a server_name is configured, you must start a new instance if you wish a different one. 
-    This actions will erase all data and create a instance with the new server_name.
 register-user:
   description: |
     Registers a user for the Synapse server.
diff --git a/charmcraft.yaml b/charmcraft.yaml
index 15302750..d4a2019c 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -1,4 +1,4 @@
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
 # This file configures Charmcraft.
@@ -12,10 +12,18 @@ bases:
     run-on:
       - name: ubuntu
         channel: "22.04"
+charm-libs:
+  - lib: traefik_k8s.ingress
+    version: "2"
 parts:
   charm:
     build-packages:
-      - cargo
-      - rustc
+      - libpq-dev
+    build-snaps:
+      - rustup
+    override-build: |
+      rustup default stable
+      craftctl default
     charm-binary-python-packages:
-      - psycopg2-binary==2.9.9
+      - psycopg2-binary
+      - cryptography
diff --git a/config.yaml b/config.yaml
index e6427dde..72465613 100644
--- a/config.yaml
+++ b/config.yaml
@@ -1,4 +1,4 @@
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
 options:
@@ -11,6 +11,12 @@ options:
   backup_passphrase:
     type: string
     description: Passphrase used to encrypt a backup using gpg with symmetric key.
+  block_non_admin_invites:
+    type: boolean
+    default: false
+    description: |
+      When enabled, room invites to users on this server will be blocked
+      (except those sent by local server admins).
   enable_email_notifs:
     type: boolean
     default: false
@@ -22,19 +28,6 @@ options:
     description: |
       Configures whether to enable Mjolnir - moderation tool for Matrix.
       Reference: https://github.com/matrix-org/mjolnir
-  enable_irc_bridge:
-    type: boolean
-    default: false
-    description: |
-      Configures whether to enable IRC bridging for Matrix.
-      Reference: https://github.com/matrix-org/matrix-appservice-irc
-  enable_irc_ident:
-    type: boolean
-    default: false
-    description: |
-      Configures whether to enable IRC ident server.
-      Reference: |
-        https://github.com/matrix-org/matrix-appservice-irc/blob/develop/config.sample.yaml#L437
   enable_password_config:
     type: boolean
     default: true
@@ -57,11 +50,19 @@ options:
       Comma separated list of IP address CIDR ranges that should be allowed for
       federation, identity servers, push servers, and for checking key validity
       for third-party invite events.
-  irc_bridge_admins:
+  invite_checker_blocklist_allowlist_url:
+    type: string
+    description: URL to fetch the JSON file containing the allowlist and
+      blocklist.
+  invite_checker_policy_rooms:
     type: string
     description: |
-      Comma separated list of admins to be allowed to manage the bridge.
-      This takes the form of user1:domainX.com,user2:domainY.com...
+      Comma separated list of rooms to be used by the invite checker module.
+  limit_remote_rooms_complexity:
+    type: float
+    description: If set, the room "complexity" will be checked before a user
+      joins a new remote room. If the complexity is higher than this value, the
+      user will not be able to join the room.
   notif_from:
     type: string
     description: defines the "From" address to use when sending emails.
@@ -72,6 +73,23 @@ options:
       The public-facing base URL that clients use to access this Homeserver.
       Defaults to https://<server_name>/. Only used if there is integration with
       SAML integrator charm.
+  experimental_alive_check:
+    type: string
+    description: Comma-separated list of period, threshold and timeout for the
+      Synapse Pebble alive check. Refer to
+      https://github.com/canonical/pebble/#layer-specification for more details
+      about allowed values.
+      This is an experimental feature and may be removed in future releases.
+    default: "2m,5,20s"
+  rc_joins_remote_burst_count:
+    type: int
+    description: Allows for ratelimiting the number of remote rooms a user can
+      join before being throttled.
+    default: 10
+  rc_joins_remote_per_second:
+    type: float
+    description: Allows for ratelimiting the number of remote rooms a user can
+      join per second.
+    default: 0.01
   report_stats:
     description: |
       Configures whether to report statistics.
diff --git a/docs/explanation/charm-architecture.md b/docs/explanation/charm-architecture.md
index 3763f900..5a297d79 100644
--- a/docs/explanation/charm-architecture.md
+++ b/docs/explanation/charm-architecture.md
@@ -16,7 +16,7 @@ Pebble `services` are configured through [layers](https://github.com/canonical/p
 and the following containers represent each one a layer forming the effective
 Pebble configuration, or `plan`:
 
-1. An [NGINX](https://www.nginx.com/) container, which can be used to
+1. An [NGINX](https://www.nginx.com/) Pebble layer, which can be used to
    efficiently serve static resources, as well as be the incoming point for
    all web traffic to the pod.
 2. The [Synapse](https://github.com/matrix-org/synapse) container itself, which
@@ -40,31 +40,27 @@ processes startup as explained above.
 ## OCI images
 
 We use [Rockcraft](https://canonical-rockcraft.readthedocs-hosted.com/en/latest/)
-to build OCI Images for Synapse and NGINX.
-The images are defined in [NGINX rock](https://github.com/canonical/synapse-operator/tree/main/nginx_rock/)
-and [Synapse rock](https://github.com/canonical/synapse-operator/tree/main/synapse_rock).
-They are published to [Charmhub](https://charmhub.io/), the official repository
+to build the OCI image for Synapse.
+The image is defined in [Synapse rock](https://github.com/canonical/synapse-operator/tree/main/synapse_rock) and is published to [Charmhub](https://charmhub.io/), the official repository
 of charms.
 This is done by publishing a resource to Charmhub as described in the
 [Juju SDK How-to guides](https://juju.is/docs/sdk/publishing).
 
-## Containers
+## Container
 
-Configuration files for the containers can be found in the respective
-directories that define the rocks, see the section above.
+Configuration files for the container can be found in the
+directory that defines the rock; see the section above.
 
 ### NGINX
 
-This container is the entry point for all web traffic to the pod (on port
-`8080`). Serves static files directly and forwards non-static requests to
-the Synapse container (on port `8008`).
+NGINX is configured as a Pebble layer and is the entry point for all web traffic
+to the pod (on port `8080`). It serves static files directly and forwards
+non-static requests to the Synapse container (on port `8008`).
 
 NGINX provides static content cache, reverse proxy, and load balancer among
 multiple application servers, as well as other features. It can be used in
 front of Synapse server to significantly reduce server and network load.
 
-The workload that this container is running is defined in the [NGINX rock](https://github.com/canonical/synapse-operator/tree/main/nginx_rock/).
-
 ### Synapse
 
 Synapse is a Python application run by the `start.py` script.
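+
+To see the resulting layered configuration on a running deployment, you can dump
+the effective Pebble plan from the workload container. A minimal sketch, assuming
+a unit named `synapse/0` and Juju's default sidecar-charm paths (adjust the unit
+and container names to your model):
+
+```
+# Print the merged Pebble plan (services and checks) of the synapse container.
+juju ssh --container synapse synapse/0 /charm/bin/pebble plan
+```
+
+If the `experimental_alive_check` configuration option is set, its period,
+threshold and timeout should appear under the plan's `checks` section.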
diff --git a/docs/how-to/backup-and-restore.md b/docs/how-to/backup-and-restore.md index 994d11f1..6d3e4735 100644 --- a/docs/how-to/backup-and-restore.md +++ b/docs/how-to/backup-and-restore.md @@ -23,7 +23,7 @@ juju config s3-integrator endpoint= bucket= path= secret-key= ``` -Integrate with Synapse with: +Integrate with Synapse using: `juju integrate synapse:backup s3-integrator` @@ -37,7 +37,7 @@ juju config synapse backup_passphrase= ### Create the backup -Create the backup with the next command: +Create the backup: ``` juju run synapse/leader create-backup ``` @@ -53,20 +53,20 @@ juju run synapse/leader list-backups ### Back up PostgreSQL Follow the instructions of the PostgreSQL charm: - - For [postgresql-k8s](https://charmhub.io/postgresql-k8s/docs/h-create-and-list-backups). - - For [postgresql](https://charmhub.io/postgresql/docs/h-create-and-list-backups). + - For [postgresql-k8s](https://charmhub.io/postgresql-k8s/docs/h-create-backup). + - For [postgresql](https://charmhub.io/postgresql/docs/h-create-backup). If you plan to restore PostgreSQL in a different model or cluster, you will need to also back up the cluster passwords. See: - - For [postgresql-k8s](https://charmhub.io/postgresql-k8s/docs/h-migrate-cluster-via-restore). - - For [postgresql](https://charmhub.io/postgresql/docs/h-migrate-cluster-via-restore). + - For [postgresql-k8s](https://charmhub.io/postgresql-k8s/docs/h-migrate-cluster). + - For [postgresql](https://charmhub.io/postgresql/docs/h-migrate-cluster). ## Restore -The recommendation is to first restore PostgreSQL if necessary. Then deploying, -configuring and integrating Synapse with other charms as done in a normal deployment -and finally restoring Synapse. +The recommendation is to first restore PostgreSQL if necessary. Then deploy, +configure and integrate Synapse with other charms as done in a normal deployment. +Finally, restore Synapse. The PostgreSQL and Synapse charm revisions should be the same ones as the ones used for the backup. The configuration for Synapse before restoring the backup should also @@ -78,11 +78,11 @@ the configuration option `server_name` and any other configuration related to th If you use the PostgreSQL integration, follow the instructions given by PostgreSQL: - - For postgresql-k8s: [local restore](https://charmhub.io/postgresql/docs/h-restore-backup), [foreign backup](https://charmhub.io/postgresql/docs/h-migrate-cluster-via-restore). - - for postgresql: [local restore](https://charmhub.io/postgresql/docs/h-restore-backup), [foreign backup](https://charmhub.io/postgresql/docs/h-migrate-cluster-via-restore). + - For postgresql-k8s: [local restore](https://charmhub.io/postgresql/docs/h-restore-backup), [foreign backup](https://charmhub.io/postgresql/docs/h-migrate-cluster). + - for postgresql: [local restore](https://charmhub.io/postgresql/docs/h-restore-backup), [foreign backup](https://charmhub.io/postgresql/docs/h-migrate-cluster). If you used the foreign backup, once the backup for PostgreSQL is restored, you should remove the S3 integration, -as it was created in a different cluster, by running: +as it was created in a different cluster: ``` juju remove-relation s3-integrator postgresql @@ -91,7 +91,7 @@ juju remove-relation s3-integrator postgresql ### Deploy Synapse Synapse should be deployed, integrated with all necessary charms and configured. If necessary, Synapse should be integrated with the PostgreSQL charm that -has already being restored. +has already been restored. 
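+
+A condensed sketch of that order of operations (the channel, revision and
+`server_name` values below are placeholders, not prescriptions):
+
+```
+# 1. Restore PostgreSQL first, following the PostgreSQL charm documentation.
+# 2. Re-deploy and configure Synapse as it was when the backup was taken.
+juju deploy synapse --channel <channel-used-for-backup> --revision <revision-used-for-backup>
+juju config synapse server_name=<original-server-name>
+juju integrate synapse postgresql-k8s
+# 3. Only then restore the Synapse backup, as described in the next section.
+```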
 ### Restore Synapse
 
@@ -123,4 +123,4 @@ Restore the backup:
 ```
 juju run synapse/leader restore-backup backup-id=
 ```
 
-At this point, Synapse should be active and the restore procedure complete.
\ No newline at end of file
+At this point, Synapse should be active and the restore procedure complete.
diff --git a/docs/how-to/configure-smtp.md b/docs/how-to/configure-smtp.md
index 26939547..36459c18 100644
--- a/docs/how-to/configure-smtp.md
+++ b/docs/how-to/configure-smtp.md
@@ -5,7 +5,7 @@ emails. Synapse should be deployed beforehand.
 
 ## Deploy smtp-integrator charm
 
-For synapse to use SMTP, it uses the smtp-integrator charm. Replace the configuration options with your specific configuration.
+For Synapse to use SMTP, deploy the smtp-integrator charm. Replace the configuration options with your specific configuration.
 Configuring SMTP without TLS or STARTTLS or without authentication is not supported.
 
 ```
@@ -33,4 +33,4 @@ juju integrate smtp-integrator:smtp-legacy synapse:smtp
 For the new integration with secrets, run:
 ```
 juju integrate smtp-integrator:smtp synapse:smtp
-```
\ No newline at end of file
+```
diff --git a/docs/how-to/contribute.md b/docs/how-to/contribute.md
index 8381b257..f9cf8839 100644
--- a/docs/how-to/contribute.md
+++ b/docs/how-to/contribute.md
@@ -41,12 +41,11 @@ source .tox/unit/bin/activate
 
 ### Testing
 
-Note that the [Synapse](synapse_rock/rockcraft.yaml) and [Synapse NGINX](synapse_nginx_rock/rockcraft.yaml)
-images need to be built and pushed to microk8s for the tests to run. They should
-be tagged as `localhost:32000/synapse:latest` and
-`localhost:32000/synapse-nginx:latest` so that Kubernetes knows how to pull them
+Note that the [Synapse](synapse_rock/rockcraft.yaml) image needs to be built and
+pushed to microk8s for the tests to run. It should be tagged as
+`localhost:32000/synapse:latest` so that Kubernetes knows how to pull it
 from the MicroK8s repository. Note that the MicroK8s registry needs to be
-enabled using `microk8s enable registry`. More details regarding the OCI images
+enabled using `microk8s enable registry`. More details regarding the OCI image
 below. The following commands can then be used to run the tests:
 
 * `tox`: Runs all of the basic checks (`lint`, `unit`, `static`, and `coverage-report`).
@@ -73,7 +72,7 @@ Build the charm in this git repository using:
 charmcraft pack
 ```
 For the integration tests (and also to deploy the charm locally), the synapse
-and synapse-nginx images are required in the microk8s registry. To enable it:
+image is required in the microk8s registry. To enable it:
 
     microk8s enable registry
 
@@ -82,8 +81,6 @@ the registry:
 
     cd [project_dir]/synapse_rock && rockcraft pack
     skopeo --insecure-policy copy --dest-tls-verify=false oci-archive:synapse_1.0_amd64.rock docker://localhost:32000/synapse:latest
-    cd [project_dir]/nginx_rock && rockcraft pack
-    skopeo --insecure-policy copy --dest-tls-verify=false oci-archive:synapse-nginx_1.0_amd64.rock docker://localhost:32000/synapse-nginx:latest
 
 ### Deploy
 
@@ -94,24 +91,22 @@ juju add-model synapse-dev
 juju model-config logging-config="<root>=INFO;unit=DEBUG"
 # Deploy the charm (assuming you're on amd64)
 juju deploy ./synapse_ubuntu-22.04-amd64.charm \
-  --resource synapse-image=localhost:32000/synapse:latest \
-  --resource synapse-nginx-image=localhost:32000/synapse-nginx:latest
+  --resource synapse-image=localhost:32000/synapse:latest
 ```
 
-### Configure server_name
+### Configure `server_name`
 
-Synapse requires a server_name to be set before starting. Note that this cannot
-be changed later so if you want a different server name, will need to run the
-action `reset-instance` to re-create everything.
+Synapse requires a `server_name` to be set before starting. Note that this cannot
+be changed later.
 
-The following command will configure the server_name mychat.test.com:
+The following command will configure the `server_name` mychat.test.com:
 
 ```bash
 juju config synapse server_name=mychat.test.com
 ```
 
-Read more about server_name in [Configuring Synapse](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#server_name).
+Read more about `server_name` in [Configuring Synapse](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#server_name).
 
 ## Canonical Contributor Agreement
 
-Canonical welcomes contributions to the Synapse Operator. Please check out our [contributor agreement](https://ubuntu.com/legal/contributors) if you're interested in contributing to the solution.
\ No newline at end of file
+Canonical welcomes contributions to the Synapse Operator. Please check out our [contributor agreement](https://ubuntu.com/legal/contributors) if you're interested in contributing to the solution.
diff --git a/docs/how-to/horizontally-scale.md b/docs/how-to/horizontally-scale.md
new file mode 100644
index 00000000..3a560a3e
--- /dev/null
+++ b/docs/how-to/horizontally-scale.md
@@ -0,0 +1,86 @@
+# How to horizontally scale
+
+A Synapse deployment can scale horizontally by running multiple Synapse processes called workers.
+When adding more units to the Synapse charm, one of them will run the main process and the
+others will run workers. This architecture has specific requirements that are described in this
+document.
+
+For more details about scaling, refer to ["Scaling synapse via workers"](https://element-hq.github.io/synapse/latest/workers.html#scaling-synapse-via-workers) in the Synapse documentation.
+
+## Requirements
+
+- Synapse charm deployed and integrated with PostgreSQL charm.
+
+The tutorial ["Getting started"](https://discourse.charmhub.io/t/synapse-docs-getting-started/12737) can be used to meet this requirement.
+
+## Steps
+
+### Deploy Redis and integrate it with Synapse
+
+Run the following commands.
+
+```
+juju deploy redis-k8s redis --channel edge
+juju integrate synapse redis
+```
+
+Once the output of the `juju status` command shows that the units are active and idle, proceed with
+the next step.
+
+### Deploy S3-integrator and integrate it with Synapse
+
+This will enable the S3 storage provider so media will be stored in an S3 bucket.
+
+Replace the configuration options below with values that match your environment.
+
+Refer to [s3-integrator](https://charmhub.io/s3-integrator/) for specific configuration options.
+
+```
+juju deploy s3-integrator s3-integrator-media --channel edge
+juju config s3-integrator-media endpoint=http://endpoint bucket=synapse-media-bucket path=/media region=us-east-1 s3-uri-style=path
+juju integrate synapse:media s3-integrator-media
+```
+
+Once the output of the `juju status` command shows that the units are active and idle, proceed with
+the next step.
+
+### Scale Synapse application
+
+With all integrations set, scale Synapse up by running the following command.
+
+```
+juju scale-application synapse 3
+```
+
+### Verify status
+
+The output of `juju status --relations` should look like this now.
+
+```
+$ juju status --relations
+Model             Controller  Cloud/Region    Version  SLA          Timestamp
+prod-synapse-k8s  ctr1        cloud1/default  3.1.8    unsupported  20:04:20Z
+
+SAAS        Status  Store   URL
+postgresql  active  cloud1  admin/prod-chat-synapse-db.postgresql
+
+App                  Version  Status  Scale  Charm          Channel        Rev  Address      Exposed  Message
+s3-integrator-media           active      1  s3-integrator  latest/stable   13  10.10.10.1   no
+redis                7.0.4    active      1  redis-k8s      latest/edge     27  10.10.10.2   no
+synapse                       active      3  synapse        latest/stable  303  10.10.10.3   no
+
+Unit                    Workload  Agent  Address      Ports  Message
+s3-integrator-media/0*  active    idle   192.168.1.2
+redis/0*                active    idle   192.168.1.7
+synapse/0               active    idle   192.168.1.4
+synapse/1*              active    idle   192.168.1.8
+synapse/2               active    idle   192.168.1.6
+
+Integration provider                     Requirer                                 Interface            Type     Message
+s3-integrator-media:s3-credentials       synapse:media                            s3                   regular
+s3-integrator-media:s3-integrator-peers  s3-integrator-media:s3-integrator-peers  s3-integrator-peers  peer
+postgresql:database                      synapse:database                         postgresql_client    regular
+redis:redis                              synapse:redis                            redis                regular
+redis:redis-peers                        redis:redis-peers                        redis-peers          peer
+synapse:synapse-peers                    synapse:synapse-peers                    synapse-instance     peer
+```
diff --git a/docs/index.md b/docs/index.md
index 7323277a..c8e0d9e0 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,3 +1,5 @@
+# Synapse Operator
+
 A Juju charm deploying and managing [Synapse](https://github.com/matrix-org/synapse) on Kubernetes. Synapse is a drop in
 replacement for other chat servers like Mattermost and Slack. This charm simplifies initial deployment and "day N" operations
 of Synapse on Kubernetes, such as integration with SSO, access to S3 for redundant file storage and more. It allows for deployment on
@@ -7,22 +9,29 @@ As such, the charm makes it easy for those looking to take control of their own
 For DevOps or SRE teams this charm will make operating Synapse simple and straightforward through Juju's clean interface. It
 will allow easy deployment into multiple environments for testing of changes.
 
+## In this documentation
+
+| | |
+|--|--|
+| [Tutorials](https://charmhub.io/synapse/docs/tutorial-getting-started)</br> Get started - a hands-on introduction to using the charm for new users | [How-to guides](https://charmhub.io/synapse/docs/how-to-configure-smtp)</br> Step-by-step guides covering key operations and common tasks |
+| [Reference](https://charmhub.io/synapse/docs/reference-actions)</br> Technical information - specifications, APIs, architecture | [Explanation](https://charmhub.io/synapse/docs/explanation-charm-architecture)</br> Concepts - discussion and clarification of key topics |
+
+## Contributing to this documentation
+
+Documentation is an important part of this project, and we take the same open-source approach to the documentation as the code. As such, we welcome community contributions, suggestions and constructive feedback on our documentation. Our documentation is hosted on the [Charmhub forum](https://discourse.charmhub.io/) to enable easy collaboration. Please use the “Help us improve this documentation” links on each documentation page to either directly change something you see that’s wrong, or ask a question, or make a suggestion about a potential change via the comments section.
+
+If there’s a particular area of documentation that you’d like to see that’s missing, please [file a bug](https://github.com/canonical/synapse-operator/issues).
+
 ## Project and community
 
 Synapse is an open-source project that welcomes community contributions, suggestions, fixes and constructive feedback.
 * [Read our Code of Conduct](https://ubuntu.com/community/code-of-conduct)
 * [Join the Discourse forum](https://discourse.charmhub.io/tag/synapse)
-* [Discuss on the Matrix chat service](https://chat.charmhub.io/charmhub/channels/charm-dev)
+* [Discuss on the Matrix chat service](https://matrix.to/#/#charmhub-charmdev:ubuntu.com)
 * Contribute and report bugs to [the Synapse operator](https://github.com/canonical/synapse-operator)
 * Check the [release notes](https://github.com/canonical/synapse-operator/releases)
 
-## Contributing to this documentation
-
-Documentation is an important part of this project, and we take the same open-source approach to the documentation as the code. As such, we welcome community contributions, suggestions and constructive feedback on our documentation. Our documentation is hosted on the [Charmhub forum](https://discourse.charmhub.io/) to enable easy collaboration. Please use the “Help us improve this documentation” links on each documentation page to either directly change something you see that’s wrong, or ask a question, or make a suggestion about a potential change via the comments section.
-
-If there’s a particular area of documentation that you’d like to see that’s missing, please [file a bug](https://github.com/canonical/synapse-operator/issues).
-
 # Contents
 
 1. [Tutorial](tutorial)
@@ -35,4 +44,4 @@ If there’s a particular area of documentation that you’d like to see that’
   1. [Actions](reference/actions.md)
   2. [Integrations](reference/integrations.md)
 1. [Explanation](explanation)
-  1. [Charm architecture](explanation/charm-architecture.md)
\ No newline at end of file
+  1. [Charm architecture](explanation/charm-architecture.md)
diff --git a/docs/reference/actions.md b/docs/reference/actions.md
index 03cd6210..cf023ca4 100644
--- a/docs/reference/actions.md
+++ b/docs/reference/actions.md
@@ -1,3 +1,5 @@
 # Actions
 
-See [Actions](https://charmhub.io/synapse/actions).
\ No newline at end of file
+See [Actions](https://charmhub.io/synapse/actions).
+
+> Read more about actions in the Juju docs: [Action](https://juju.is/docs/juju/action)
diff --git a/docs/reference/configurations.md b/docs/reference/configurations.md
new file mode 100644
index 00000000..5786bd2c
--- /dev/null
+++ b/docs/reference/configurations.md
@@ -0,0 +1,5 @@
+# Configurations
+
+See [Configurations](https://charmhub.io/synapse/configurations).
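+
+As a quick reference, configuration options are read and set with `juju config`;
+for example (the value below is illustrative):
+
+```
+# Show all configuration options and their current values.
+juju config synapse
+# Set a single option.
+juju config synapse server_name=tutorial-synapse.juju.local
+```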
+ +> Read more about configurations in the Juju docs: [Configuration](https://juju.is/docs/juju/configuration) diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md index dd96fdd2..1de9a6da 100644 --- a/docs/reference/integrations.md +++ b/docs/reference/integrations.md @@ -3,6 +3,7 @@ ### backup _Interface_: s3 + _Supported charms_: [s3-integrator](https://charmhub.io/s3-integrator/) In order to perform backups, Synapse has to be integrated with the s3-integrator charm using the @@ -11,11 +12,15 @@ indicated in the S3 compatible object storage provider configuration provided by The Synapse charm will back up the media files, signing keys and sqlite database file if applicable. If Synapse database integration is used, the Synapse charm will not back up the related database. -Example backup integrate command: `juju integrate synapse:backup s3-integrator` +Example backup integrate command: +``` +juju integrate synapse:backup s3-integrator +``` ### db _Interface_: pgsql + _Supported charms_: [postgresql-k8s](https://charmhub.io/postgresql-k8s), [postgresql](https://charmhub.io/postgresql) @@ -23,11 +28,15 @@ Database integration is a required relation for the Synapse charm to supply structured data storage for Synapse. -Example db integrate command: `juju integrate synapse postgresql-k8s:db` +Example db integrate command: +``` +juju integrate synapse postgresql-k8s:db +``` ### grafana-dashboard _Interface_: grafana-dashboard + _Supported charms_: [grafana-k8s](https://charmhub.io/grafana-k8s) Grafana-dashboard relation enables quick dashboard access already tailored to @@ -51,6 +60,7 @@ juju integrate synapse grafana-dashboard` ### ingress _Interface_: ingress + _Supported charms_: [nginx-ingress-integrator](https://charmhub.io/nginx-ingress-integrator), [traefik](https://charmhub.io/traefik-k8s) @@ -59,22 +69,30 @@ Note that the kubernetes cluster must already have an nginx ingress controller already deployed. Documentation to enable ingress in MicroK8s can be found in [Addon: Ingress](https://microk8s.io/docs/addon-ingress). -Example ingress integrate command: `juju integrate synapse nginx-ingress-integrator` +Example ingress integrate command: +``` +juju integrate synapse nginx-ingress-integrator +``` ### metrics-endpoint _Interface_: [prometheus_scrape](https://charmhub.io/interfaces/prometheus_scrape-v0) + _Supported charms_: [prometheus-k8s](https://charmhub.io/prometheus-k8s) Metrics-endpoint relation allows scraping the `/metrics` endpoint provided by Synapse. The metrics are exposed in the [open metrics format](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#data-model) and will only be scraped by Prometheus once the relation becomes active. For more information about the metrics exposed, refer to ["How to monitor Synapse metrics using Prometheus"](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md). -Metrics-endpoint integrate command: `juju integrate synapse prometheus-k8s` +Metrics-endpoint integrate command: +``` +juju integrate synapse prometheus-k8s +``` ### redis _Interface_: redis + _Supported charms_: [redis-k8s](https://charmhub.io/redis-k8s) Integrating Synapse with Redis is required by horizontal scaling the charm. @@ -82,17 +100,24 @@ If the integration is not present, the charm will be blocked. See more information in [Scaling synapse via workers](https://matrix-org.github.io/synapse/latest/workers.html) in documentation repository for Synapse. 
-Example redis integrate command: `juju integrate synapse redis-k8s` +Example redis integrate command: +``` +juju integrate synapse redis-k8s +``` ### saml _Interface_: saml + _Supported charms_: [saml-integrator](https://charmhub.io/saml-integrator/) Integrating Synapse with SAML Integrator provides SAML configuration details so users can be authenticated in via a SAML server. -Example saml integrate command: `juju integrate synapse saml-integrator:saml` +Example saml integrate command: +``` +juju integrate synapse saml-integrator:saml +``` Note that `public_baseurl` configuration set the public-facing base URL that clients use to access this Homeserver. It's used as `entity_id` if set instead of @@ -104,12 +129,16 @@ See more information in [Charm Architecture](https://charmhub.io/synapse/docs/ex ### smtp _Interface_: smtp + _Supported charms_: [smtp-integrator](https://charmhub.io/smtp-integrator/) Integrating Synapse with SMTP Integrator provides SMTP configuration details so a smtp server can be used in Synapse. -Example smtp integrate command: `juju integrate synapse smtp-integrator:smtp` +Example smtp integrate command: +``` +juju integrate synapse smtp-integrator:smtp +``` Note that the smtp-integrator provides two interfaces, `smtp` and `smtp-legacy`. Only use the first one if the Juju version used supports secrets. The "From" email diff --git a/docs/tutorial/getting-started.md b/docs/tutorial/getting-started.md index e1b2269f..57aca4d9 100644 --- a/docs/tutorial/getting-started.md +++ b/docs/tutorial/getting-started.md @@ -1,4 +1,4 @@ -# Getting Started +# Deploy the Synapse charm for the first time ## What you’ll do - Deploy the Synapse charm. @@ -11,15 +11,19 @@ Through the process, you'll verify the workload state, and log in to your Synapse instance via Element Desktop application. ## Requirements -- Juju 3 installed. -- Juju controller and model created. -For more information about how to install Juju, see [Get started with Juju](https://juju.is/docs/olm/get-started-with-juju). +* A working station, e.g., a laptop, with amd64 architecture. +* Juju 3 installed and bootstrapped to a MicroK8s controller. You can accomplish +this process by using a [Multipass](https://multipass.run/) VM as outlined in this guide: [Set up / Tear down your test environment](https://juju.is/docs/juju/set-up--tear-down-your-test-environment) -## Setting up a Tutorial Model +:warning: When using a Multipass VM, make sure to replace IP addresses with the +VM IP in steps that assume you're running locally. To get the IP address of the +Multipass instance run ```multipass info my-juju-vm```. + +## Set up a Tutorial Model To manage resources effectively and to separate this tutorial's workload from -your usual work, we recommend creating a new model using the following command. +your usual work, create a new model using the following command. ``` juju add-model synapse-tutorial @@ -28,7 +32,7 @@ juju add-model synapse-tutorial ## Deploy the Synapse charm Synapse requires connections to PostgreSQL. Deploy both charm applications. -### Deploy the charms: +### Deploy and integrate the charms ``` juju deploy postgresql-k8s --trust juju deploy synapse @@ -47,7 +51,7 @@ Run `juju status` again to see that the message has changed: synapse/0* waiting idle 10.1.74.70 Waiting for database availability ``` -Provide integration between Synapse and PostgreSQL: +Provide the integration between Synapse and PostgreSQL: ``` juju integrate synapse postgresql-k8s ``` @@ -70,7 +74,7 @@ ingress resource of Kubernetes. 
If you want to make Synapse charm available to external clients, you need to deploy the Traefik charm and integrate Synapse with it. -### Deploy the charm Traefik: +### Deploy the Traefik charm ``` juju deploy traefik-k8s --trust ``` @@ -88,7 +92,7 @@ juju integrate synapse traefik-k8s Now, you will need to go into your DNS settings and set the IP address of the Traefik charm to the DNS entry you’re setting up. Getting the IP address can be -done using juju status. +done using `juju status`. ``` App Version Status Scale Charm Channel Rev Address Exposed Message traefik-k8s 2.9.6 active 1 traefik-k8s stable 110 10.152.183.225 no @@ -104,13 +108,13 @@ to take effect. In case you don’t have access to a DNS: The browser uses entries in the `/etc/hosts` file to override what is returned by a DNS server. So, to resolve -it to your Traefik IP, edit /etc/hosts file and add the following line +it to your Traefik IP, open the `/etc/hosts` file and add the following line accordingly: ``` 10.152.183.225 tutorial-synapse.juju.local ``` -Optional: run `echo "10.152.183.225 tutorial-synapse.juju.local" >> /etc/hosts` +> Optional: run `echo "10.152.183.225 tutorial-synapse.juju.local" >> /etc/hosts` to redirect the output of the command `echo` to the end of the file `/etc/hosts`. After that, visit http://tutorial-synapse.juju.local in a browser and you'll be @@ -124,8 +128,8 @@ juju run-action synapse/0 register-user username=alice password==2.0.0"] +# Starting from what LIBPATCH number to apply legacy solutions +# v0.17 was the last version without secrets +LEGACY_SUPPORT_FROM = 17 + logger = logging.getLogger(__name__) Diff = namedtuple("Diff", "added changed deleted") @@ -351,36 +355,16 @@ def _on_topic_requested(self, event: TopicRequestedEvent): GROUP_MAPPING_FIELD = "secret_group_mapping" GROUP_SEPARATOR = "@" +MODEL_ERRORS = { + "not_leader": "this unit is not the leader", + "no_label_and_uri": "ERROR either URI or label should be used for getting an owned secret but not both", + "owner_no_refresh": "ERROR secret owner cannot use --refresh", +} -class SecretGroup(str): - """Secret groups specific type.""" - - -class SecretGroupsAggregate(str): - """Secret groups with option to extend with additional constants.""" - def __init__(self): - self.USER = SecretGroup("user") - self.TLS = SecretGroup("tls") - self.EXTRA = SecretGroup("extra") - - def __setattr__(self, name, value): - """Setting internal constants.""" - if name in self.__dict__: - raise RuntimeError("Can't set constant!") - else: - super().__setattr__(name, SecretGroup(value)) - - def groups(self) -> list: - """Return the list of stored SecretGroups.""" - return list(self.__dict__.values()) - - def get_group(self, group: str) -> Optional[SecretGroup]: - """If the input str translates to a group name, return that.""" - return SecretGroup(group) if group in self.groups() else None - - -SECRET_GROUPS = SecretGroupsAggregate() +############################################################################## +# Exceptions +############################################################################## class DataInterfacesError(Exception): @@ -407,6 +391,15 @@ class IllegalOperationError(DataInterfacesError): """To be used when an operation is not allowed to be performed.""" +############################################################################## +# Global helpers / utilities +############################################################################## + +############################################################################## +# 
Databag handling and comparison methods +############################################################################## + + def get_encoded_dict( relation: Relation, member: Union[Unit, Application], field: str ) -> Optional[Dict[str, str]]: @@ -482,6 +475,11 @@ def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]] return Diff(added, changed, deleted) +############################################################################## +# Module decorators +############################################################################## + + def leader_only(f): """Decorator to ensure that only leader can perform given operation.""" @@ -536,6 +534,36 @@ def wrapper(self, *args, **kwargs): return wrapper +def legacy_apply_from_version(version: int) -> Callable: + """Decorator to decide whether to apply a legacy function or not. + + Based on LEGACY_SUPPORT_FROM module variable value, the importer charm may only want + to apply legacy solutions starting from a specific LIBPATCH. + + NOTE: All 'legacy' functions have to be defined and called in a way that they return `None`. + This results in cleaner and more secure execution flows in case the function may be disabled. + This requirement implicitly means that legacy functions change the internal state strictly, + don't return information. + """ + + def decorator(f: Callable[..., None]): + """Signature is ensuring None return value.""" + f.legacy_version = version + + def wrapper(self, *args, **kwargs) -> None: + if version >= LEGACY_SUPPORT_FROM: + return f(self, *args, **kwargs) + + return wrapper + + return decorator + + +############################################################################## +# Helper classes +############################################################################## + + class Scope(Enum): """Peer relations scope.""" @@ -543,9 +571,35 @@ class Scope(Enum): UNIT = "unit" -################################################################################ -# Secrets internal caching -################################################################################ +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) + + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None + + +SECRET_GROUPS = SecretGroupsAggregate() class CachedSecret: @@ -554,6 +608,8 @@ class CachedSecret: The data structure is precisely re-using/simulating as in the actual Secret Storage """ + KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]] + def __init__( self, model: Model, @@ -571,6 +627,95 @@ def __init__( self.legacy_labels = legacy_labels self.current_label = None + @property + def meta(self) -> Optional[Secret]: + """Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + + try: + self._secret_meta = 
self._model.get_secret(label=self.label) + except SecretNotFoundError: + # Falling back to seeking for potential legacy labels + self._legacy_compat_find_secret_by_old_label() + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) + return self._secret_meta + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on rolling upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see the spec.) + # All data involves: + # - databag contents + # - secrets content + # - secret labels (!!!) + # Legacy functions must return None, and leave an equally consistent state whether + # they are executed or skipped (as a high enough versioned execution environment may + # not require so) + + # Compatibility + + @legacy_apply_from_version(34) + def _legacy_compat_find_secret_by_old_label(self) -> None: + """Compatibility function, allowing to find a secret by a legacy label. + + This functionality is typically needed when secret labels changed over an upgrade. + Until the first write operation, we need to maintain data as it was, including keeping + the old secret label. In order to keep track of the old label currently used to access + the secret, and additional 'current_label' field is being defined. + """ + for label in self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + return + + # Migrations + + @legacy_apply_from_version(34) + def _legacy_migration_to_new_label_if_needed(self) -> None: + """Helper function to re-create the secret with a different label. + + Juju does not provide a way to change secret labels. + Thus whenever moving from secrets version that involves secret label changes, + we "re-create" the existing secret, and attach the new label to the new + secret, to be used from then on. + + Note: we replace the old secret with a new one "in place", as we can't + easily switch the containing SecretCache structure to point to a new secret. + Instead we are changing the 'self' (CachedSecret) object to point to the + new instance. + """ + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + content = self._secret_meta.get_content() + self._secret_uri = None + + # It will be nice to have the possibility to check if we are the owners of the secret... 
+ try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if MODEL_ERRORS["not_leader"] not in str(err): + raise + self.current_label = None + + ########################################################################## + # Public functions + ########################################################################## + def add_secret( self, content: Dict[str, str], @@ -593,28 +738,6 @@ def add_secret( self._secret_meta = secret return self._secret_meta - @property - def meta(self) -> Optional[Secret]: - """Getting cached secret meta-information.""" - if not self._secret_meta: - if not (self._secret_uri or self.label): - return - - for label in [self.label] + self.legacy_labels: - try: - self._secret_meta = self._model.get_secret(label=label) - except SecretNotFoundError: - pass - else: - if label != self.label: - self.current_label = label - break - - # If still not found, to be checked by URI, to be labelled with the proposed label - if not self._secret_meta and self._secret_uri: - self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) - return self._secret_meta - def get_content(self) -> Dict[str, str]: """Getting cached secret content.""" if not self._secret_content: @@ -624,35 +747,14 @@ def get_content(self) -> Dict[str, str]: except (ValueError, ModelError) as err: # https://bugs.launchpad.net/juju/+bug/2042596 # Only triggered when 'refresh' is set - known_model_errors = [ - "ERROR either URI or label should be used for getting an owned secret but not both", - "ERROR secret owner cannot use --refresh", - ] if isinstance(err, ModelError) and not any( - msg in str(err) for msg in known_model_errors + msg in str(err) for msg in self.KNOWN_MODEL_ERRORS ): raise # Due to: ValueError: Secret owner cannot use refresh=True self._secret_content = self.meta.get_content() return self._secret_content - def _move_to_new_label_if_needed(self): - """Helper function to re-create the secret with a different label.""" - if not self.current_label or not (self.meta and self._secret_meta): - return - - # Create a new secret with the new label - content = self._secret_meta.get_content() - self._secret_uri = None - - # I wish we could just check if we are the owners of the secret... - try: - self._secret_meta = self.add_secret(content, label=self.label) - except ModelError as err: - if "this unit is not the leader" not in str(err): - raise - self.current_label = None - def set_content(self, content: Dict[str, str]) -> None: """Setting cached secret content.""" if not self.meta: @@ -663,7 +765,7 @@ def set_content(self, content: Dict[str, str]) -> None: return if content: - self._move_to_new_label_if_needed() + self._legacy_migration_to_new_label_if_needed() self.meta.set_content(content) self._secret_content = content else: @@ -926,6 +1028,23 @@ def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: """Delete data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" raise NotImplementedError + # Optional overrides + + def _legacy_apply_on_fetch(self) -> None: + """This function should provide a list of compatibility functions to be applied when fetching (legacy) data.""" + pass + + def _legacy_apply_on_update(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when writing data. + + Since data may be at a legacy version, migration may be mandatory. 
+ """ + pass + + def _legacy_apply_on_delete(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when deleting (legacy) data.""" + pass + # Internal helper methods @staticmethod @@ -1178,6 +1297,16 @@ def get_relation(self, relation_name, relation_id) -> Relation: return relation + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Get the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[self.component].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, secret_uri: str) -> None: + """Set the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + relation.data[self.component][secret_field] = secret_uri + def fetch_relation_data( self, relation_ids: Optional[List[int]] = None, @@ -1194,6 +1323,8 @@ def fetch_relation_data( a dict of the values stored in the relation data bag for all relation instances (indexed by the relation ID). """ + self._legacy_apply_on_fetch() + if not relation_name: relation_name = self.relation_name @@ -1232,6 +1363,8 @@ def fetch_my_relation_data( NOTE: Since only the leader can read the relation's 'this_app'-side Application databag, the functionality is limited to leaders """ + self._legacy_apply_on_fetch() + if not relation_name: relation_name = self.relation_name @@ -1263,6 +1396,8 @@ def fetch_my_relation_field( @leader_only def update_relation_data(self, relation_id: int, data: dict) -> None: """Update the data within the relation.""" + self._legacy_apply_on_update(list(data.keys())) + relation_name = self.relation_name relation = self.get_relation(relation_name, relation_id) return self._update_relation_data(relation, data) @@ -1270,6 +1405,8 @@ def update_relation_data(self, relation_id: int, data: dict) -> None: @leader_only def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: """Remove field from the relation.""" + self._legacy_apply_on_delete(fields) + relation_name = self.relation_name relation = self.get_relation(relation_name, relation_id) return self._delete_relation_data(relation, fields) @@ -1336,8 +1473,7 @@ def _add_relation_secret( uri_to_databag=True, ) -> bool: """Add a new Juju Secret that will be registered in the relation databag.""" - secret_field = self._generate_secret_field_name(group_mapping) - if uri_to_databag and relation.data[self.component].get(secret_field): + if uri_to_databag and self.get_secret_uri(relation, group_mapping): logging.error("Secret for relation %s already exists, not adding again", relation.id) return False @@ -1348,7 +1484,7 @@ def _add_relation_secret( # According to lint we may not have a Secret ID if uri_to_databag and secret.meta and secret.meta.id: - relation.data[self.component][secret_field] = secret.meta.id + self.set_secret_uri(relation, group_mapping, secret.meta.id) # Return the content that was added return True @@ -1449,8 +1585,7 @@ def _get_relation_secret( if not relation: return - secret_field = self._generate_secret_field_name(group_mapping) - if secret_uri := relation.data[self.local_app].get(secret_field): + if secret_uri := self.get_secret_uri(relation, group_mapping): return self.secrets.get(label, secret_uri) def _fetch_specific_relation_data( @@ -1603,11 +1738,10 @@ def _register_secrets_to_relation(self, relation: Relation, params_name_list: Li for group in SECRET_GROUPS.groups(): secret_field = 
self._generate_secret_field_name(group) - if secret_field in params_name_list: - if secret_uri := relation.data[relation.app].get(secret_field): - self._register_secret_to_relation( - relation.name, relation.id, secret_uri, group - ) + if secret_field in params_name_list and ( + secret_uri := self.get_secret_uri(relation, group) + ): + self._register_secret_to_relation(relation.name, relation.id, secret_uri, group) def _is_resource_created_for_relation(self, relation: Relation) -> bool: if not relation.app: @@ -1618,6 +1752,17 @@ def _is_resource_created_for_relation(self, relation: Relation) -> bool: ) return bool(data.get("username")) and bool(data.get("password")) + # Public functions + + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Getting relation secret URI for the corresponding Secret Group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[relation.app].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, uri: str) -> None: + """Setting relation secret URI is not possible for a Requirer.""" + raise NotImplementedError("Requirer can not change the relation secret URI.") + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: """Check if the resource has been created. @@ -1768,7 +1913,6 @@ def __init__( secret_field_name: Optional[str] = None, deleted_label: Optional[str] = None, ): - """Manager of base client relations.""" RequirerData.__init__( self, model, @@ -1779,6 +1923,11 @@ def __init__( self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME self.deleted_label = deleted_label self._secret_label_map = {} + + # Legacy information holders + self._legacy_labels = [] + self._legacy_secret_uri = None + # Secrets that are being dynamically added within the scope of this event handler run self._new_secrets = [] self._additional_secret_group_mapping = additional_secret_group_mapping @@ -1853,10 +2002,12 @@ def set_secret( value: The string value of the secret group_mapping: The name of the "secret group", in case the field is to be added to an existing secret """ + self._legacy_apply_on_update([field]) + full_field = self._field_to_internal_name(field, group_mapping) if self.secrets_enabled and full_field not in self.current_secret_fields: self._new_secrets.append(full_field) - if self._no_group_with_databag(field, full_field): + if self.valid_field_pattern(field, full_field): self.update_relation_data(relation_id, {full_field: value}) # Unlike for set_secret(), there's no harm using this operation with static secrets @@ -1869,6 +2020,8 @@ def get_secret( group_mapping: Optional[SecretGroup] = None, ) -> Optional[str]: """Public interface method to fetch secrets only.""" + self._legacy_apply_on_fetch() + full_field = self._field_to_internal_name(field, group_mapping) if ( self.secrets_enabled @@ -1876,7 +2029,7 @@ def get_secret( and field not in self.current_secret_fields ): return - if self._no_group_with_databag(field, full_field): + if self.valid_field_pattern(field, full_field): return self.fetch_my_relation_field(relation_id, full_field) @dynamic_secrets_only @@ -1887,14 +2040,19 @@ def delete_secret( group_mapping: Optional[SecretGroup] = None, ) -> Optional[str]: """Public interface method to delete secrets only.""" + self._legacy_apply_on_delete([field]) + full_field = self._field_to_internal_name(field, group_mapping) if self.secrets_enabled and full_field not in self.current_secret_fields: 
logger.warning(f"Secret {field} from group {group_mapping} was not found") return - if self._no_group_with_databag(field, full_field): + + if self.valid_field_pattern(field, full_field): self.delete_relation_data(relation_id, [full_field]) + ########################################################################## # Helpers + ########################################################################## @staticmethod def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: @@ -1936,10 +2094,69 @@ def _content_for_secret_group( if k in self.secret_fields } - # Backwards compatibility + def valid_field_pattern(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together without secrets being enabled. + + Secrets groups are impossible to use with versions that are not yet supporting secrets. + """ + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." + ) + return False + return True + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see spec.) + # All data involves: + # - databag + # - secrets content + # - secret labels (!!!) + # Legacy functions must return None, and leave an equally consistent state whether + # they are executed or skipped (as a high enough versioned execution environment may + # not require so) + + # Full legacy stack for each operation + + def _legacy_apply_on_fetch(self) -> None: + """All legacy functions to be applied on fetch.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + + def _legacy_apply_on_update(self, fields) -> None: + """All legacy functions to be applied on update.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + self._legacy_migration_remove_secret_from_databag(relation, fields) + self._legacy_migration_remove_secret_field_name_from_databag(relation) + + def _legacy_apply_on_delete(self, fields) -> None: + """All legacy functions to be applied on delete.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + self._legacy_compat_check_deleted_label(relation, fields) + + # Compatibility + + @legacy_apply_from_version(18) + def _legacy_compat_check_deleted_label(self, relation, fields) -> None: + """Helper function for legacy behavior. + + As long as https://bugs.launchpad.net/juju/+bug/2028094 wasn't fixed, + we did not delete fields but rather kept them in the secret with a string value + expressing invalidity. This function is maintainnig that behavior when needed. 
+ """ + if not self.deleted_label: + return - def _check_deleted_label(self, relation, fields) -> None: - """Helper function for legacy behavior.""" current_data = self.fetch_my_relation_data([relation.id], fields) if current_data is not None: # Check if the secret we wanna delete actually exists @@ -1952,7 +2169,43 @@ def _check_deleted_label(self, relation, fields) -> None: ", ".join(non_existent), ) - def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: + @legacy_apply_from_version(18) + def _legacy_compat_secret_uri_from_databag(self, relation) -> None: + """Fetching the secret URI from the databag, in case stored there.""" + self._legacy_secret_uri = relation.data[self.component].get( + self._generate_secret_field_name(), None + ) + + @legacy_apply_from_version(34) + def _legacy_compat_generate_prev_labels(self) -> None: + """Generator for legacy secret label names, for backwards compatibility. + + Secret label is part of the data that MUST be maintained across rolling upgrades. + In case there may be a change on a secret label, the old label must be recognized + after upgrades, and left intact until the first write operation -- when we roll over + to the new label. + + This function keeps "memory" of previously used secret labels. + NOTE: Return value takes decorator into account -- all 'legacy' functions may return `None` + + v0.34 (rev69): Fixing issue https://github.com/canonical/data-platform-libs/issues/155 + meant moving from '.' (i.e. 'mysql.app', 'mysql.unit') + to labels '..' (like 'peer.mysql.app') + """ + if self._legacy_labels: + return + + result = [] + members = [self._model.app.name] + if self.scope: + members.append(self.scope.value) + result.append(f"{'.'.join(members)}") + self._legacy_labels = result + + # Migration + + @legacy_apply_from_version(18) + def _legacy_migration_remove_secret_from_databag(self, relation, fields: List[str]) -> None: """For Rolling Upgrades -- when moving from databag to secrets usage. Practically what happens here is to remove stuff from the databag that is @@ -1966,10 +2219,16 @@ def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: if self._fetch_relation_data_without_secrets(self.component, relation, [field]): self._delete_relation_data_without_secrets(self.component, relation, [field]) - def _remove_secret_field_name_from_databag(self, relation) -> None: + @legacy_apply_from_version(18) + def _legacy_migration_remove_secret_field_name_from_databag(self, relation) -> None: """Making sure that the old databag URI is gone. This action should not be executed more than once. + + There was a phase (before moving secrets usage to libs) when charms saved the peer + secret URI to the databag, and used this URI from then on to retrieve their secret. + When upgrading to charm versions using this library, we need to add a label to the + secret and access it via label from than on, and remove the old traces from the databag. 
""" # Nothing to do if 'internal-secret' is not in the databag if not (relation.data[self.component].get(self._generate_secret_field_name())): @@ -1985,25 +2244,9 @@ def _remove_secret_field_name_from_databag(self, relation) -> None: # Databag reference to the secret URI can be removed, now that it's labelled relation.data[self.component].pop(self._generate_secret_field_name(), None) - def _previous_labels(self) -> List[str]: - """Generator for legacy secret label names, for backwards compatibility.""" - result = [] - members = [self._model.app.name] - if self.scope: - members.append(self.scope.value) - result.append(f"{'.'.join(members)}") - return result - - def _no_group_with_databag(self, field: str, full_field: str) -> bool: - """Check that no secret group is attempted to be used together with databag.""" - if not self.secrets_enabled and full_field != field: - logger.error( - f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." - ) - return False - return True - + ########################################################################## # Event handlers + ########################################################################## def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the relation has changed.""" @@ -2013,7 +2256,9 @@ def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: """Event emitted when the secret has changed.""" pass + ########################################################################## # Overrides of Relation Data handling functions + ########################################################################## def _generate_secret_label( self, relation_name: str, relation_id: int, group_mapping: SecretGroup @@ -2050,13 +2295,14 @@ def _get_relation_secret( return label = self._generate_secret_label(relation_name, relation_id, group_mapping) - secret_uri = relation.data[self.component].get(self._generate_secret_field_name(), None) # URI or legacy label is only to applied when moving single legacy secret to a (new) label if group_mapping == SECRET_GROUPS.EXTRA: # Fetching the secret with fallback to URI (in case label is not yet known) # Label would we "stuck" on the secret in case it is found - return self.secrets.get(label, secret_uri, legacy_labels=self._previous_labels()) + return self.secrets.get( + label, self._legacy_secret_uri, legacy_labels=self._legacy_labels + ) return self.secrets.get(label) def _get_group_secret_contents( @@ -2086,7 +2332,6 @@ def _fetch_my_specific_relation_data( @either_static_or_dynamic_secrets def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" - self._remove_secret_from_databag(relation, list(data.keys())) _, normal_fields = self._process_secret_fields( relation, self.secret_fields, @@ -2095,7 +2340,6 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non data=data, uri_to_databag=False, ) - self._remove_secret_field_name_from_databag(relation) normal_content = {k: v for k, v in data.items() if k in normal_fields} self._update_relation_data_without_secrets(self.component, relation, normal_content) @@ -2104,8 +2348,6 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" if self.secret_fields and self.deleted_label: - # Legacy, backwards compatibility - self._check_deleted_label(relation, fields) _, normal_fields = self._process_secret_fields( relation, @@ -2141,7 +2383,9 @@ def fetch_relation_field( "fetch_my_relation_data() and fetch_my_relation_field()" ) + ########################################################################## # Public functions -- inherited + ########################################################################## fetch_my_relation_data = Data.fetch_my_relation_data fetch_my_relation_field = Data.fetch_my_relation_field @@ -2606,6 +2850,14 @@ def set_version(self, relation_id: int, version: str) -> None: """ self.update_relation_data(relation_id, {"version": version}) + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + class DatabaseProviderEventHandlers(EventHandlers): """Provider-side of the database relation handlers.""" @@ -2842,6 +3094,21 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the database relation has changed.""" + is_subordinate = False + remote_unit_data = None + for key in event.relation.data.keys(): + if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name): + remote_unit_data = event.relation.data[key] + elif isinstance(key, Application) and key.name != self.charm.app.name: + is_subordinate = event.relation.data[key].get("subordinated") == "true" + + if is_subordinate: + if not remote_unit_data: + return + + if remote_unit_data.get("state") != "ready": + return + # Check which data has changed to emit customs events. diff = self._diff(event) diff --git a/lib/charms/loki_k8s/v1/loki_push_api.py b/lib/charms/loki_k8s/v1/loki_push_api.py index c3c1d086..7f8372c4 100644 --- a/lib/charms/loki_k8s/v1/loki_push_api.py +++ b/lib/charms/loki_k8s/v1/loki_push_api.py @@ -527,7 +527,7 @@ def _alert_rules_error(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 11 +LIBPATCH = 12 PYDEPS = ["cosl"] @@ -2562,7 +2562,9 @@ def _update_logging(self, _): return for container in self._charm.unit.containers.values(): - self._update_endpoints(container, loki_endpoints) + if container.can_connect(): + self._update_endpoints(container, loki_endpoints) + # else: `_update_endpoints` will be called on pebble-ready anyway. def _retrieve_endpoints_from_relation(self) -> dict: loki_endpoints = {} diff --git a/lib/charms/saml_integrator/v0/saml.py b/lib/charms/saml_integrator/v0/saml.py index be555dc2..8fd1610e 100644 --- a/lib/charms/saml_integrator/v0/saml.py +++ b/lib/charms/saml_integrator/v0/saml.py @@ -68,7 +68,7 @@ class method `from_relation_data`. 
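The saml.py hunks below relax `url` and `metadata_url` from required to optional, and the recurring pattern is a guarded parse: only invoke `parse_obj_as` when the databag actually carries a value. A condensed, standalone sketch of that pattern, using the same pydantic `parse_obj_as` helper this library relies on:

```python
import typing

from pydantic import AnyHttpUrl, parse_obj_as


def parse_optional_url(raw: typing.Optional[str]) -> typing.Optional[AnyHttpUrl]:
    """Parse a URL field from relation data, treating a missing/empty value as None."""
    return parse_obj_as(AnyHttpUrl, raw) if raw else None


# parse_optional_url("https://idp.example.com/metadata") -> validated URL
# parse_optional_url(None) -> None, instead of a validation error
```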
# Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 9 +LIBPATCH = 10 # pylint: disable=wrong-import-position import re @@ -92,7 +92,7 @@ class SamlEndpoint(BaseModel): """ name: str = Field(..., min_length=1) - url: AnyHttpUrl + url: typing.Optional[AnyHttpUrl] binding: str = Field(..., min_length=1) response_url: typing.Optional[AnyHttpUrl] @@ -108,7 +108,8 @@ def to_relation_data(self) -> typing.Dict[str, str]: # Transform name into snakecase lowercase_name = re.sub(r"(?<!^)(?=[A-Z])", "_", self.name).lower() @@ ... @@ def from_relation_data(...) -> "SamlEndpoi prefix = f"{lowercase_name}_{http_method}_" return cls( name=name, - url=parse_obj_as(AnyHttpUrl, relation_data[f"{prefix}url"]), + url=( + parse_obj_as(AnyHttpUrl, relation_data[f"{prefix}url"]) + if relation_data[f"{prefix}url"] + else None + ), binding=relation_data[f"{prefix}binding"], response_url=( parse_obj_as(AnyHttpUrl, relation_data[f"{prefix}response_url"]) @@ -158,7 +163,7 @@ class SamlRelationData(BaseModel): """ entity_id: str = Field(..., min_length=1) - metadata_url: AnyHttpUrl + metadata_url: typing.Optional[AnyHttpUrl] certificates: typing.Tuple[str, ...] endpoints: typing.Tuple[SamlEndpoint, ...] @@ -170,9 +175,10 @@ def to_relation_data(self) -> typing.Dict[str, str]: """ result = { "entity_id": self.entity_id, - "metadata_url": str(self.metadata_url), "x509certs": ",".join(self.certificates), } + if self.metadata_url: + result["metadata_url"] = str(self.metadata_url) for endpoint in self.endpoints: result.update(endpoint.to_relation_data()) return result @@ -201,8 +207,10 @@ def from_relation_data(cls, relation_data: ops.RelationDataContent) -> "SamlRela endpoints.sort(key=lambda ep: ep.name) return cls( entity_id=relation_data.get("entity_id"), # type: ignore - metadata_url=parse_obj_as( - AnyHttpUrl, relation_data.get("metadata_url") + metadata_url=( + parse_obj_as(AnyHttpUrl, relation_data.get("metadata_url")) + if relation_data.get("metadata_url") + else None ), # type: ignore certificates=tuple(relation_data.get("x509certs").split(",")), # type: ignore endpoints=tuple(endpoints), @@ -232,7 +240,7 @@ def entity_id(self) -> str: return self.saml_relation_data.entity_id @property - def metadata_url(self) -> str: + def metadata_url(self) -> typing.Optional[str]: """Fetch the SAML metadata URL from the relation.""" - return str(self.saml_relation_data.metadata_url) + return str(self.saml_relation_data.metadata_url) if self.saml_relation_data.metadata_url else None diff --git a/lib/charms/smtp_integrator/v0/smtp.py b/lib/charms/smtp_integrator/v0/smtp.py index 6238a10c..2816965e 100644 --- a/lib/charms/smtp_integrator/v0/smtp.py +++ b/lib/charms/smtp_integrator/v0/smtp.py @@ -68,7 +68,7 @@ def _on_config_changed(self, _) -> None: # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 9 +LIBPATCH = 11 PYDEPS = ["pydantic>=2"] @@ -76,6 +76,7 @@ def _on_config_changed(self, _) -> None: import itertools import logging import typing +from ast import literal_eval from enum import Enum from typing import Dict, Optional @@ -127,7 +128,8 @@ class SmtpRelationData(BaseModel): password_id: The secret ID where the SMTP AUTH password for the SMTP relay is stored. auth_type: The type used to authenticate with the SMTP relay. transport_security: The security protocol to use for the outgoing SMTP relay. - domain: The domain used by the sent emails from SMTP relay. + domain: The domain used by the emails sent from the SMTP relay. + skip_ssl_verify: Specifies whether certificate trust verification is skipped when connecting to the SMTP relay.
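Since relation databags only hold strings, `skip_ssl_verify` travels over the wire as the output of `str(bool)`; the requirer side below therefore reads it back with `ast.literal_eval` rather than `bool()`, which would treat any non-empty string as truthy. A minimal round-trip:

```python
from ast import literal_eval

wire_value = str(False)  # what the provider writes to the databag: "False"

assert literal_eval(wire_value) is False  # recovers the original Python literal
assert bool(wire_value) is True           # the trap that literal_eval avoids
```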
""" host: str = Field(..., min_length=1) @@ -138,6 +140,7 @@ class SmtpRelationData(BaseModel): auth_type: AuthType transport_security: TransportSecurity domain: Optional[str] = None + skip_ssl_verify: Optional[bool] = False def to_relation_data(self) -> Dict[str, str]: """Convert an instance of SmtpRelationData to the relation representation. @@ -150,6 +153,7 @@ def to_relation_data(self) -> Dict[str, str]: "port": str(self.port), "auth_type": self.auth_type.value, "transport_security": self.transport_security.value, + "skip_ssl_verify": str(self.skip_ssl_verify), } if self.domain: result["domain"] = self.domain @@ -173,7 +177,8 @@ class SmtpDataAvailableEvent(ops.RelationEvent): password_id: The secret ID where the SMTP AUTH password for the SMTP relay is stored. auth_type: The type used to authenticate with the SMTP relay. transport_security: The security protocol to use for the outgoing SMTP relay. - domain: The domain used by the sent emails from SMTP relay. + domain: The domain used by the emails sent from SMTP relay. + skip_ssl_verify: Specifies if certificate trust verification is skipped in the SMTP relay. """ @property @@ -224,6 +229,14 @@ def domain(self) -> str: assert self.relation.app return typing.cast(str, self.relation.data[self.relation.app].get("domain")) + @property + def skip_ssl_verify(self) -> bool: + """Fetch the skip_ssl_verify flag from the relation.""" + assert self.relation.app + return literal_eval( + typing.cast(str, self.relation.data[self.relation.app].get("skip_ssl_verify")) + ) + class SmtpRequiresEvents(ops.CharmEvents): """SMTP events. @@ -287,6 +300,7 @@ def _get_relation_data_from_relation(self, relation: ops.Relation) -> SmtpRelati auth_type=AuthType(relation_data.get("auth_type")), transport_security=TransportSecurity(relation_data.get("transport_security")), domain=relation_data.get("domain"), + skip_ssl_verify=typing.cast(bool, relation_data.get("skip_ssl_verify")), ) def _is_relation_data_valid(self, relation: ops.Relation) -> bool: diff --git a/lib/charms/synapse/v0/matrix_auth.py b/lib/charms/synapse/v0/matrix_auth.py new file mode 100644 index 00000000..cc7dca24 --- /dev/null +++ b/lib/charms/synapse/v0/matrix_auth.py @@ -0,0 +1,476 @@ +# Copyright 2025 Canonical Ltd. +# Licensed under the Apache2.0. See LICENSE file in charm source for details. + +"""Library to manage the plugin integrations with the Synapse charm. + +This library contains the Requires and Provides classes for handling the integration +between an application and a charm providing the `matrix_plugin` integration. + +### Requirer Charm + +```python + +from charms.synapse.v0.matrix_auth import MatrixAuthRequires + +class MatrixAuthRequirerCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.plugin_auth = MatrixAuthRequires(self) + self.framework.observe(self.matrix_auth.on.matrix_auth_request_processed, self._handler) + ... + + def _handler(self, events: MatrixAuthRequestProcessed) -> None: + ... + +``` + +As shown above, the library provides a custom event to handle the scenario in +which a matrix authentication (homeserver and shared secret) has been added or updated. + +The MatrixAuthRequires provides an `update_relation_data` method to update the relation data by +passing a `MatrixAuthRequirerData` data object, requesting a new authentication. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. 
+ +```python +from charms.synapse.v0.matrix_auth import MatrixAuthProvides + +class MatrixAuthProviderCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.plugin_auth = MatrixAuthProvides(self) + ... + +``` +The MatrixAuthProvides object provides an `update_relation_data` method to update +the relation data by passing a `MatrixAuthProviderData` data object. + +```python +class MatrixAuthProviderCharm(ops.CharmBase): + ... + + def _on_config_changed(self, _) -> None: + for relation in self.model.relations[self.plugin_auth.relation_name]: + self.plugin_auth.update_relation_data(relation, self._get_matrix_auth_data()) + +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "ff6788c89b204448b3b62ba6f93e2768" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 3 + +# pylint: disable=wrong-import-position +import logging +from typing import Dict, Optional, cast + +import ops +from pydantic import BaseModel, Field, SecretStr + +logger = logging.getLogger(__name__) + +#### Constants #### +APP_REGISTRATION_LABEL = "app-registration" +APP_REGISTRATION_CONTENT_LABEL = "app-registration-content" +DEFAULT_RELATION_NAME = "matrix-auth" +SHARED_SECRET_LABEL = "shared-secret" +SHARED_SECRET_CONTENT_LABEL = "shared-secret-content" + + +#### Data models for Provider and Requirer #### +class MatrixAuthProviderData(BaseModel): + """Represent the MatrixAuth provider data. + + Attributes: + homeserver: the homeserver URL. + shared_secret: the Matrix shared secret. + shared_secret_id: the shared secret Juju secret ID. + """ + + homeserver: str + shared_secret: Optional[SecretStr] = Field(default=None, exclude=True) + shared_secret_id: Optional[SecretStr] = Field(default=None) + + def set_shared_secret_id(self, model: ops.Model, relation: ops.Relation) -> None: + """Store the Matrix shared secret as a Juju secret. + + Args: + model: the Juju model. + relation: relation to grant access to the secrets to. + """ + # the shared secret is expected to be set by the caller; cast for the type checker + password = cast(SecretStr, self.shared_secret) + # pylint doesn't like get_secret_value + secret_value = password.get_secret_value() # pylint: disable=no-member + try: + secret = model.get_secret(label=SHARED_SECRET_LABEL) + secret.set_content({SHARED_SECRET_CONTENT_LABEL: secret_value}) + # secret.id is not None at this point + self.shared_secret_id = cast(str, secret.id) + except ops.SecretNotFoundError: + secret = relation.app.add_secret( + {SHARED_SECRET_CONTENT_LABEL: secret_value}, label=SHARED_SECRET_LABEL + ) + secret.grant(relation) + self.shared_secret_id = cast(str, secret.id) + + @classmethod + def get_shared_secret( + cls, model: ops.Model, shared_secret_id: Optional[str] + ) -> Optional[SecretStr]: + """Retrieve the shared secret corresponding to the shared_secret_id. + + Args: + model: the Juju model. + shared_secret_id: the secret ID for the shared secret. + + Returns: + the shared secret or None if not found.
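`set_shared_secret_id` above (and its v1 counterparts later in this diff) follows one create-or-update idiom for Juju secrets: try to update an existing secret by label, and fall back to creating and granting a new one. Distilled into a standalone sketch; here the secret is owned by the charm's own application via `model.app.add_secret`, a simplification of the library's exact call:

```python
from typing import Dict

import ops


def upsert_relation_secret(
    model: ops.Model, relation: ops.Relation, label: str, content: Dict[str, str]
) -> str:
    """Create or update an app-owned Juju secret and grant the remote app access."""
    try:
        # Update path: the secret already exists from an earlier hook run.
        secret = model.get_secret(label=label)
        secret.set_content(content)
    except ops.SecretNotFoundError:
        # Create path: add the secret and grant it on this relation.
        secret = model.app.add_secret(content, label=label)
        secret.grant(relation)
    assert secret.id is not None  # populated once the secret exists
    return secret.id
```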
+ """ + if not shared_secret_id: + return None + try: + secret = model.get_secret(id=shared_secret_id) + password = secret.get_content().get(SHARED_SECRET_CONTENT_LABEL) + if not password: + return None + return SecretStr(password) + except ops.SecretNotFoundError: + return None + + def to_relation_data(self, model: ops.Model, relation: ops.Relation) -> Dict[str, str]: + """Convert an instance of MatrixAuthProviderData to the relation representation. + + Args: + model: the Juju model. + relation: relation to grant access to the secrets to. + + Returns: + Dict containing the representation. + """ + self.set_shared_secret_id(model, relation) + return self.model_dump(exclude_unset=True) + + @classmethod + def from_relation(cls, model: ops.Model, relation: ops.Relation) -> "MatrixAuthProviderData": + """Initialize a new instance of the MatrixAuthProviderData class from the relation. + + Args: + relation: the relation. + + Returns: + A MatrixAuthProviderData instance. + + Raises: + ValueError: if the value is not parseable. + """ + app = cast(ops.Application, relation.app) + relation_data = relation.data[app] + shared_secret_id = ( + (relation_data["shared_secret_id"]) + if "shared_secret_id" in relation_data + else None + ) + shared_secret = MatrixAuthProviderData.get_shared_secret(model, shared_secret_id) + homeserver = relation_data.get("homeserver") + if shared_secret is None or homeserver is None: + raise ValueError("Invalid relation data") + return MatrixAuthProviderData( + homeserver=homeserver, + shared_secret=shared_secret, + ) + + +class MatrixAuthRequirerData(BaseModel): + """Represent the MatrixAuth requirer data. + + Attributes: + registration: a generated app registration file. + registration_id: the registration Juju secret ID. + """ + + registration: Optional[SecretStr] = Field(default=None, exclude=True) + registration_secret_id: Optional[SecretStr] = Field(default=None) + + def set_registration_id(self, model: ops.Model, relation: ops.Relation) -> None: + """Store the app registration as a Juju secret. + + Args: + model: the Juju model + relation: relation to grant access to the secrets to. + """ + # password is always defined since pydantic guarantees it + password = cast(SecretStr, self.registration) + # pylint doesn't like get_secret_value + secret_value = password.get_secret_value() # pylint: disable=no-member + try: + secret = model.get_secret(label=APP_REGISTRATION_LABEL) + secret.set_content({APP_REGISTRATION_CONTENT_LABEL: secret_value}) + # secret.id is not None at this point + self.registration_secret_id = cast(str, secret.id) + except ops.SecretNotFoundError: + secret = relation.app.add_secret( + {APP_REGISTRATION_CONTENT_LABEL: secret_value}, label=APP_REGISTRATION_LABEL + ) + secret.grant(relation) + self.registration_secret_id = cast(str, secret.id) + + @classmethod + def get_registration( + cls, model: ops.Model, registration_secret_id: Optional[str] + ) -> Optional[SecretStr]: + """Retrieve the registration corresponding to the registration_secret_id. + + Args: + model: the Juju model. + registration_secret_id: the secret ID for the registration. + + Returns: + the registration or None if not found. 
+ """ + if not registration_secret_id: + return None + try: + secret = model.get_secret(id=registration_secret_id) + password = secret.get_content().get(APP_REGISTRATION_CONTENT_LABEL) + if not password: + return None + return SecretStr(password) + except ops.SecretNotFoundError: + return None + + def to_relation_data(self, model: ops.Model, relation: ops.Relation) -> Dict[str, str]: + """Convert an instance of MatrixAuthRequirerData to the relation representation. + + Args: + model: the Juju model. + relation: relation to grant access to the secrets to. + + Returns: + Dict containing the representation. + """ + self.set_registration_id(model, relation) + dumped_model = self.model_dump(exclude_unset=True) + dumped_data = { + "registration_secret_id": dumped_model["registration_secret_id"], + } + return dumped_data + + @classmethod + def from_relation(cls, model: ops.Model, relation: ops.Relation) -> "MatrixAuthRequirerData": + """Get a MatrixAuthRequirerData from the relation data. + + Args: + model: the Juju model. + relation: the relation. + + Returns: + the relation data and the processed entries for it. + + Raises: + ValueError: if the value is not parseable. + """ + app = cast(ops.Application, relation.app) + relation_data = relation.data[app] + registration_secret_id = relation_data.get("registration_secret_id") + registration = MatrixAuthRequirerData.get_registration(model, registration_secret_id) + return MatrixAuthRequirerData( + registration=registration, + ) + + +#### Events #### +class MatrixAuthRequestProcessed(ops.RelationEvent): + """MatrixAuth event emitted when a new request is processed.""" + + def get_matrix_auth_provider_relation_data(self) -> MatrixAuthProviderData: + """Get a MatrixAuthProviderData for the relation data. + + Returns: + the MatrixAuthProviderData for the relation data. + """ + return MatrixAuthProviderData.from_relation(self.framework.model, self.relation) + + +class MatrixAuthRequestReceived(ops.RelationEvent): + """MatrixAuth event emitted when a new request is made.""" + + +class MatrixAuthRequiresEvents(ops.CharmEvents): + """MatrixAuth requirer events. + + This class defines the events that a MatrixAuth requirer can emit. + + Attributes: + matrix_auth_request_processed: the MatrixAuthRequestProcessed. + """ + + matrix_auth_request_processed = ops.EventSource(MatrixAuthRequestProcessed) + + +class MatrixAuthProvidesEvents(ops.CharmEvents): + """MatrixAuth provider events. + + This class defines the events that a MatrixAuth provider can emit. + + Attributes: + matrix_auth_request_received: the MatrixAuthRequestReceived. + """ + + matrix_auth_request_received = ops.EventSource(MatrixAuthRequestReceived) + + +#### Provides and Requires #### +class MatrixAuthProvides(ops.Object): + """Provider side of the MatrixAuth relation. + + Attributes: + on: events the provider can emit. + """ + + on = MatrixAuthProvidesEvents() + + def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None: + """Construct. + + Args: + charm: the provider charm. + relation_name: the relation name. + """ + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def get_remote_relation_data(self) -> Optional[MatrixAuthRequirerData]: + """Retrieve the remote relation data. + + Returns: + MatrixAuthRequirerData: the relation data. 
+ """ + relation = self.model.get_relation(self.relation_name) + return MatrixAuthRequirerData.from_relation(self.model, relation=relation) if relation else None + + def _is_remote_relation_data_valid(self, relation: ops.Relation) -> bool: + """Validate the relation data. + + Args: + relation: the relation to validate. + + Returns: + true: if the relation data is valid. + """ + try: + _ = MatrixAuthRequirerData.from_relation(self.model, relation=relation) + return True + except ValueError as ex: + logger.warning("Error validating the relation data %s", ex) + return False + + def _on_relation_changed(self, event: ops.RelationChangedEvent) -> None: + """Event emitted when the relation has changed. + + Args: + event: event triggering this handler. + """ + assert event.relation.app + relation_data = event.relation.data[event.relation.app] + if relation_data and self._is_remote_relation_data_valid(event.relation): + self.on.matrix_auth_request_received.emit( + event.relation, app=event.app, unit=event.unit + ) + + def update_relation_data( + self, relation: ops.Relation, matrix_auth_provider_data: MatrixAuthProviderData + ) -> None: + """Update the relation data. + + Args: + relation: the relation for which to update the data. + matrix_auth_provider_data: a MatrixAuthProviderData instance wrapping the data to be + updated. + """ + relation_data = matrix_auth_provider_data.to_relation_data(self.model, relation) + relation.data[self.model.app].update(relation_data) + + +class MatrixAuthRequires(ops.Object): + """Requirer side of the MatrixAuth requires relation. + + Attributes: + on: events the provider can emit. + """ + + on = MatrixAuthRequiresEvents() + + def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None: + """Construct. + + Args: + charm: the provider charm. + relation_name: the relation name. + """ + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def get_remote_relation_data(self) -> Optional[MatrixAuthProviderData]: + """Retrieve the remote relation data. + + Returns: + MatrixAuthProviderData: the relation data. + """ + relation = self.model.get_relation(self.relation_name) + return MatrixAuthProviderData.from_relation(self.model, relation=relation) if relation else None + + def _is_remote_relation_data_valid(self, relation: ops.Relation) -> bool: + """Validate the relation data. + + Args: + relation: the relation to validate. + + Returns: + true: if the relation data is valid. + """ + try: + _ = MatrixAuthProviderData.from_relation(self.model, relation=relation) + return True + except ValueError as ex: + logger.warning("Error validating the relation data %s", ex) + return False + + def _on_relation_changed(self, event: ops.RelationChangedEvent) -> None: + """Event emitted when the relation has changed. + + Args: + event: event triggering this handler. + """ + assert event.relation.app + relation_data = event.relation.data[event.relation.app] + if relation_data and self._is_remote_relation_data_valid(event.relation): + self.on.matrix_auth_request_processed.emit( + event.relation, app=event.app, unit=event.unit + ) + + def update_relation_data( + self, + relation: ops.Relation, + matrix_auth_requirer_data: MatrixAuthRequirerData, + ) -> None: + """Update the relation data. + + Args: + relation: the relation for which to update the data. + matrix_auth_requirer_data: MatrixAuthRequirerData wrapping the data to be updated. 
+ """ + relation_data = matrix_auth_requirer_data.to_relation_data(self.model, relation) + relation.data[self.model.app].update(relation_data) diff --git a/lib/charms/synapse/v1/matrix_auth.py b/lib/charms/synapse/v1/matrix_auth.py new file mode 100644 index 00000000..e9488690 --- /dev/null +++ b/lib/charms/synapse/v1/matrix_auth.py @@ -0,0 +1,526 @@ +# Copyright 2025 Canonical Ltd. +# Licensed under the Apache2.0. See LICENSE file in charm source for details. + +"""Library to manage the plugin integrations with the Synapse charm. + +This library contains the Requires and Provides classes for handling the integration +between an application and a charm providing the `matrix_plugin` integration. + +### Requirer Charm + +```python + +from charms.synapse.v0.matrix_auth import MatrixAuthRequires + +class MatrixAuthRequirerCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.plugin_auth = MatrixAuthRequires(self) + self.framework.observe(self.matrix_auth.on.matrix_auth_request_processed, self._handler) + ... + + def _handler(self, events: MatrixAuthRequestProcessed) -> None: + ... + +``` + +As shown above, the library provides a custom event to handle the scenario in +which a matrix authentication (homeserver and shared secret) has been added or updated. + +The MatrixAuthRequires provides an `update_relation_data` method to update the relation data by +passing a `MatrixAuthRequirerData` data object, requesting a new authentication. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. + +```python +from charms.synapse.v0.matrix_auth import MatrixAuthProvides + +class MatrixAuthProviderCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.plugin_auth = MatrixAuthProvides(self) + ... + +``` +The MatrixAuthProvides object wraps the list of relations into a `relations` property +and provides an `update_relation_data` method to update the relation data by passing +a `MatrixAuthRelationData` data object. + +```python +class MatrixAuthProviderCharm(ops.CharmBase): + ... + + def _on_config_changed(self, _) -> None: + for relation in self.model.relations[self.plugin_auth.relation_name]: + self.plugin_auth.update_relation_data(relation, self._get_matrix_auth_data()) + +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "ff6788c89b204448b3b62ba6f93e2768" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 0 + +# pylint: disable=wrong-import-position +import json +import logging +from typing import Dict, List, Optional, Tuple, cast +import secrets +import base64 +from cryptography.fernet import Fernet + +import ops +from pydantic import BaseModel, Field, SecretStr + +logger = logging.getLogger(__name__) + +APP_REGISTRATION_LABEL = "app-registration" +APP_REGISTRATION_CONTENT_LABEL = "app-registration-content" +DEFAULT_RELATION_NAME = "matrix-auth" +SHARED_SECRET_LABEL = "shared-secret" +SHARED_SECRET_CONTENT_LABEL = "shared-secret-content" +ENCRYPTION_KEY_SECRET_LABEL = "encryption-key-secret" +ENCRYPTION_KEY_SECRET_CONTENT_LABEL = "encryption-key-content" + +def encrypt_string(key: bytes, plaintext: SecretStr) -> str: + """Encrypt a string using Fernet. + + Args: + key: encryption key in bytes. + plaintext: text to encrypt. + + Returns: + encrypted text. 
+ """ + plaintext = cast(SecretStr, plaintext) + encryptor = Fernet(key) + ciphertext = encryptor.encrypt(plaintext.get_secret_value().encode('utf-8')) + return ciphertext.decode() + +def decrypt_string(key: bytes, ciphertext: str) -> str: + """Decrypt a string using Fernet. + + Args: + key: encryption key in bytes. + ciphertext: encrypted text. + + Returns: + decrypted text. + """ + decryptor = Fernet(key) + plaintext = decryptor.decrypt(ciphertext.encode('utf-8')) + return plaintext.decode() + +#### Data models for Provider and Requirer #### +class MatrixAuthProviderData(BaseModel): + """Represent the MatrixAuth provider data. + + Attributes: + homeserver: the homeserver URL. + shared_secret: the Matrix shared secret. + shared_secret_id: the shared secret Juju secret ID. + """ + + homeserver: str + shared_secret: Optional[SecretStr] = Field(default=None, exclude=True) + shared_secret_id: Optional[SecretStr] = Field(default=None) + encryption_key_secret_id: Optional[SecretStr] = Field(default=None) + + def set_shared_secret_id(self, model: ops.Model, relation: ops.Relation) -> None: + """Store the Matrix shared secret as a Juju secret. + + Args: + model: the Juju model + relation: relation to grant access to the secrets to. + """ + # password is always defined since pydantic guarantees it + password = cast(SecretStr, self.shared_secret) + # pylint doesn't like get_secret_value + secret_value = password.get_secret_value() # pylint: disable=no-member + try: + secret = model.get_secret(label=SHARED_SECRET_LABEL) + secret.set_content({SHARED_SECRET_CONTENT_LABEL: secret_value}) + # secret.id is not None at this point + self.shared_secret_id = cast(str, secret.id) + except ops.SecretNotFoundError: + secret = relation.app.add_secret( + {SHARED_SECRET_CONTENT_LABEL: secret_value}, label=SHARED_SECRET_LABEL + ) + secret.grant(relation) + self.shared_secret_id = cast(str, secret.id) + + def set_encryption_key_secret_id(self, model: ops.Model, relation: ops.Relation) -> None: + """Store the encryption key to encrypt/decrypt appservice registrations. + + Args: + model: the Juju model + relation: relation to grant access to the secrets to. + """ + key = Fernet.generate_key() + encryption_key = key.decode('utf-8') + try: + secret = model.get_secret(label=ENCRYPTION_KEY_SECRET_LABEL) + secret.set_content({ENCRYPTION_KEY_SECRET_CONTENT_LABEL: encryption_key}) + # secret.id is not None at this point + self.encryption_key_secret_id = cast(str, secret.id) + except ops.SecretNotFoundError: + secret = relation.app.add_secret( + {ENCRYPTION_KEY_SECRET_CONTENT_LABEL: encryption_key}, label=ENCRYPTION_KEY_SECRET_LABEL + ) + secret.grant(relation) + self.encryption_key_secret_id = cast(str, secret.id) + + @classmethod + def get_shared_secret( + cls, model: ops.Model, shared_secret_id: Optional[str] + ) -> Optional[SecretStr]: + """Retrieve the shared secret corresponding to the shared_secret_id. + + Args: + model: the Juju model. + shared_secret_id: the secret ID for the shared secret. + + Returns: + the shared secret or None if not found. + """ + if not shared_secret_id: + return None + try: + secret = model.get_secret(id=shared_secret_id) + password = secret.get_content().get(SHARED_SECRET_CONTENT_LABEL) + if not password: + return None + return SecretStr(password) + except ops.SecretNotFoundError: + return None + + def to_relation_data(self, model: ops.Model, relation: ops.Relation) -> Dict[str, str]: + """Convert an instance of MatrixAuthProviderData to the relation representation. 
+ + Args: + model: the Juju model. + relation: relation to grant access to the secrets to. + + Returns: + Dict containing the representation. + """ + self.set_shared_secret_id(model, relation) + self.set_encryption_key_secret_id(model, relation) + return self.model_dump(exclude_unset=True) + + @classmethod + def from_relation(cls, model: ops.Model, relation: ops.Relation) -> "MatrixAuthProviderData": + """Initialize a new instance of the MatrixAuthProviderData class from the relation. + + Args: + model: the Juju model. + relation: the relation. + + Returns: + A MatrixAuthProviderData instance. + + Raises: + ValueError: if the value is not parseable. + """ + app = cast(ops.Application, relation.app) + relation_data = relation.data[app] + shared_secret_id = ( + (relation_data["shared_secret_id"]) + if "shared_secret_id" in relation_data + else None + ) + shared_secret = MatrixAuthProviderData.get_shared_secret(model, shared_secret_id) + homeserver = relation_data.get("homeserver") + if shared_secret is None or homeserver is None: + raise ValueError("Invalid relation data") + return MatrixAuthProviderData( + homeserver=homeserver, + shared_secret=shared_secret, + ) + + +class MatrixAuthRequirerData(BaseModel): + """Represent the MatrixAuth requirer data. + + Attributes: + registration: a generated app registration file. + """ + + registration: Optional[SecretStr] = Field(default=None, exclude=True) + + @classmethod + def get_encryption_key_secret( + cls, model: ops.Model, encryption_key_secret_id: Optional[str] + ) -> Optional[bytes]: + """Retrieve the encryption key secret corresponding to the encryption_key_secret_id. + + Args: + model: the Juju model. + encryption_key_secret_id: the secret ID for the encryption key secret. + + Returns: + the encryption key secret as bytes or None if not found. + """ + try: + if not encryption_key_secret_id: + # no secret ID means we are the provider side, which can fetch by label + secret = model.get_secret(label=ENCRYPTION_KEY_SECRET_LABEL) + else: + secret = model.get_secret(id=encryption_key_secret_id) + encryption_key = secret.get_content().get(ENCRYPTION_KEY_SECRET_CONTENT_LABEL) + if not encryption_key: + return None + return encryption_key.encode('utf-8') + except ops.SecretNotFoundError: + return None + + def to_relation_data(self, model: ops.Model, relation: ops.Relation) -> Dict[str, str]: + """Convert an instance of MatrixAuthRequirerData to the relation representation. + + Args: + model: the Juju model. + relation: relation to grant access to the secrets to. + + Returns: + Dict containing the representation. + + Raises: + ValueError: if the encryption key is not found. + """ + # get encryption key + app = cast(ops.Application, relation.app) + relation_data = relation.data[app] + encryption_key_secret_id = relation_data.get("encryption_key_secret_id") + encryption_key = MatrixAuthRequirerData.get_encryption_key_secret(model, encryption_key_secret_id) + if not encryption_key: + raise ValueError("Invalid relation data: encryption_key_secret_id not found") + # encrypt content + content = encrypt_string(key=encryption_key, plaintext=self.registration) + dumped_data = { + "registration_secret": content, + } + return dumped_data + + @classmethod + def from_relation(cls, model: ops.Model, relation: ops.Relation) -> Optional["MatrixAuthRequirerData"]: + """Get a MatrixAuthRequirerData from the relation data. + + Args: + model: the Juju model. + relation: the relation. + + Returns: + the MatrixAuthRequirerData instance, or None if the encryption key cannot be resolved. + + Raises: + ValueError: if the value is not parseable.
+ """ + # get encryption key + app = cast(ops.Application, relation.app) + relation_data = relation.data[app] + encryption_key_secret_id = relation_data.get("encryption_key_secret_id") + encryption_key = MatrixAuthRequirerData.get_encryption_key_secret(model, encryption_key_secret_id) + if not encryption_key: + logger.warning("Invalid relation data: encryption_key_secret_id not found") + return None + # decrypt content + registration_secret = relation_data.get("registration_secret") + if not registration_secret: + return MatrixAuthRequirerData() + return MatrixAuthRequirerData( + registration=decrypt_string(key=encryption_key, ciphertext=registration_secret), + ) + + +#### Events #### +class MatrixAuthRequestProcessed(ops.RelationEvent): + """MatrixAuth event emitted when a new request is processed.""" + + def get_matrix_auth_provider_relation_data(self) -> MatrixAuthProviderData: + """Get a MatrixAuthProviderData for the relation data. + + Returns: + the MatrixAuthProviderData for the relation data. + """ + return MatrixAuthProviderData.from_relation(self.framework.model, self.relation) + + +class MatrixAuthRequestReceived(ops.RelationEvent): + """MatrixAuth event emitted when a new request is made.""" + + +class MatrixAuthRequiresEvents(ops.CharmEvents): + """MatrixAuth requirer events. + + This class defines the events that a MatrixAuth requirer can emit. + + Attributes: + matrix_auth_request_processed: the MatrixAuthRequestProcessed. + """ + + matrix_auth_request_processed = ops.EventSource(MatrixAuthRequestProcessed) + + +class MatrixAuthProvidesEvents(ops.CharmEvents): + """MatrixAuth provider events. + + This class defines the events that a MatrixAuth provider can emit. + + Attributes: + matrix_auth_request_received: the MatrixAuthRequestReceived. + """ + + matrix_auth_request_received = ops.EventSource(MatrixAuthRequestReceived) + + +#### Provides and Requires #### +class MatrixAuthProvides(ops.Object): + """Provider side of the MatrixAuth relation. + + Attributes: + on: events the provider can emit. + """ + + on = MatrixAuthProvidesEvents() + + def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None: + """Construct. + + Args: + charm: the provider charm. + relation_name: the relation name. + """ + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def get_remote_relation_data(self) -> Optional[MatrixAuthRequirerData]: + """Retrieve the remote relation data. + + Returns: + MatrixAuthRequirerData: the relation data. + """ + relation = self.model.get_relation(self.relation_name) + return MatrixAuthRequirerData.from_relation(self.model, relation=relation) if relation else None + + def _is_remote_relation_data_valid(self, relation: ops.Relation) -> bool: + """Validate the relation data. + + Args: + relation: the relation to validate. + + Returns: + true: if the relation data is valid. + """ + try: + _ = MatrixAuthRequirerData.from_relation(self.model, relation=relation) + return True + except ValueError as ex: + logger.warning("Error validating the relation data %s", ex) + return False + + def _on_relation_changed(self, event: ops.RelationChangedEvent) -> None: + """Event emitted when the relation has changed. + + Args: + event: event triggering this handler. 
+ """ + assert event.relation.app + relation_data = event.relation.data[event.relation.app] + if relation_data and self._is_remote_relation_data_valid(event.relation): + self.on.matrix_auth_request_received.emit( + event.relation, app=event.app, unit=event.unit + ) + + def update_relation_data( + self, relation: ops.Relation, matrix_auth_provider_data: MatrixAuthProviderData + ) -> None: + """Update the relation data. + + Args: + relation: the relation for which to update the data. + matrix_auth_provider_data: a MatrixAuthProviderData instance wrapping the data to be + updated. + """ + relation_data = matrix_auth_provider_data.to_relation_data(self.model, relation) + relation.data[self.model.app].update(relation_data) + + +class MatrixAuthRequires(ops.Object): + """Requirer side of the MatrixAuth requires relation. + + Attributes: + on: events the provider can emit. + """ + + on = MatrixAuthRequiresEvents() + + def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None: + """Construct. + + Args: + charm: the provider charm. + relation_name: the relation name. + """ + super().__init__(charm, relation_name) + self.relation_name = relation_name + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def get_remote_relation_data(self) -> Optional[MatrixAuthProviderData]: + """Retrieve the remote relation data. + + Returns: + MatrixAuthProviderData: the relation data. + """ + relation = self.model.get_relation(self.relation_name) + return MatrixAuthProviderData.from_relation(self.model, relation=relation) if relation else None + + def _is_remote_relation_data_valid(self, relation: ops.Relation) -> bool: + """Validate the relation data. + + Args: + relation: the relation to validate. + + Returns: + true: if the relation data is valid. + """ + try: + _ = MatrixAuthProviderData.from_relation(self.model, relation=relation) + return True + except ValueError as ex: + logger.warning("Error validating the relation data %s", ex) + return False + + def _on_relation_changed(self, event: ops.RelationChangedEvent) -> None: + """Event emitted when the relation has changed. + + Args: + event: event triggering this handler. + """ + assert event.relation.app + relation_data = event.relation.data[event.relation.app] + if relation_data and self._is_remote_relation_data_valid(event.relation): + self.on.matrix_auth_request_processed.emit( + event.relation, app=event.app, unit=event.unit + ) + + def update_relation_data( + self, + relation: ops.Relation, + matrix_auth_requirer_data: MatrixAuthRequirerData, + ) -> None: + """Update the relation data. + + Args: + relation: the relation for which to update the data. + matrix_auth_requirer_data: MatrixAuthRequirerData wrapping the data to be updated. + """ + relation_data = matrix_auth_requirer_data.to_relation_data(self.model, relation) + relation.data[self.model.app].update(relation_data) diff --git a/lib/charms/traefik_k8s/v1/ingress.py b/lib/charms/traefik_k8s/v1/ingress.py deleted file mode 100644 index b5afac33..00000000 --- a/lib/charms/traefik_k8s/v1/ingress.py +++ /dev/null @@ -1,551 +0,0 @@ -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. - -r"""# [DEPRECATED!] Interface Library for ingress. - -This is a DEPRECATED version of the Ingress interface library. - -It was dropped in favour of ingress v2 because it contained a data model bug that -could not be fixed while maintaining backwards compatibility. 
- -What the bug means, is that by using the ingress v1 interface you are not able to obtain -unit-level load balancing, but instead, all traffic will be routed to your leader unit. -Which is not what you most likely want. - -If it IS what you want after all, consider opening a feature request for explicit -'ingress-per-leader' support. -""" - -import logging -import socket -import typing -from typing import Any, Dict, Optional, Tuple, Union - -import yaml -from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent -from ops.framework import EventSource, Object, ObjectEvents, StoredState -from ops.model import ModelError, Relation - -# The unique Charmhub library identifier, never change it -LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 18 - -DEFAULT_RELATION_NAME = "ingress" -RELATION_INTERFACE = "ingress" - -log = logging.getLogger(__name__) - -try: - import jsonschema - - DO_VALIDATION = True -except ModuleNotFoundError: - log.warning( - "The `ingress` library needs the `jsonschema` package to be able " - "to do runtime data validation; without it, it will still work but validation " - "will be disabled. \n" - "It is recommended to add `jsonschema` to the 'requirements.txt' of your charm, " - "which will enable this feature." - ) - DO_VALIDATION = False - -INGRESS_REQUIRES_APP_SCHEMA = { - "type": "object", - "properties": { - "model": {"type": "string"}, - "name": {"type": "string"}, - "host": {"type": "string"}, - "port": {"type": "string"}, - "strip-prefix": {"type": "string"}, - "redirect-https": {"type": "string"}, - }, - "required": ["model", "name", "host", "port"], -} - -INGRESS_PROVIDES_APP_SCHEMA = { - "type": "object", - "properties": { - "ingress": {"type": "object", "properties": {"url": {"type": "string"}}}, - }, - "required": ["ingress"], -} - -try: - from typing import TypedDict -except ImportError: - from typing_extensions import TypedDict # py35 compatibility - -# Model of the data a unit implementing the requirer will need to provide. -RequirerData = TypedDict( - "RequirerData", - { - "model": str, - "name": str, - "host": str, - "port": int, - "strip-prefix": bool, - "redirect-https": bool, - }, - total=False, -) -# Provider ingress data model. -ProviderIngressData = TypedDict("ProviderIngressData", {"url": str}) -# Provider application databag model. -ProviderApplicationData = TypedDict("ProviderApplicationData", {"ingress": ProviderIngressData}) # type: ignore - - -def _validate_data(data, schema): - """Checks whether `data` matches `schema`. - - Will raise DataValidationError if the data is not valid, else return None. 
- """ - if not DO_VALIDATION: - return - try: - jsonschema.validate(instance=data, schema=schema) # pyright: ignore[reportUnboundVariable] - except jsonschema.ValidationError as e: # pyright: ignore[reportUnboundVariable] - raise DataValidationError(data, schema) from e - - -class DataValidationError(RuntimeError): - """Raised when data validation fails on IPU relation data.""" - - -class _IngressPerAppBase(Object): - """Base class for IngressPerUnit interface classes.""" - - def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): - super().__init__(charm, relation_name + "_V1") - - self.charm: CharmBase = charm - self.relation_name = relation_name - self.app = self.charm.app - self.unit = self.charm.unit - - observe = self.framework.observe - rel_events = charm.on[relation_name] - observe(rel_events.relation_created, self._handle_relation) - observe(rel_events.relation_joined, self._handle_relation) - observe(rel_events.relation_changed, self._handle_relation) - observe(rel_events.relation_broken, self._handle_relation_broken) - observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore - observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore - - @property - def relations(self): - """The list of Relation instances associated with this endpoint.""" - return list(self.charm.model.relations[self.relation_name]) - - def _handle_relation(self, event): - """Subclasses should implement this method to handle a relation update.""" - pass - - def _handle_relation_broken(self, event): - """Subclasses should implement this method to handle a relation breaking.""" - pass - - def _handle_upgrade_or_leader(self, event): - """Subclasses should implement this method to handle upgrades or leadership change.""" - pass - - -class _IPAEvent(RelationEvent): - __args__: Tuple[str, ...] 
= () - __optional_kwargs__: Dict[str, Any] = {} - - @classmethod - def __attrs__(cls): - return cls.__args__ + tuple(cls.__optional_kwargs__.keys()) - - def __init__(self, handle, relation, *args, **kwargs): - super().__init__(handle, relation) - - if not len(self.__args__) == len(args): - raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args))) - - for attr, obj in zip(self.__args__, args): - setattr(self, attr, obj) - for attr, default in self.__optional_kwargs__.items(): - obj = kwargs.get(attr, default) - setattr(self, attr, obj) - - def snapshot(self): - dct = super().snapshot() - for attr in self.__attrs__(): - obj = getattr(self, attr) - try: - dct[attr] = obj - except ValueError as e: - raise ValueError( - "cannot automagically serialize {}: " - "override this method and do it " - "manually.".format(obj) - ) from e - - return dct - - def restore(self, snapshot) -> None: - super().restore(snapshot) - for attr, obj in snapshot.items(): - setattr(self, attr, obj) - - -class IngressPerAppDataProvidedEvent(_IPAEvent): - """Event representing that ingress data has been provided for an app.""" - - __args__ = ("name", "model", "port", "host", "strip_prefix", "redirect_https") - - if typing.TYPE_CHECKING: - name: Optional[str] = None - model: Optional[str] = None - port: Optional[str] = None - host: Optional[str] = None - strip_prefix: bool = False - redirect_https: bool = False - - -class IngressPerAppDataRemovedEvent(RelationEvent): - """Event representing that ingress data has been removed for an app.""" - - -class IngressPerAppProviderEvents(ObjectEvents): - """Container for IPA Provider events.""" - - data_provided = EventSource(IngressPerAppDataProvidedEvent) - data_removed = EventSource(IngressPerAppDataRemovedEvent) - - -class IngressPerAppProvider(_IngressPerAppBase): - """Implementation of the provider of ingress.""" - - on = IngressPerAppProviderEvents() # type: ignore - - def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): - """Constructor for IngressPerAppProvider. - - Args: - charm: The charm that is instantiating the instance. - relation_name: The name of the relation endpoint to bind to - (defaults to "ingress"). - """ - super().__init__(charm, relation_name) - - def _handle_relation(self, event): - # created, joined or changed: if remote side has sent the required data: - # notify listeners. - if self.is_ready(event.relation): - data = self._get_requirer_data(event.relation) - self.on.data_provided.emit( # type: ignore - event.relation, - data["name"], - data["model"], - data["port"], - data["host"], - data.get("strip-prefix", False), - data.get("redirect-https", False), - ) - - def _handle_relation_broken(self, event): - self.on.data_removed.emit(event.relation) # type: ignore - - def wipe_ingress_data(self, relation: Relation): - """Clear ingress data from relation.""" - assert self.unit.is_leader(), "only leaders can do this" - try: - relation.data - except ModelError as e: - log.warning( - "error {} accessing relation data for {!r}. " - "Probably a ghost of a dead relation is still " - "lingering around.".format(e, relation.name) - ) - return - del relation.data[self.app]["ingress"] - - def _get_requirer_data(self, relation: Relation) -> RequirerData: # type: ignore - """Fetch and validate the requirer's app databag. - - For convenience, we convert 'port' to integer. 
- """ - if not relation.app or not relation.app.name: # type: ignore - # Handle edge case where remote app name can be missing, e.g., - # relation_broken events. - # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 - return {} - - databag = relation.data[relation.app] - remote_data: Dict[str, Union[int, str]] = {} - for k in ("port", "host", "model", "name", "mode", "strip-prefix", "redirect-https"): - v = databag.get(k) - if v is not None: - remote_data[k] = v - _validate_data(remote_data, INGRESS_REQUIRES_APP_SCHEMA) - remote_data["port"] = int(remote_data["port"]) - remote_data["strip-prefix"] = bool(remote_data.get("strip-prefix", "false") == "true") - remote_data["redirect-https"] = bool(remote_data.get("redirect-https", "false") == "true") - return typing.cast(RequirerData, remote_data) - - def get_data(self, relation: Relation) -> RequirerData: # type: ignore - """Fetch the remote app's databag, i.e. the requirer data.""" - return self._get_requirer_data(relation) - - def is_ready(self, relation: Optional[Relation] = None): - """The Provider is ready if the requirer has sent valid data.""" - if not relation: - return any(map(self.is_ready, self.relations)) - - try: - return bool(self._get_requirer_data(relation)) - except DataValidationError as e: - log.info("Provider not ready; validation error encountered: %s" % str(e)) - return False - - def _provided_url(self, relation: Relation) -> ProviderIngressData: # type: ignore - """Fetch and validate this app databag; return the ingress url.""" - if not relation.app or not relation.app.name or not self.unit.is_leader(): # type: ignore - # Handle edge case where remote app name can be missing, e.g., - # relation_broken events. - # Also, only leader units can read own app databags. - # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 - return typing.cast(ProviderIngressData, {}) # noqa - - # fetch the provider's app databag - raw_data = relation.data[self.app].get("ingress") - if not raw_data: - raise RuntimeError("This application did not `publish_url` yet.") - - ingress: ProviderIngressData = yaml.safe_load(raw_data) - _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) - return ingress - - def publish_url(self, relation: Relation, url: str): - """Publish to the app databag the ingress url.""" - ingress = {"url": url} - ingress_data = {"ingress": ingress} - _validate_data(ingress_data, INGRESS_PROVIDES_APP_SCHEMA) - relation.data[self.app]["ingress"] = yaml.safe_dump(ingress) - - @property - def proxied_endpoints(self): - """Returns the ingress settings provided to applications by this IngressPerAppProvider. 
- - For example, when this IngressPerAppProvider has provided the - `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary - will be: - - ``` - { - "my-app": { - "url": "http://foo.bar/my-model.my-app" - } - } - ``` - """ - results = {} - - for ingress_relation in self.relations: - assert ( - ingress_relation.app - ), "no app in relation (shouldn't happen)" # for type checker - results[ingress_relation.app.name] = self._provided_url(ingress_relation) - - return results - - -class IngressPerAppReadyEvent(_IPAEvent): - """Event representing that ingress for an app is ready.""" - - __args__ = ("url",) - if typing.TYPE_CHECKING: - url: Optional[str] = None - - -class IngressPerAppRevokedEvent(RelationEvent): - """Event representing that ingress for an app has been revoked.""" - - -class IngressPerAppRequirerEvents(ObjectEvents): - """Container for IPA Requirer events.""" - - ready = EventSource(IngressPerAppReadyEvent) - revoked = EventSource(IngressPerAppRevokedEvent) - - -class IngressPerAppRequirer(_IngressPerAppBase): - """Implementation of the requirer of the ingress relation.""" - - on = IngressPerAppRequirerEvents() # type: ignore - - # used to prevent spurious urls to be sent out if the event we're currently - # handling is a relation-broken one. - _stored = StoredState() - - def __init__( - self, - charm: CharmBase, - relation_name: str = DEFAULT_RELATION_NAME, - *, - host: Optional[str] = None, - port: Optional[int] = None, - strip_prefix: bool = False, - redirect_https: bool = False, - ): - """Constructor for IngressRequirer. - - The request args can be used to specify the ingress properties when the - instance is created. If any are set, at least `port` is required, and - they will be sent to the ingress provider as soon as it is available. - All request args must be given as keyword args. - - Args: - charm: the charm that is instantiating the library. - relation_name: the name of the relation endpoint to bind to (defaults to `ingress`); - relation must be of interface type `ingress` and have "limit: 1") - host: Hostname to be used by the ingress provider to address the requiring - application; if unspecified, the default Kubernetes service name will be used. - strip_prefix: configure Traefik to strip the path prefix. - redirect_https: redirect incoming requests to the HTTPS. - - Request Args: - port: the port of the service - """ - log.warning( - "The ``ingress v1`` library is DEPRECATED in favour of ``ingress v2`` " - "and no longer maintained. This library does NOT in fact implement the " - "``ingress`` interface, but, instead, the ``ingress-per-leader`` one." - "Please bump with ``charmcraft fetch-lib charms.traefik_k8s.v2.ingress``." - ) - - super().__init__(charm, relation_name) - self.charm: CharmBase = charm - self.relation_name = relation_name - self._strip_prefix = strip_prefix - self._redirect_https = redirect_https - - self._stored.set_default(current_url=None) # type: ignore - - # if instantiated with a port, and we are related, then - # we immediately publish our ingress data to speed up the process. 
- if port: - self._auto_data = host, port - else: - self._auto_data = None - - def _handle_relation(self, event): - # created, joined or changed: if we have auto data: publish it - self._publish_auto_data(event.relation) - - if self.is_ready(): - # Avoid spurious events, emit only when there is a NEW URL available - new_url = ( - None - if isinstance(event, RelationBrokenEvent) - else self._get_url_from_relation_data() - ) - if self._stored.current_url != new_url: # type: ignore - self._stored.current_url = new_url # type: ignore - self.on.ready.emit(event.relation, new_url) # type: ignore - - def _handle_relation_broken(self, event): - self._stored.current_url = None # type: ignore - self.on.revoked.emit(event.relation) # type: ignore - - def _handle_upgrade_or_leader(self, event): - """On upgrade/leadership change: ensure we publish the data we have.""" - for relation in self.relations: - self._publish_auto_data(relation) - - def is_ready(self): - """The Requirer is ready if the Provider has sent valid data.""" - try: - return bool(self._get_url_from_relation_data()) - except DataValidationError as e: - log.info("Requirer not ready; validation error encountered: %s" % str(e)) - return False - - def _publish_auto_data(self, relation: Relation): - if self._auto_data and self.unit.is_leader(): - host, port = self._auto_data - self.provide_ingress_requirements(host=host, port=port) - - def provide_ingress_requirements(self, *, host: Optional[str] = None, port: int): - """Publishes the data that Traefik needs to provide ingress. - - NB only the leader unit is supposed to do this. - - Args: - host: Hostname to be used by the ingress provider to address the - requirer unit; if unspecified, FQDN will be used instead - port: the port of the service (required) - """ - # get only the leader to publish the data since we only - # require one unit to publish it -- it will not differ between units, - # unlike in ingress-per-unit. - assert self.unit.is_leader(), "only leaders should do this." - assert self.relation, "no relation" - - if not host: - host = socket.getfqdn() - - data = { - "model": self.model.name, - "name": self.app.name, - "host": host, - "port": str(port), - } - - if self._strip_prefix: - data["strip-prefix"] = "true" - - if self._redirect_https: - data["redirect-https"] = "true" - - _validate_data(data, INGRESS_REQUIRES_APP_SCHEMA) - self.relation.data[self.app].update(data) - - @property - def relation(self): - """The established Relation instance, or None.""" - return self.relations[0] if self.relations else None - - def _get_url_from_relation_data(self) -> Optional[str]: - """The full ingress URL to reach the current unit. - - Returns None if the URL isn't available yet. - """ - relation = self.relation - if not relation or not relation.app: - return None - - # fetch the provider's app databag - try: - raw = relation.data.get(relation.app, {}).get("ingress") - except ModelError as e: - log.debug( - f"Error {e} attempting to read remote app data; " - f"probably we are in a relation_departed hook" - ) - return None - - if not raw: - return None - - ingress: ProviderIngressData = yaml.safe_load(raw) - _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) - return ingress["url"] - - @property - def url(self) -> Optional[str]: - """The full ingress URL to reach the current unit. - - Returns None if the URL isn't available yet. 
- """ - data = self._stored.current_url or self._get_url_from_relation_data() # type: ignore - assert isinstance(data, (str, type(None))) # for static checker - return data diff --git a/lib/charms/traefik_k8s/v2/ingress.py b/lib/charms/traefik_k8s/v2/ingress.py new file mode 100644 index 00000000..bb7ac5ed --- /dev/null +++ b/lib/charms/traefik_k8s/v2/ingress.py @@ -0,0 +1,849 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +r"""# Interface Library for ingress. + +This library wraps relation endpoints using the `ingress` interface +and provides a Python API for both requesting and providing per-application +ingress, with load-balancing occurring across all units. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. + +```shell +cd some-charm +charmcraft fetch-lib charms.traefik_k8s.v2.ingress +``` + +In the `metadata.yaml` of the charm, add the following: + +```yaml +requires: + ingress: + interface: ingress + limit: 1 +``` + +Then, to initialise the library: + +```python +from charms.traefik_k8s.v2.ingress import (IngressPerAppRequirer, + IngressPerAppReadyEvent, IngressPerAppRevokedEvent) + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.ingress = IngressPerAppRequirer(self, port=80) + # The following event is triggered when the ingress URL to be used + # by this deployment of the `SomeCharm` is ready (or changes). + self.framework.observe( + self.ingress.on.ready, self._on_ingress_ready + ) + self.framework.observe( + self.ingress.on.revoked, self._on_ingress_revoked + ) + + def _on_ingress_ready(self, event: IngressPerAppReadyEvent): + logger.info("This app's ingress URL: %s", event.url) + + def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): + logger.info("This app no longer has ingress") +""" +import ipaddress +import json +import logging +import socket +import typing +from dataclasses import dataclass +from functools import partial +from typing import Any, Callable, Dict, List, MutableMapping, Optional, Sequence, Tuple, Union + +import pydantic +from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent +from ops.framework import EventSource, Object, ObjectEvents, StoredState +from ops.model import ModelError, Relation, Unit +from pydantic import AnyHttpUrl, BaseModel, Field + +# The unique Charmhub library identifier, never change it +LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" + +# Increment this major API version when introducing breaking changes +LIBAPI = 2 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 14 + +PYDEPS = ["pydantic"] + +DEFAULT_RELATION_NAME = "ingress" +RELATION_INTERFACE = "ingress" + +log = logging.getLogger(__name__) +BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"} + +PYDANTIC_IS_V1 = int(pydantic.version.VERSION.split(".")[0]) < 2 +if PYDANTIC_IS_V1: + from pydantic import validator + + input_validator = partial(validator, pre=True) + + class DatabagModel(BaseModel): # type: ignore + """Base databag model.""" + + class Config: + """Pydantic config.""" + + allow_population_by_field_name = True + """Allow instantiating this class by field name (instead of forcing alias).""" + + _NEST_UNDER = None + + @classmethod + def load(cls, databag: MutableMapping): + """Load this model from a Juju databag.""" + if cls._NEST_UNDER: + return cls.parse_obj(json.loads(databag[cls._NEST_UNDER])) + + try: + data = { + 
k: json.loads(v) + for k, v in databag.items() + # Don't attempt to parse model-external values + if k in {f.alias for f in cls.__fields__.values()} # type: ignore + } + except json.JSONDecodeError as e: + msg = f"invalid databag contents: expecting json. {databag}" + log.error(msg) + raise DataValidationError(msg) from e + + try: + return cls.parse_raw(json.dumps(data)) # type: ignore + except pydantic.ValidationError as e: + msg = f"failed to validate databag: {databag}" + log.debug(msg, exc_info=True) + raise DataValidationError(msg) from e + + def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): + """Write the contents of this model to Juju databag. + + :param databag: the databag to write the data to. + :param clear: ensure the databag is cleared before writing it. + """ + if clear and databag: + databag.clear() + + if databag is None: + databag = {} + + if self._NEST_UNDER: + databag[self._NEST_UNDER] = self.json(by_alias=True, exclude_defaults=True) + return databag + + for key, value in self.dict(by_alias=True, exclude_defaults=True).items(): # type: ignore + databag[key] = json.dumps(value) + + return databag + +else: + from pydantic import ConfigDict, field_validator + + input_validator = partial(field_validator, mode="before") + + class DatabagModel(BaseModel): + """Base databag model.""" + + model_config = ConfigDict( + # tolerate additional keys in databag + extra="ignore", + # Allow instantiating this class by field name (instead of forcing alias). + populate_by_name=True, + # Custom config key: whether to nest the whole datastructure (as json) + # under a field or spread it out at the toplevel. + _NEST_UNDER=None, + ) # type: ignore + """Pydantic config.""" + + @classmethod + def load(cls, databag: MutableMapping): + """Load this model from a Juju databag.""" + nest_under = cls.model_config.get("_NEST_UNDER") + if nest_under: + return cls.model_validate(json.loads(databag[nest_under])) # type: ignore + + try: + data = { + k: json.loads(v) + for k, v in databag.items() + # Don't attempt to parse model-external values + if k in {(f.alias or n) for n, f in cls.model_fields.items()} # type: ignore + } + except json.JSONDecodeError as e: + msg = f"invalid databag contents: expecting json. {databag}" + log.error(msg) + raise DataValidationError(msg) from e + + try: + return cls.model_validate_json(json.dumps(data)) # type: ignore + except pydantic.ValidationError as e: + msg = f"failed to validate databag: {databag}" + log.debug(msg, exc_info=True) + raise DataValidationError(msg) from e + + def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): + """Write the contents of this model to Juju databag. + + :param databag: the databag to write the data to. + :param clear: ensure the databag is cleared before writing it. 
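+
+        A minimal usage sketch (illustrative only, not part of the relation
+        interface; a plain dict stands in for ``relation.data[app]``, which
+        holds JSON-encoded string values)::
+
+            databag = {}
+            IngressRequirerAppData(model="my-model", name="my-app", port=8080).dump(databag)
+            # databag == {"model": '"my-model"', "name": '"my-app"', "port": "8080"}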
+ """ + if clear and databag: + databag.clear() + + if databag is None: + databag = {} + nest_under = self.model_config.get("_NEST_UNDER") + if nest_under: + databag[nest_under] = self.model_dump_json( # type: ignore + by_alias=True, + # skip keys whose values are default + exclude_defaults=True, + ) + return databag + + dct = self.model_dump(mode="json", by_alias=True, exclude_defaults=True) # type: ignore + databag.update({k: json.dumps(v) for k, v in dct.items()}) + return databag + + +# todo: import these models from charm-relation-interfaces/ingress/v2 instead of redeclaring them +class IngressUrl(BaseModel): + """Ingress url schema.""" + + url: AnyHttpUrl + + +class IngressProviderAppData(DatabagModel): + """Ingress application databag schema.""" + + ingress: IngressUrl + + +class ProviderSchema(BaseModel): + """Provider schema for Ingress.""" + + app: IngressProviderAppData + + +class IngressRequirerAppData(DatabagModel): + """Ingress requirer application databag model.""" + + model: str = Field(description="The model the application is in.") + name: str = Field(description="the name of the app requesting ingress.") + port: int = Field(description="The port the app wishes to be exposed.") + + # fields on top of vanilla 'ingress' interface: + strip_prefix: Optional[bool] = Field( + default=False, + description="Whether to strip the prefix from the ingress url.", + alias="strip-prefix", + ) + redirect_https: Optional[bool] = Field( + default=False, + description="Whether to redirect http traffic to https.", + alias="redirect-https", + ) + + scheme: Optional[str] = Field( + default="http", description="What scheme to use in the generated ingress url" + ) + + @input_validator("scheme") + def validate_scheme(cls, scheme): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate scheme arg.""" + if scheme not in {"http", "https", "h2c"}: + raise ValueError("invalid scheme: should be one of `http|https|h2c`") + return scheme + + @input_validator("port") + def validate_port(cls, port): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate port.""" + assert isinstance(port, int), type(port) + assert 0 < port < 65535, "port out of TCP range" + return port + + +class IngressRequirerUnitData(DatabagModel): + """Ingress requirer unit databag model.""" + + host: str = Field(description="Hostname at which the unit is reachable.") + ip: Optional[str] = Field( + None, + description="IP at which the unit is reachable, " + "IP can only be None if the IP information can't be retrieved from juju.", + ) + + @input_validator("host") + def validate_host(cls, host): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate host.""" + assert isinstance(host, str), type(host) + return host + + @input_validator("ip") + def validate_ip(cls, ip): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate ip.""" + if ip is None: + return None + if not isinstance(ip, str): + raise TypeError(f"got ip of type {type(ip)} instead of expected str") + try: + ipaddress.IPv4Address(ip) + return ip + except ipaddress.AddressValueError: + pass + try: + ipaddress.IPv6Address(ip) + return ip + except ipaddress.AddressValueError: + raise ValueError(f"{ip!r} is not a valid ip address") + + +class RequirerSchema(BaseModel): + """Requirer schema for Ingress.""" + + app: IngressRequirerAppData + unit: IngressRequirerUnitData + + +class IngressError(RuntimeError): + """Base class for custom errors raised by this library.""" + + +class NotReadyError(IngressError): + """Raised when a relation is not ready.""" 
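+
+# A minimal sketch of how these errors surface in practice (an illustrative
+# example, not part of the upstream interface): a requirer databag that is
+# missing required fields fails to load with DataValidationError (below).
+#
+#     try:
+#         IngressRequirerAppData.load({"port": "8080"})
+#     except DataValidationError:
+#         ...  # the requirer has not published valid data yet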
+
+
+class DataValidationError(IngressError):
+    """Raised when data validation fails on IPU relation data."""
+
+
+class _IngressPerAppBase(Object):
+    """Base class for IngressPerApp interface classes."""
+
+    def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
+        super().__init__(charm, relation_name)
+
+        self.charm: CharmBase = charm
+        self.relation_name = relation_name
+        self.app = self.charm.app
+        self.unit = self.charm.unit
+
+        observe = self.framework.observe
+        rel_events = charm.on[relation_name]
+        observe(rel_events.relation_created, self._handle_relation)
+        observe(rel_events.relation_joined, self._handle_relation)
+        observe(rel_events.relation_changed, self._handle_relation)
+        observe(rel_events.relation_departed, self._handle_relation)
+        observe(rel_events.relation_broken, self._handle_relation_broken)
+        observe(charm.on.leader_elected, self._handle_upgrade_or_leader)  # type: ignore
+        observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader)  # type: ignore
+
+    @property
+    def relations(self):
+        """The list of Relation instances associated with this endpoint."""
+        return list(self.charm.model.relations[self.relation_name])
+
+    def _handle_relation(self, event):
+        """Subclasses should implement this method to handle a relation update."""
+        pass
+
+    def _handle_relation_broken(self, event):
+        """Subclasses should implement this method to handle a relation breaking."""
+        pass
+
+    def _handle_upgrade_or_leader(self, event):
+        """Subclasses should implement this method to handle upgrades or leadership change."""
+        pass
+
+
+class _IPAEvent(RelationEvent):
+    __args__: Tuple[str, ...] = ()
+    __optional_kwargs__: Dict[str, Any] = {}
+
+    @classmethod
+    def __attrs__(cls):
+        return cls.__args__ + tuple(cls.__optional_kwargs__.keys())
+
+    def __init__(self, handle, relation, *args, **kwargs):
+        super().__init__(handle, relation)
+
+        if not len(self.__args__) == len(args):
+            raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args)))
+
+        for attr, obj in zip(self.__args__, args):
+            setattr(self, attr, obj)
+        for attr, default in self.__optional_kwargs__.items():
+            obj = kwargs.get(attr, default)
+            setattr(self, attr, obj)
+
+    def snapshot(self):
+        dct = super().snapshot()
+        for attr in self.__attrs__():
+            obj = getattr(self, attr)
+            try:
+                dct[attr] = obj
+            except ValueError as e:
+                raise ValueError(
+                    "cannot automagically serialize {}: "
+                    "override this method and do it "
+                    "manually.".format(obj)
+                ) from e
+
+        return dct
+
+    def restore(self, snapshot) -> None:
+        super().restore(snapshot)
+        for attr, obj in snapshot.items():
+            setattr(self, attr, obj)
+
+
+class IngressPerAppDataProvidedEvent(_IPAEvent):
+    """Event representing that ingress data has been provided for an app."""
+
+    __args__ = ("name", "model", "hosts", "strip_prefix", "redirect_https")
+
+    if typing.TYPE_CHECKING:
+        name: Optional[str] = None
+        model: Optional[str] = None
+        # sequence of hostname, port dicts
+        hosts: Sequence["IngressRequirerUnitData"] = ()
+        strip_prefix: bool = False
+        redirect_https: bool = False
+
+
+class IngressPerAppDataRemovedEvent(RelationEvent):
+    """Event representing that ingress data has been removed for an app."""
+
+
+class IngressPerAppProviderEvents(ObjectEvents):
+    """Container for IPA Provider events."""
+
+    data_provided = EventSource(IngressPerAppDataProvidedEvent)
+    data_removed = EventSource(IngressPerAppDataRemovedEvent)
+
+
+@dataclass
+class IngressRequirerData:
+    """Data exposed by the ingress requirer to the provider."""
+
+    app: "IngressRequirerAppData"
+    units: List["IngressRequirerUnitData"]
+
+
+class IngressPerAppProvider(_IngressPerAppBase):
+    """Implementation of the provider of ingress."""
+
+    on = IngressPerAppProviderEvents()  # type: ignore
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str = DEFAULT_RELATION_NAME,
+    ):
+        """Constructor for IngressPerAppProvider.
+
+        Args:
+            charm: The charm that is instantiating the instance.
+            relation_name: The name of the relation endpoint to bind to
+                (defaults to "ingress").
+        """
+        super().__init__(charm, relation_name)
+
+    def _handle_relation(self, event):
+        # created, joined or changed: if remote side has sent the required data:
+        # notify listeners.
+        if self.is_ready(event.relation):
+            data = self.get_data(event.relation)
+            self.on.data_provided.emit(  # type: ignore
+                event.relation,
+                data.app.name,
+                data.app.model,
+                [
+                    unit.dict() if PYDANTIC_IS_V1 else unit.model_dump(mode="json")
+                    for unit in data.units
+                ],
+                data.app.strip_prefix or False,
+                data.app.redirect_https or False,
+            )
+
+    def _handle_relation_broken(self, event):
+        self.on.data_removed.emit(event.relation)  # type: ignore
+
+    def wipe_ingress_data(self, relation: Relation):
+        """Clear ingress data from relation."""
+        assert self.unit.is_leader(), "only leaders can do this"
+        try:
+            relation.data
+        except ModelError as e:
+            log.warning(
+                "error {} accessing relation data for {!r}. "
+                "Probably a ghost of a dead relation is still "
+                "lingering around.".format(e, relation.name)
+            )
+            return
+        del relation.data[self.app]["ingress"]
+
+    def _get_requirer_units_data(self, relation: Relation) -> List["IngressRequirerUnitData"]:
+        """Fetch and validate the requirer's units' databags."""
+        out: List["IngressRequirerUnitData"] = []
+
+        unit: Unit
+        for unit in relation.units:
+            databag = relation.data[unit]
+            try:
+                data = IngressRequirerUnitData.load(databag)
+                out.append(data)
+            except pydantic.ValidationError:
+                log.info(f"failed to validate remote unit data for {unit}")
+                raise
+        return out
+
+    @staticmethod
+    def _get_requirer_app_data(relation: Relation) -> "IngressRequirerAppData":
+        """Fetch and validate the requirer's app databag."""
+        app = relation.app
+        if app is None:
+            raise NotReadyError(relation)
+
+        databag = relation.data[app]
+        return IngressRequirerAppData.load(databag)
+
+    def get_data(self, relation: Relation) -> IngressRequirerData:
+        """Fetch the remote (requirer) app and units' databags."""
+        try:
+            return IngressRequirerData(
+                self._get_requirer_app_data(relation), self._get_requirer_units_data(relation)
+            )
+        except (pydantic.ValidationError, DataValidationError) as e:
+            raise DataValidationError("failed to validate ingress requirer data") from e
+
+    def is_ready(self, relation: Optional[Relation] = None):
+        """The Provider is ready if the requirer has sent valid data."""
+        if not relation:
+            return any(map(self.is_ready, self.relations))
+
+        try:
+            self.get_data(relation)
+        except (DataValidationError, NotReadyError) as e:
+            log.debug("Provider not ready; validation error encountered: %s" % str(e))
+            return False
+        return True
+
+    def _published_url(self, relation: Relation) -> Optional["IngressProviderAppData"]:
+        """Fetch and validate this app databag; return the ingress url."""
+        if not self.is_ready(relation) or not self.unit.is_leader():
+            # Handle edge case where remote app name can be missing, e.g.,
+            # relation_broken events.
+            # Also, only leader units can read own app databags.
+            # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34
+            return None
+
+        # fetch the provider's app databag
+        databag = relation.data[self.app]
+        if not databag.get("ingress"):
+            raise NotReadyError("This application did not `publish_url` yet.")
+
+        return IngressProviderAppData.load(databag)
+
+    def publish_url(self, relation: Relation, url: str):
+        """Publish to the app databag the ingress url."""
+        ingress_url = {"url": url}
+        IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app])  # type: ignore
+
+    @property
+    def proxied_endpoints(self) -> Dict[str, Dict[str, str]]:
+        """Returns the ingress settings provided to applications by this IngressPerAppProvider.
+
+        For example, when this IngressPerAppProvider has provided the
+        `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary
+        will be:
+
+        ```
+        {
+            "my-app": {
+                "url": "http://foo.bar/my-model.my-app"
+            }
+        }
+        ```
+        """
+        results: Dict[str, Dict[str, str]] = {}
+
+        for ingress_relation in self.relations:
+            if not ingress_relation.app:
+                log.warning(
+                    f"no app in relation {ingress_relation} when fetching proxied endpoints: skipping"
+                )
+                continue
+            try:
+                ingress_data = self._published_url(ingress_relation)
+            except NotReadyError:
+                log.warning(
+                    f"no published url found in {ingress_relation}: "
+                    f"traefik didn't publish_url to this relation yet."
+                )
+                continue
+
+            if not ingress_data:
+                log.warning(f"relation {ingress_relation} not ready yet: try again later.")
+                continue
+            if PYDANTIC_IS_V1:
+                results[ingress_relation.app.name] = ingress_data.ingress.dict()
+            else:
+                results[ingress_relation.app.name] = ingress_data.ingress.model_dump(mode="json")
+        return results
+
+
+class IngressPerAppReadyEvent(_IPAEvent):
+    """Event representing that ingress for an app is ready."""
+
+    __args__ = ("url",)
+    if typing.TYPE_CHECKING:
+        url: Optional[str] = None
+
+
+class IngressPerAppRevokedEvent(RelationEvent):
+    """Event representing that ingress for an app has been revoked."""
+
+
+class IngressPerAppRequirerEvents(ObjectEvents):
+    """Container for IPA Requirer events."""
+
+    ready = EventSource(IngressPerAppReadyEvent)
+    revoked = EventSource(IngressPerAppRevokedEvent)
+
+
+class IngressPerAppRequirer(_IngressPerAppBase):
+    """Implementation of the requirer of the ingress relation."""
+
+    on = IngressPerAppRequirerEvents()  # type: ignore
+
+    # used to prevent spurious URLs from being sent out if the event we're
+    # currently handling is a relation-broken one.
+    _stored = StoredState()
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str = DEFAULT_RELATION_NAME,
+        *,
+        host: Optional[str] = None,
+        ip: Optional[str] = None,
+        port: Optional[int] = None,
+        strip_prefix: bool = False,
+        redirect_https: bool = False,
+        # fixme: this is horrible UX.
+        # shall we switch to manually calling provide_ingress_requirements with all args when ready?
+        scheme: Union[Callable[[], str], str] = lambda: "http",
+    ):
+        """Constructor for IngressRequirer.
+
+        The request args can be used to specify the ingress properties when the
+        instance is created. If any are set, at least `port` is required, and
+        they will be sent to the ingress provider as soon as it is available.
+        All request args must be given as keyword args.
+
+        Args:
+            charm: the charm that is instantiating the library.
+            relation_name: the name of the relation endpoint to bind to (defaults to `ingress`);
+                the relation must be of interface type `ingress` and have "limit: 1".
+            host: Hostname to be used by the ingress provider to address the requiring
+                application; if unspecified, the default Kubernetes service name will be used.
+            ip: Alternative addressing method other than host to be used by the ingress provider;
+                if unspecified, binding address from juju network API will be used.
+            strip_prefix: configure Traefik to strip the path prefix.
+            redirect_https: redirect incoming requests to HTTPS.
+            scheme: callable returning the scheme to use when constructing the ingress url.
+                Or a string, if the scheme is known and stable at charm-init-time.
+
+        Request Args:
+            port: the port of the service
+        """
+        super().__init__(charm, relation_name)
+        self.charm: CharmBase = charm
+        self.relation_name = relation_name
+        self._strip_prefix = strip_prefix
+        self._redirect_https = redirect_https
+        self._get_scheme = scheme if callable(scheme) else lambda: scheme
+
+        self._stored.set_default(current_url=None)  # type: ignore
+
+        # if instantiated with a port, and we are related, then
+        # we immediately publish our ingress data to speed up the process.
+        if port:
+            self._auto_data = host, ip, port
+        else:
+            self._auto_data = None
+
+    def _handle_relation(self, event):
+        # created, joined or changed: if we have auto data: publish it
+        self._publish_auto_data()
+        if self.is_ready():
+            # Avoid spurious events, emit only when there is a NEW URL available
+            new_url = (
+                None
+                if isinstance(event, RelationBrokenEvent)
+                else self._get_url_from_relation_data()
+            )
+            if self._stored.current_url != new_url:  # type: ignore
+                self._stored.current_url = new_url  # type: ignore
+                self.on.ready.emit(event.relation, new_url)  # type: ignore
+
+    def _handle_relation_broken(self, event):
+        self._stored.current_url = None  # type: ignore
+        self.on.revoked.emit(event.relation)  # type: ignore
+
+    def _handle_upgrade_or_leader(self, event):
+        """On upgrade/leadership change: ensure we publish the data we have."""
+        self._publish_auto_data()
+
+    def is_ready(self):
+        """The Requirer is ready if the Provider has sent valid data."""
+        try:
+            return bool(self._get_url_from_relation_data())
+        except DataValidationError as e:
+            log.debug("Requirer not ready; validation error encountered: %s" % str(e))
+            return False
+
+    def _publish_auto_data(self):
+        if self._auto_data:
+            host, ip, port = self._auto_data
+            self.provide_ingress_requirements(host=host, ip=ip, port=port)
+
+    def provide_ingress_requirements(
+        self,
+        *,
+        scheme: Optional[str] = None,
+        host: Optional[str] = None,
+        ip: Optional[str] = None,
+        port: int,
+    ):
+        """Publishes the data that Traefik needs to provide ingress.
+
+        Args:
+            scheme: Scheme to be used; if unspecified, use the one used by __init__.
+            host: Hostname to be used by the ingress provider to address the
+                requirer unit; if unspecified, FQDN will be used instead.
+            ip: Alternative addressing method other than host to be used by the ingress provider;
+                if unspecified, binding address from juju network API will be used.
+ port: the port of the service (required) + """ + for relation in self.relations: + self._provide_ingress_requirements(scheme, host, ip, port, relation) + + def _provide_ingress_requirements( + self, + scheme: Optional[str], + host: Optional[str], + ip: Optional[str], + port: int, + relation: Relation, + ): + if self.unit.is_leader(): + self._publish_app_data(scheme, port, relation) + + self._publish_unit_data(host, ip, relation) + + def _publish_unit_data( + self, + host: Optional[str], + ip: Optional[str], + relation: Relation, + ): + if not host: + host = socket.getfqdn() + + if ip is None: + network_binding = self.charm.model.get_binding(relation) + if ( + network_binding is not None + and (bind_address := network_binding.network.bind_address) is not None + ): + ip = str(bind_address) + else: + log.error("failed to retrieve ip information from juju") + + unit_databag = relation.data[self.unit] + try: + IngressRequirerUnitData(host=host, ip=ip).dump(unit_databag) + except pydantic.ValidationError as e: + msg = "failed to validate unit data" + log.info(msg, exc_info=True) # log to INFO because this might be expected + raise DataValidationError(msg) from e + + def _publish_app_data( + self, + scheme: Optional[str], + port: int, + relation: Relation, + ): + # assumes leadership! + app_databag = relation.data[self.app] + + if not scheme: + # If scheme was not provided, use the one given to the constructor. + scheme = self._get_scheme() + + try: + IngressRequirerAppData( # type: ignore # pyright does not like aliases + model=self.model.name, + name=self.app.name, + scheme=scheme, + port=port, + strip_prefix=self._strip_prefix, # type: ignore # pyright does not like aliases + redirect_https=self._redirect_https, # type: ignore # pyright does not like aliases + ).dump(app_databag) + except pydantic.ValidationError as e: + msg = "failed to validate app data" + log.info(msg, exc_info=True) # log to INFO because this might be expected + raise DataValidationError(msg) from e + + @property + def relation(self): + """The established Relation instance, or None.""" + return self.relations[0] if self.relations else None + + def _get_url_from_relation_data(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + relation = self.relation + if not relation or not relation.app: + return None + + # fetch the provider's app databag + try: + databag = relation.data[relation.app] + except ModelError as e: + log.debug( + f"Error {e} attempting to read remote app data; " + f"probably we are in a relation_departed hook" + ) + return None + + if not databag: # not ready yet + return None + + return str(IngressProviderAppData.load(databag).ingress.url) + + @property + def url(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + data = ( + typing.cast(Optional[str], self._stored.current_url) # type: ignore + or self._get_url_from_relation_data() + ) + return data diff --git a/localstack-installation.sh b/localstack-installation.sh index 827c94b6..852f2c66 100755 --- a/localstack-installation.sh +++ b/localstack-installation.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. pip install pip --upgrade diff --git a/metadata.yaml b/metadata.yaml index c4b638a1..63e9676d 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. 
+# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. name: synapse @@ -22,16 +22,11 @@ containers: mounts: - storage: data location: /data - synapse-nginx: - resource: synapse-nginx-image resources: synapse-image: type: oci-image description: OCI image for Synapse - synapse-nginx-image: - type: oci-image - description: OCI image for Synapse NGINX storage: data: @@ -43,6 +38,8 @@ provides: interface: grafana_dashboard metrics-endpoint: interface: prometheus_scrape + matrix-auth: + interface: matrix_auth requires: backup: @@ -57,10 +54,6 @@ requires: interface: ingress limit: 2 optional: true - irc-bridge-database: - interface: postgresql_client - limit: 1 - optional: true logging: interface: loki_push_api limit: 1 diff --git a/nginx_rock/rockcraft.yaml b/nginx_rock/rockcraft.yaml deleted file mode 100644 index 0d36bf85..00000000 --- a/nginx_rock/rockcraft.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -name: synapse-nginx -summary: synapse nginx rock -description: Nginx OCI image for the synapse charm -version: "1.0" -base: ubuntu@22.04 -build-base: ubuntu@22.04 -license: Apache-2.0 -platforms: - amd64: -parts: - add-user: - plugin: nil - overlay-script: | - chmod 755 $CRAFT_OVERLAY/etc - groupadd -R $CRAFT_OVERLAY --gid 2000 nginx - useradd -R $CRAFT_OVERLAY --system --gid 2000 --uid 2000 --no-create-home nginx - nginx-conf: - plugin: dump - source: etc - organize: - nginx.conf: etc/nginx/nginx.conf - worker_location.conf: etc/nginx/worker_location.conf - abuse_report_location.conf.template: etc/nginx/abuse_report_location.conf.template - abuse_report_location.conf: etc/nginx/abuse_report_location.conf - main_location.conf.template: etc/nginx/main_location.conf.template - main_location.conf: etc/nginx/main_location.conf - nginx: - stage-packages: - - logrotate - - nginx - - sed - plugin: nil - override-build: | - craftctl default - rm $CRAFT_PART_INSTALL/etc/nginx/nginx.conf - override-prime: | - craftctl default - mkdir run diff --git a/pyproject.toml b/pyproject.toml index 0b07e95a..101acc8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
 [tool.bandit]
@@ -11,7 +11,7 @@ skips = ["*/*test.py", "*/test_*.py", "*tests/*.py"]
 branch = true
 
 [tool.coverage.report]
-fail_under = 92
+fail_under = 91
 show_missing = true
diff --git a/renovate.json b/renovate.json
index 25eced57..c9dba586 100644
--- a/renovate.json
+++ b/renovate.json
@@ -12,7 +12,17 @@
       "# renovate: base:\\s+(?<depName>[^:]*):(?<currentValue>[^\\s@]*)(@(?<currentDigest>sha256:[0-9a-f]*))?"],
       "datasourceTemplate": "docker",
       "versioningTemplate": "ubuntu"
+    },
+    {
+      "fileMatch": ["(^|/)rockcraft.yaml$"],
+      "description": "Update Synapse workload",
+      "matchStringsStrategy": "any",
+      "matchStrings": ["source-tag: (?<currentValue>.+)"],
+      "datasourceTemplate": "github-releases",
+      "depNameTemplate": "element-hq/synapse",
+      "versioningTemplate": "semver-coerced"
     }
+
   ],
   "packageRules": [
     {
diff --git a/requirements.txt b/requirements.txt
index 03038c29..80e8594a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,10 @@
-cosl ==0.0.12
-boto3 ==1.34.151
-deepdiff ==7.0.1
+boto3 ==1.35.81
+cosl ==0.0.47
+cryptography ==44.0.0
+deepdiff ==8.0.1
 jinja2 ==3.1.4
-jsonschema ==4.22.0
-ops ==2.14.0
-pydantic ==1.10.16
-psycopg2-binary ==2.9.9
+jsonschema ==4.23.0
+ops ==2.17.1
+pydantic ==2.10.2
+psycopg2-binary ==2.9.10
 requests ==2.32.3
diff --git a/src-docs/api.md b/src-docs/api.md
deleted file mode 100644
index 31969373..00000000
--- a/src-docs/api.md
+++ /dev/null
@@ -1,610 +0,0 @@
-
-
-
-
-# module `api`
-Helper module used to manage interactions with Synapse API.
-
-**Global Variables**
---------------
-- **SYNAPSE_PORT**
-- **SYNAPSE_URL**
-- **ADD_USER_ROOM_URL**
-- **PROMOTE_USER_ADMIN_URL**
-- **CREATE_ROOM_URL**
-- **DEACTIVATE_ACCOUNT_URL**
-- **LIST_ROOMS_URL**
-- **LIST_USERS_URL**
-- **LOGIN_URL**
-- **MJOLNIR_MANAGEMENT_ROOM**
-- **MJOLNIR_MEMBERSHIP_ROOM**
-- **REGISTER_URL**
-- **SYNAPSE_VERSION_REGEX**
-- **VERSION_URL**
-- **WHOAMI_URL**
-
----
-
-
-
-## function `register_user`
-
-```python
-register_user(
-    registration_shared_secret: str,
-    user: User,
-    server: Optional[str] = None,
-    admin_access_token: Optional[str] = None
-) → str
-```
-
-Register user.
-
-
-
-**Args:**
-
- - `registration_shared_secret`: secret to be used to register the user.
- - `user`: user to be registered.
- - `server`: to be used to create the user id.
- - `admin_access_token`: admin access token to get user's access token if it exists.
-
-
-
-**Raises:**
-
- - `RegisterUserError`: if there was an error registering the user.
-
-
-
-**Returns:**
- Access token to be used by the user.
-
-
----
-
-
-
-## function `get_version`
-
-```python
-get_version(main_unit_address: str) → str
-```
-
-Get version.
-
-Expected API output: { "server_version": "0.99.2rc1 (b=develop, abcdef123)", "python_version": "3.7.8" }
-
-We're using retry here because after the config change, Synapse is restarted.
-
-
-
-**Args:**
-
- - `main_unit_address`: main unit address to be used instead of localhost in case of horizontal scaling.
-
-
-
-**Returns:**
- The version returned by Synapse API.
-
-
-
-**Raises:**
-
- - `GetVersionError`: if there was an error while reading version.
- - `VersionUnexpectedContentError`: if the version has unexpected content.
-
-
----
-
-
-
-## function `get_access_token`
-
-```python
-get_access_token(user: User, server: str, admin_access_token: str) → str
-```
-
-Get an access token that can be used to authenticate as that user.
-
-This is a way to do actions on behalf of a user.
-
-
-
-**Args:**
-
- - `user`: the user on behalf of whom you want to request the access token.
- - `server`: to be used to create the user id. User ID example: @user:server.com. - - `admin_access_token`: a server admin access token to be used for the request. - - - -**Returns:** - Access token. - - - -**Raises:** - - - `GetAccessTokenError`: if there was an error while getting access token. - - ---- - - - -## function `override_rate_limit` - -```python -override_rate_limit( - user: User, - admin_access_token: str, - charm_state: CharmState -) → None -``` - -Override user's rate limit. - - - -**Args:** - - - `user`: user to be used for requesting access token. - - `admin_access_token`: server admin access token to be used. - - `charm_state`: Instance of CharmState. - - ---- - - - -## function `get_room_id` - -```python -get_room_id(room_name: str, admin_access_token: str) → Optional[str] -``` - -Get room id. - - - -**Args:** - - - `room_name`: room name. - - `admin_access_token`: server admin access token to be used. - - - -**Returns:** - The room id. - - - -**Raises:** - - - `GetRoomIDError`: if there was an error while getting room id. - - ---- - - - -## function `deactivate_user` - -```python -deactivate_user(user: User, server: str, admin_access_token: str) → None -``` - -Deactivate user. - - - -**Args:** - - - `user`: user to be deactivated. - - `server`: to be used to create the user id. - - `admin_access_token`: server admin access token to be used. - - ---- - - - -## function `create_management_room` - -```python -create_management_room(admin_access_token: str) → str -``` - -Create the management room to be used by Mjolnir. - - - -**Args:** - - - `admin_access_token`: server admin access token to be used. - - - -**Raises:** - - - `GetRoomIDError`: if there was an error while getting room id. - - - -**Returns:** - Room id. - - ---- - - - -## function `make_room_admin` - -```python -make_room_admin( - user: User, - server: str, - admin_access_token: str, - room_id: str -) → None -``` - -Make user a room's admin. - - - -**Args:** - - - `user`: user to add to the room as admin. - - `server`: to be used to create the user id. - - `admin_access_token`: server admin access token to be used for the request. - - `room_id`: room id to add the user. - - ---- - - - -## function `promote_user_admin` - -```python -promote_user_admin(user: User, server: str, admin_access_token: str) → None -``` - -Promote user to admin. - - - -**Args:** - - - `user`: user to be promoted to admin. - - `server`: to be used to promote the user id. - - `admin_access_token`: server admin access token to be used. - - ---- - - - -## function `is_token_valid` - -```python -is_token_valid(access_token: str) → bool -``` - -Check if the access token is valid making a request to whoami. - - - -**Args:** - - - `access_token`: server access token to be used. - - - -**Returns:** - If the token is valid or not. - - ---- - - - -## class `APIError` -Exception raised when something fails while calling the API. - -Attrs: msg (str): Explanation of the error. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `NetworkError` -Exception raised when requesting API fails due network issues. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. 
- - - - - ---- - - - -## class `UnauthorizedError` -Exception raised when requesting API fails due to unauthorized access. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `GetNonceError` -Exception raised when getting nonce fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `GetVersionError` -Exception raised when getting version fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `VersionUnexpectedContentError` -Exception raised when output of getting version is unexpected. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `GetRoomIDError` -Exception raised when getting room id fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `GetUserIDError` -Exception raised when getting user id fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `UserExistsError` -Exception raised when checking if user exists fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `GetAccessTokenError` -Exception raised when getting access token fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - ---- - - - -## class `RegisterUserError` -Exception raised when registering user fails. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the APIError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - diff --git a/src-docs/charm.py.md b/src-docs/charm.py.md deleted file mode 100644 index a317a37b..00000000 --- a/src-docs/charm.py.md +++ /dev/null @@ -1,275 +0,0 @@ - - - - -# module `charm.py` -Charm for Synapse on kubernetes. - -**Global Variables** ---------------- -- **MAIN_UNIT_ID** - - ---- - -## class `SynapseCharm` -Charm the service. - -Attrs: on: listen to Redis events. - - - -### function `__init__` - -```python -__init__(*args: Any) → None -``` - -Construct. - - - -**Args:** - - - `args`: class arguments. - - ---- - -#### property app - -Application that this unit is part of. - ---- - -#### property charm_dir - -Root directory of the charm as it is running. - ---- - -#### property config - -A mapping containing the charm's config and current values. - ---- - -#### property meta - -Metadata of this charm. - ---- - -#### property model - -Shortcut for more simple access the model. 
- ---- - -#### property unit - -Unit that this execution is responsible for. - - - ---- - - - -### function `build_charm_state` - -```python -build_charm_state() → CharmState -``` - -Build charm state. - - - -**Returns:** - The current charm state. - ---- - - - -### function `get_main_unit` - -```python -get_main_unit() → Optional[str] -``` - -Get main unit. - - - -**Returns:** - main unit if main unit exists in peer relation data. - ---- - - - -### function `get_main_unit_address` - -```python -get_main_unit_address() → str -``` - -Get main unit address. If main unit is None, use unit name. - - - -**Returns:** - main unit address as unit-0.synapse-endpoints. - ---- - - - -### function `get_signing_key` - -```python -get_signing_key() → Optional[str] -``` - -Get signing key from secret. - - - -**Returns:** - Signing key as string or None if not found. - ---- - - - -### function `get_unit_number` - -```python -get_unit_number(unit_name: str = '') → str -``` - -Get unit number from unit name. - - - -**Args:** - - - `unit_name`: unit name or address. E.g.: synapse/0 or synapse-0.synapse-endpoints. - - - -**Returns:** - - - `Unit number. E.g.`: 0 - ---- - - - -### function `instance_map` - -```python -instance_map() → Optional[Dict] -``` - -Build instance_map config. - - - -**Returns:** - Instance map configuration as a dict or None if there is only one unit. - ---- - - - -### function `is_main` - -```python -is_main() → bool -``` - -Verify if this unit is the main. - - - -**Returns:** - - - `bool`: true if is the main unit. - ---- - - - -### function `peer_units_total` - -```python -peer_units_total() → int -``` - -Get peer units total. - - - -**Returns:** - total of units in peer relation or None if there is no peer relation. - ---- - - - -### function `reconcile` - -```python -reconcile(charm_state: CharmState) → None -``` - -Reconcile Synapse configuration with charm state. - -This is the main entry for changes that require a restart. - - - -**Args:** - - - `charm_state`: Instance of CharmState - ---- - - - -### function `set_main_unit` - -```python -set_main_unit(unit: str) → None -``` - -Create/Renew an admin access token and put it in the peer relation. - - - -**Args:** - - - `unit`: Unit to be the main. - ---- - - - -### function `set_signing_key` - -```python -set_signing_key(signing_key: str) → None -``` - -Create secret with signing key content. - - - -**Args:** - - - `signing_key`: signing key as string. - - diff --git a/src-docs/charm_state.py.md b/src-docs/charm_state.py.md index 112b933a..60795c90 100644 --- a/src-docs/charm_state.py.md +++ b/src-docs/charm_state.py.md @@ -168,13 +168,13 @@ State of the Charm. - `synapse_config`: synapse configuration. - `datasource`: datasource information. - - `irc_bridge_datasource`: irc bridge datasource information. - `saml_config`: saml configuration. - `smtp_config`: smtp configuration. - `media_config`: media configuration. - `redis_config`: redis configuration. - `proxy`: proxy information. - `instance_map_config`: Instance map configuration with main and worker addresses. + - `registration_secrets`: Registration secrets received via matrix-auth integration. --- @@ -192,7 +192,7 @@ Get charm proxy information from juju charm environment. --- - + ### classmethod `from_charm` @@ -200,12 +200,12 @@ Get charm proxy information from juju charm environment. 
from_charm( charm: CharmBase, datasource: Optional[DatasourcePostgreSQL], - irc_bridge_datasource: Optional[DatasourcePostgreSQL], saml_config: Optional[SAMLConfiguration], smtp_config: Optional[SMTPConfiguration], media_config: Optional[MediaConfiguration], redis_config: Optional[RedisConfiguration], - instance_map_config: Optional[Dict] + instance_map_config: Optional[Dict], + registration_secrets: Optional[List] ) → CharmState ``` @@ -217,12 +217,12 @@ Initialize a new instance of the CharmState class from the associated charm. - `charm`: The charm instance associated with this state. - `datasource`: datasource information to be used by Synapse. - - `irc_bridge_datasource`: irc bridge datasource information to be used by Synapse. - `saml_config`: saml configuration to be used by Synapse. - `smtp_config`: SMTP configuration to be used by Synapse. - `media_config`: Media configuration to be used by Synapse. - `redis_config`: Redis configuration to be used by Synapse. - `instance_map_config`: Instance map configuration with main and worker addresses. + - `registration_secrets`: Registration secrets received via matrix-auth integration. Return: The CharmState instance created by the provided charm. @@ -281,18 +281,22 @@ Represent Synapse builtin configuration values. **Attributes:** - `allow_public_rooms_over_federation`: allow_public_rooms_over_federation config. + - `block_non_admin_invites`: block_non_admin_invites config. - `enable_email_notifs`: enable_email_notifs config. - - `enable_irc_bridge`: creates a registration file in Synapse and starts an irc bridge app. - - `enable_irc_ident`: starts an ident server for the IRC bridge. - - `irc_bridge_admins`: a comma separated list of user IDs who are admins of the IRC bridge. - `enable_mjolnir`: enable_mjolnir config. - `enable_password_config`: enable_password_config config. - `enable_room_list_search`: enable_room_list_search config. - `federation_domain_whitelist`: federation_domain_whitelist config. + - `invite_checker_blocklist_allowlist_url`: invite_checker_blocklist_allowlist_url config. + - `invite_checker_policy_rooms`: invite_checker_policy_rooms config. - `ip_range_whitelist`: ip_range_whitelist config. + - `limit_remote_rooms_complexity`: limit_remote_rooms_complexity config. - `notif_from`: defines the "From" address to use when sending emails. - `public_baseurl`: public_baseurl config. - `publish_rooms_allowlist`: publish_rooms_allowlist config. + - `experimental_alive_check`: experimental_alive_check config. + - `rc_joins_remote_burst_count`: rc_join burst_count config. + - `rc_joins_remote_per_second`: rc_join per_second config. - `report_stats`: report_stats config. - `server_name`: server_name config. - `trusted_key_servers`: trusted_key_servers config. @@ -303,7 +307,7 @@ Represent Synapse builtin configuration values. --- - + ### classmethod `get_default_notif_from` @@ -327,7 +331,65 @@ Set server_name as default value to notif_from. --- - + + +### classmethod `roomids_to_list` + +```python +roomids_to_list(value: str) → List[str] +``` + +Convert a comma separated list of rooms to list. + + + +**Args:** + + - `value`: the input value. + + + +**Returns:** + The string converted to list. + + + +**Raises:** + + - `ValidationError`: if rooms is not as expected. + +--- + + + +### classmethod `to_pebble_check` + +```python +to_pebble_check(value: str) → Dict[str, Union[str, int]] +``` + +Convert the experimental_alive_check field to pebble check. + + + +**Args:** + + - `value`: the input value. 
+ + + +**Returns:** + The pebble check. + + + +**Raises:** + + - `ValidationError`: if experimental_alive_check is invalid. + +--- + + ### classmethod `to_yes_or_no` @@ -350,7 +412,7 @@ Convert the report_stats field to yes or no. --- - + ### classmethod `userids_to_list` diff --git a/src-docs/database_observer.py.md b/src-docs/database_observer.py.md deleted file mode 100644 index b2931bef..00000000 --- a/src-docs/database_observer.py.md +++ /dev/null @@ -1,76 +0,0 @@ - - - - -# module `database_observer.py` -The Database agent relation observer. - - - ---- - -## class `DatabaseObserver` -The Database relation observer. - - - -### function `__init__` - -```python -__init__(charm: CharmBaseWithState, relation_name: str) → None -``` - -Initialize the observer and register event handlers. - - - -**Args:** - - - `charm`: The parent charm to attach the observer to. - - `relation_name`: The name of the relation to observe. - - ---- - -#### property model - -Shortcut for more simple access the model. - - - ---- - - - -### function `get_charm` - -```python -get_charm() → CharmBaseWithState -``` - -Return the current charm. - - - -**Returns:** - The current charm - ---- - - - -### function `get_relation_as_datasource` - -```python -get_relation_as_datasource() → Optional[DatasourcePostgreSQL] -``` - -Get database data from relation. - - - -**Returns:** - - - `Dict`: Information needed for setting environment variables. - - diff --git a/src-docs/irc_bridge.py.md b/src-docs/irc_bridge.py.md deleted file mode 100644 index f1978fb8..00000000 --- a/src-docs/irc_bridge.py.md +++ /dev/null @@ -1,61 +0,0 @@ - - - - -# module `irc_bridge.py` -Provide the IRC bridge class to represent the matrix-appservice-app plugin for Synapse. - -**Global Variables** ---------------- -- **IRC_SERVICE_NAME** - ---- - - - -## function `enable_irc_bridge` - -```python -enable_irc_bridge(charm_state: CharmState, container: Container) → None -``` - -Enable irc service. - -The required steps to enable the IRC bridge are: - - Create the IRC bridge configuration file. - - Generate a PEM file for the IRC bridge. - - Add the IRC bridge application in the homeserver config. - - - -**Args:** - - - `charm_state`: Instance of CharmState. - - `container`: The container to enable the IRC bridge in. - - ---- - -## class `PEMCreateError` -An exception raised when the PEM file creation fails. - - - -### function `__init__` - -```python -__init__(message: str) -``` - -Initialize a new instance of the PEMCreateError class. - - - -**Args:** - - - `message`: The error message. - - - - - diff --git a/src-docs/matrix_auth_observer.py.md b/src-docs/matrix_auth_observer.py.md new file mode 100644 index 00000000..8bd51a21 --- /dev/null +++ b/src-docs/matrix_auth_observer.py.md @@ -0,0 +1,92 @@ + + + + +# module `matrix_auth_observer.py` +The Matrix Auth relation observer. + + + +--- + +## class `MatrixAuthObserver` +The Matrix Auth relation observer. + + + +### function `__init__` + +```python +__init__(charm: CharmBaseWithState) +``` + +Initialize the observer and register event handlers. + + + +**Args:** + + - `charm`: The parent charm to attach the observer to. + + +--- + +#### property model + +Shortcut for more simple access the model. + + + +--- + + + +### function `get_charm` + +```python +get_charm() → CharmBaseWithState +``` + +Return the current charm. 
+ + + +**Returns:** + The current charm + +--- + + + +### function `get_requirer_registration_secrets` + +```python +get_requirer_registration_secrets() → Optional[List] +``` + +Get requirers registration secrets (application services). + + + +**Returns:** + dict with filepath and content for creating the secret files. + +--- + + + +### function `update_matrix_auth_integration` + +```python +update_matrix_auth_integration(charm_state: CharmState) → None +``` + +Update matrix auth integration relation data. + + + +**Args:** + + - `charm_state`: The charm state. + + diff --git a/src-docs/mjolnir.py.md b/src-docs/mjolnir.py.md index 213e656c..40b0ab8d 100644 --- a/src-docs/mjolnir.py.md +++ b/src-docs/mjolnir.py.md @@ -46,7 +46,7 @@ Shortcut for more simple access the model. --- - + ### function `enable_mjolnir` @@ -94,7 +94,7 @@ Return the current charm. --- - + ### function `get_membership_room_id` diff --git a/src-docs/observability.py.md b/src-docs/observability.py.md index b1dc108b..1b91be1d 100644 --- a/src-docs/observability.py.md +++ b/src-docs/observability.py.md @@ -16,7 +16,7 @@ Provide the Observability class to represent the observability stack for Synapse ## class `Observability` A class representing the observability stack for Synapse application. - + ### function `__init__` @@ -35,22 +35,4 @@ Initialize a new instance of the Observability class. ---- - - - -### function `update_targets` - -```python -update_targets(targets: List[str]) → None -``` - -Update prometheus targets. - - - -**Args:** - - - `targets`: new target list. - diff --git a/src-docs/pebble.py.md b/src-docs/pebble.py.md index 44466753..afa27bfc 100644 --- a/src-docs/pebble.py.md +++ b/src-docs/pebble.py.md @@ -8,18 +8,25 @@ Class to interact with pebble. **Global Variables** --------------- - **STATS_EXPORTER_SERVICE_NAME** +- **MAS_CONFIGURATION_PATH** --- - + -## function `check_synapse_ready` +## function `check_synapse_alive` ```python -check_synapse_ready() → CheckDict +check_synapse_alive(charm_state: CharmState) → CheckDict ``` -Return the Synapse container ready check. +Return the Synapse container alive check. + + + +**Args:** + + - `charm_state`: Instance of CharmState. @@ -30,15 +37,15 @@ Return the Synapse container ready check. --- - + -## function `check_synapse_alive` +## function `check_synapse_ready` ```python -check_synapse_alive() → CheckDict +check_synapse_ready() → CheckDict ``` -Return the Synapse container alive check. +Return the Synapse container ready check. @@ -49,7 +56,7 @@ Return the Synapse container alive check. --- - + ## function `restart_synapse` @@ -76,7 +83,7 @@ This will force a restart even if its plan hasn't changed. --- - + ## function `check_nginx_ready` @@ -95,7 +102,7 @@ Return the Synapse NGINX container check. --- - + ## function `check_mjolnir_ready` @@ -114,46 +121,47 @@ Return the Synapse Mjolnir service check. --- - + -## function `check_irc_bridge_ready` +## function `restart_nginx` ```python -check_irc_bridge_ready() → CheckDict +restart_nginx(container: Container, main_unit_address: str) → None ``` -Return the Synapse IRC bridge service check. +Restart Synapse NGINX service and regenerate configuration. -**Returns:** +**Args:** - - `Dict`: check object converted to its dict representation. + - `container`: Charm container. + - `main_unit_address`: Main unit address to be used in configuration. 
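
With `update_targets` removed from the observability module (see the deletion documented above), scrape targets are now declared statically as two named jobs. A minimal self-contained sketch of that layout; the port values here are assumed placeholders, not taken from the charm source:

```python
# Sketch of the static scrape-job layout that replaces update_targets().
# Port values are assumed placeholders for illustration only.
SYNAPSE_EXPORTER_PORT = 9000
STATS_EXPORTER_PORT = 9877

jobs = [
    {
        "job_name": "synapse_application",
        "static_configs": [{"targets": [f"*:{SYNAPSE_EXPORTER_PORT}"]}],
    },
    {
        "job_name": "synapse_stats_exporter",
        "static_configs": [{"targets": [f"*:{STATS_EXPORTER_PORT}"]}],
    },
]
print(jobs)
```

Naming the jobs lets the updated Grafana dashboards select series with expressions such as `job=~".*synapse_application.*"` instead of relying on per-unit target updates.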
--- - + -## function `restart_nginx` +## function `restart_federation_sender` ```python -restart_nginx(container: Container, main_unit_address: str) → None +restart_federation_sender(container: Container, charm_state: CharmState) → None ``` -Restart Synapse NGINX service and regenerate configuration. +Restart Synapse federation sender service and regenerate configuration. **Args:** - `container`: Charm container. - - `main_unit_address`: Main unit address to be used in configuration. + - `charm_state`: Instance of CharmState. --- - + ## function `replan_mjolnir` @@ -172,34 +180,38 @@ Replan Synapse Mjolnir service. --- - + -## function `replan_irc_bridge` +## function `replan_stats_exporter` ```python -replan_irc_bridge(container: Container) → None +replan_stats_exporter(container: Container, charm_state: CharmState) → None ``` -Replan Synapse IRC bridge service. +Replan Synapse StatsExporter service. **Args:** - `container`: Charm container. + - `charm_state`: Instance of CharmState. --- - + -## function `replan_stats_exporter` +## function `replan_synapse_federation_sender` ```python -replan_stats_exporter(container: Container, charm_state: CharmState) → None +replan_synapse_federation_sender( + container: Container, + charm_state: CharmState +) → None ``` -Replan Synapse StatsExporter service. +Replan Synapse Federation Sender service. @@ -211,13 +223,14 @@ Replan Synapse StatsExporter service. --- - + ## function `reconcile` ```python reconcile( charm_state: CharmState, + rendered_mas_configuration: str, container: Container, is_main: bool = True, unit_number: str = '' @@ -233,38 +246,13 @@ This is the main entry for changes that require a restart done via Pebble. **Args:** - `charm_state`: Instance of CharmState + - `rendered_mas_configuration`: Rendered MAS yaml configuration. - `container`: Charm container. - `is_main`: if unit is main. - `unit_number`: unit number id to set the worker name. -**Raises:** - - - `PebbleServiceError`: if something goes wrong while interacting with Pebble. - - ---- - - - -## function `reset_instance` - -```python -reset_instance(charm_state: CharmState, container: Container) → None -``` - -Reset instance. - - - -**Args:** - - - `charm_state`: Instance of CharmState - - `container`: Charm container. - - - **Raises:** - `PebbleServiceError`: if something goes wrong while interacting with Pebble. @@ -277,7 +265,7 @@ Exception raised when something fails while interacting with Pebble. Attrs: msg (str): Explanation of the error. - + ### function `__init__` diff --git a/src-docs/register_user.md b/src-docs/register_user.md deleted file mode 100644 index 13b93aaa..00000000 --- a/src-docs/register_user.md +++ /dev/null @@ -1,77 +0,0 @@ - - - - -# module `register_user` -Module to interact with Register User action. - - ---- - - - -## function `register_user` - -```python -register_user( - container: Container, - username: str, - admin: bool, - admin_access_token: Optional[str] = None, - server: str = '' -) → User -``` - -Run register user action. - - - -**Args:** - - - `container`: Container of the charm. - - `username`: username to be registered. - - `admin`: if user is admin. - - `server`: to be used to create the user id. - - `admin_access_token`: server admin access token to get user's access token if it exists. - - - -**Raises:** - - - `RegisterUserError`: if something goes wrong while registering the user. - - - -**Returns:** - User with password registered. 
- - ---- - - - -## class `RegisterUserError` -Exception raised when something fails while running register-user. - -Attrs: msg (str): Explanation of the error. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the RegisterUserError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - diff --git a/src-docs/reset_instance.md b/src-docs/reset_instance.md deleted file mode 100644 index 5c604f9a..00000000 --- a/src-docs/reset_instance.md +++ /dev/null @@ -1,68 +0,0 @@ - - - - -# module `reset_instance` -Module to interact with Reset Instance action. - - ---- - - - -## function `reset_instance` - -```python -reset_instance( - container: Container, - charm_state: CharmState, - datasource: Optional[DatasourcePostgreSQL] -) → None -``` - -Run reset instance action. - - - -**Args:** - - - `container`: Container of the charm. - - `charm_state`: charm state from the charm. - - `datasource`: datasource to interact with the database. - - - -**Raises:** - - - `ResetInstanceError`: if something goes wrong while resetting the instance. - - ---- - - - -## class `ResetInstanceError` -Exception raised when something fails while running reset-instance. - -Attrs: msg (str): Explanation of the error. - - - -### method `__init__` - -```python -__init__(msg: str) -``` - -Initialize a new instance of the ResetInstanceError exception. - - - -**Args:** - - - `msg` (str): Explanation of the error. - - - - - diff --git a/src-docs/saml_observer.py.md b/src-docs/saml_observer.py.md index 49f5a031..1fb45d9b 100644 --- a/src-docs/saml_observer.py.md +++ b/src-docs/saml_observer.py.md @@ -12,7 +12,7 @@ The SAML integrator relation observer. ## class `SAMLObserver` The SAML Integrator relation observer. - + ### function `__init__` @@ -39,7 +39,7 @@ Shortcut for more simple access the model. --- - + ### function `get_charm` @@ -56,7 +56,7 @@ Return the current charm. --- - + ### function `get_relation_as_saml_conf` diff --git a/src-docs/user.py.md b/src-docs/user.py.md index a89c1c00..b5db2c99 100644 --- a/src-docs/user.py.md +++ b/src-docs/user.py.md @@ -21,7 +21,7 @@ Synapse user. - `password`: users password. - `access_token`: obtained when the user is registered. - + ### function `__init__` @@ -43,7 +43,7 @@ Initialize the User. --- - + ### classmethod `username_must_not_be_empty` diff --git a/src/actions/__init__.py b/src/actions/__init__.py index ec86e6eb..389830fa 100644 --- a/src/actions/__init__.py +++ b/src/actions/__init__.py @@ -1,8 +1,7 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Actions package is used to run actions provided by the charm.""" # Exporting methods to be used for another modules from .register_user import RegisterUserError, register_user # noqa: F401 -from .reset_instance import ResetInstanceError, reset_instance # noqa: F401 diff --git a/src/actions/register_user.py b/src/actions/register_user.py index 2781ca34..bd391069 100644 --- a/src/actions/register_user.py +++ b/src/actions/register_user.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Module to interact with Register User action.""" @@ -11,7 +11,7 @@ import ops # pydantic is causing this no-name-in-module problem -from pydantic import ValidationError # pylint: disable=no-name-in-module,import-error +from pydantic.v1 import ValidationError # pylint: disable=no-name-in-module,import-error import synapse from user import User diff --git a/src/actions/reset_instance.py b/src/actions/reset_instance.py deleted file mode 100644 index 7cf93985..00000000 --- a/src/actions/reset_instance.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Module to interact with Reset Instance action.""" - -import logging -import typing - -import ops -import psycopg2 - -import synapse -from charm_state import CharmState -from database_client import DatabaseClient, DatasourcePostgreSQL - -logger = logging.getLogger(__name__) - - -class ResetInstanceError(Exception): - """Exception raised when something fails while running reset-instance. - - Attrs: - msg (str): Explanation of the error. - """ - - def __init__(self, msg: str): - """Initialize a new instance of the ResetInstanceError exception. - - Args: - msg (str): Explanation of the error. - """ - self.msg = msg - - -def reset_instance( - container: ops.Container, - charm_state: CharmState, - datasource: typing.Optional[DatasourcePostgreSQL], -) -> None: - """Run reset instance action. - - Args: - container: Container of the charm. - charm_state: charm state from the charm. - datasource: datasource to interact with the database. - - Raises: - ResetInstanceError: if something goes wrong while resetting the instance. - """ - try: - if datasource is not None: - logger.info("Erase Synapse database") - # Connecting to template1 to make it possible to erase the database. - # Otherwise PostgreSQL will prevent it if there are open connections. - db_client = DatabaseClient(datasource=datasource, alternative_database="template1") - db_client.erase() - synapse.execute_migrate_config(container=container, charm_state=charm_state) - except (psycopg2.Error, synapse.WorkloadError) as exc: - raise ResetInstanceError(str(exc)) from exc diff --git a/src/admin_access_token.py b/src/admin_access_token.py index 2981dfb8..3de1fe23 100644 --- a/src/admin_access_token.py +++ b/src/admin_access_token.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. # While this is a refactor,is expected to have few public methods. diff --git a/src/backup.py b/src/backup.py index 4c52f4ae..130abb1c 100644 --- a/src/backup.py +++ b/src/backup.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Provides backup functionality for Synapse.""" @@ -454,8 +454,9 @@ def _build_backup_command( paths = _paths_to_args(backup_paths) tar_command = f"tar -c {paths}" gpg_command = ( - f"gpg --batch --no-symkey-cache --passphrase-file '{passphrase_file}' --symmetric" + f"gpg --batch --no-symkey-cache " f"--passphrase-file '{passphrase_file}' " f"--symmetric" ) + s3_url = _s3_path( prefix=s3_parameters.path, object_name=backup_id, bucket=s3_parameters.bucket ) diff --git a/src/backup_observer.py b/src/backup_observer.py index 24a9871b..1c2c3e7b 100644 --- a/src/backup_observer.py +++ b/src/backup_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""S3 Backup relation observer for Synapse.""" diff --git a/src/charm.py b/src/charm.py index cc0179e6..2073472e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Charm for Synapse on kubernetes.""" @@ -13,9 +13,9 @@ import ops from charms.nginx_ingress_integrator.v0.nginx_route import require_nginx_route from charms.redis_k8s.v0.redis import RedisRelationCharmEvents -from charms.traefik_k8s.v1.ingress import IngressPerAppRequirer +from charms.traefik_k8s.v2.ingress import IngressPerAppRequirer +from ops import main from ops.charm import ActionEvent, RelationDepartedEvent -from ops.main import main import actions import pebble @@ -24,6 +24,7 @@ from backup_observer import BackupObserver from charm_state import CharmBaseWithState, CharmState, inject_charm_state from database_observer import DatabaseObserver +from matrix_auth_observer import MatrixAuthObserver from media_observer import MediaObserver from mjolnir import Mjolnir from observability import Observability @@ -35,6 +36,7 @@ logger = logging.getLogger(__name__) MAIN_UNIT_ID = "main_unit_id" +INGRESS_INTEGRATION_NAME = "ingress" class SynapseCharm(CharmBaseWithState): @@ -57,9 +59,9 @@ def __init__(self, *args: typing.Any) -> None: """ super().__init__(*args) self._backup = BackupObserver(self) + self._matrix_auth = MatrixAuthObserver(self) self._media = MediaObserver(self) self._database = DatabaseObserver(self, relation_name=synapse.SYNAPSE_DB_RELATION_NAME) - self._irc_bridge_database = DatabaseObserver(self, relation_name="irc-bridge-database") self._saml = SAMLObserver(self) self._smtp = SMTPObserver(self) self._redis = RedisObserver(self) @@ -74,13 +76,9 @@ def __init__(self, *args: typing.Any) -> None: service_port=synapse.SYNAPSE_NGINX_PORT, ) self._ingress = IngressPerAppRequirer( - self, + charm=self, + relation_name=INGRESS_INTEGRATION_NAME, port=synapse.SYNAPSE_NGINX_PORT, - # We're forced to use the app's service endpoint - # as the ingress per app interface currently always routes to the leader. 
- # https://github.com/canonical/traefik-k8s-operator/issues/159 - host=f"{self.app.name}-endpoints.{self.model.name}.svc.cluster.local", - strip_prefix=True, ) self._observability = Observability(self) self._mjolnir = Mjolnir(self, token_service=self.token_service) @@ -93,11 +91,7 @@ def __init__(self, *args: typing.Any) -> None: self.framework.observe( self.on[synapse.SYNAPSE_PEER_RELATION_NAME].relation_changed, self._on_relation_changed ) - self.framework.observe(self.on.reset_instance_action, self._on_reset_instance_action) self.framework.observe(self.on.synapse_pebble_ready, self._on_synapse_pebble_ready) - self.framework.observe( - self.on.synapse_nginx_pebble_ready, self._on_synapse_nginx_pebble_ready - ) self.framework.observe(self.on.register_user_action, self._on_register_user_action) self.framework.observe( self.on.promote_user_admin_action, self._on_promote_user_admin_action @@ -113,11 +107,11 @@ def build_charm_state(self) -> CharmState: return CharmState.from_charm( charm=self, datasource=self._database.get_relation_as_datasource(), - irc_bridge_datasource=self._irc_bridge_database.get_relation_as_datasource(), saml_config=self._saml.get_relation_as_saml_conf(), smtp_config=self._smtp.get_relation_as_smtp_conf(), media_config=self._media.get_relation_as_media_conf(), redis_config=self._redis.get_relation_as_redis_conf(), + registration_secrets=self._matrix_auth.get_requirer_registration_secrets(), instance_map_config=self.instance_map(), ) @@ -174,7 +168,10 @@ def instance_map(self) -> typing.Optional[typing.Dict]: address = f"{unit_name}.{app_name}-endpoints" addresses.append(address) logger.debug("addresses values are: %s", str(addresses)) - instance_map = {"main": {"host": self.get_main_unit_address(), "port": 8034}} + instance_map = { + "main": {"host": self.get_main_unit_address(), "port": 8035}, + "federationsender1": {"host": self.get_main_unit_address(), "port": 8034}, + } for address in addresses: match = re.search(r"-(\d+)", address) # A Juju unit name is s always named on the @@ -200,22 +197,13 @@ def reconcile(self, charm_state: CharmState) -> None: if self.get_main_unit() is None and self.unit.is_leader(): logging.debug("Change_config is setting main unit.") self.set_main_unit(self.unit.name) - # Reconciling prometheus targets - targets = [ - f"*:{synapse.PROMETHEUS_MAIN_TARGET_PORT}", - f"*:{synapse.STATS_EXPORTER_PORT}", - ] - if not self.is_main(): - targets = [ - f"*:{synapse.PROMETHEUS_WORKER_TARGET_PORT}", - ] - self._observability.update_targets(targets) container = self.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) if not container.can_connect(): self.unit.status = ops.MaintenanceStatus("Waiting for Synapse pebble") return self.model.unit.status = ops.MaintenanceStatus("Configuring Synapse") try: + # check signing key signing_key_path = f"/data/{charm_state.synapse_config.server_name}.signing.key" signing_key_from_secret = self.get_signing_key() if signing_key_from_secret: @@ -223,17 +211,26 @@ def reconcile(self, charm_state: CharmState) -> None: container.push( signing_key_path, signing_key_from_secret, make_dirs=True, encoding="utf-8" ) + + # reconcile configuration pebble.reconcile( charm_state, container, is_main=self.is_main(), unit_number=self.get_unit_number() ) + + # create new signing key if needed if self.is_main() and not signing_key_from_secret: logger.debug("Signing key secret not found, creating secret") with container.pull(signing_key_path) as f: signing_key = f.read() self.set_signing_key(signing_key.rstrip()) + + # update 
matrix-auth integration with configuration data + if self.unit.is_leader(): + self._matrix_auth.update_matrix_auth_integration(charm_state) except (pebble.PebbleServiceError, FileNotFoundError) as exc: self.model.unit.status = ops.BlockedStatus(str(exc)) return + pebble.restart_nginx(container, self.get_main_unit_address()) self._set_unit_status() def _set_unit_status(self) -> None: @@ -260,10 +257,6 @@ def _set_unit_status(self) -> None: self.unit.status = ops.MaintenanceStatus("Waiting for Synapse") return # NGINX checks - container = self.unit.get_container(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - if not container.can_connect(): - self.unit.status = ops.MaintenanceStatus("Waiting for Synapse NGINX pebble") - return nginx_service = container.get_services(synapse.SYNAPSE_NGINX_SERVICE_NAME) nginx_not_active = [ service for service in nginx_service.values() if not service.is_running() @@ -485,71 +478,12 @@ def _on_relation_changed(self, _: ops.HookEvent, charm_state: CharmState) -> Non """ logger.debug("_on_relation_changed emitting reconcile") self.reconcile(charm_state) - # Reload NGINX configuration with new main address - nginx_container = self.unit.get_container(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - if not nginx_container.can_connect(): - logger.warning( - "Relation changed received but NGINX container is not available for reloading." - ) - return - pebble.restart_nginx(nginx_container, self.get_main_unit_address()) - - def _on_synapse_nginx_pebble_ready(self, _: ops.HookEvent) -> None: - """Handle synapse nginx pebble ready event.""" - container = self.unit.get_container(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - if not container.can_connect(): - logger.debug("synapse_nginx_pebble_ready failed to connect") - self.unit.status = ops.MaintenanceStatus("Waiting for Synapse NGINX pebble") - return - logger.debug("synapse_nginx_pebble_ready replanning nginx") - # Replan pebble layer - pebble.restart_nginx(container, self.get_main_unit_address()) - self._set_unit_status() - - @inject_charm_state - def _on_reset_instance_action(self, event: ActionEvent, charm_state: CharmState) -> None: - """Reset instance and report action result. - - Args: - event: Event triggering the reset instance action. - charm_state: The charm state. - """ - results = { - "reset-instance": False, - } - if not self.model.unit.is_leader(): - event.fail("Only the juju leader unit can run reset instance action") - return - container = self.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) - if not container.can_connect(): - event.fail("Failed to connect to the container") - return - try: - self.model.unit.status = ops.MaintenanceStatus("Resetting Synapse instance") - try: - container.stop(pebble.STATS_EXPORTER_SERVICE_NAME) - except (ops.pebble.Error, RuntimeError) as e: - event.fail(f"Failed to stop Synapse Stats Exporter: {str(e)}") - pebble.reset_instance(charm_state, container) - datasource = self._database.get_relation_as_datasource() - actions.reset_instance( - container=container, charm_state=charm_state, datasource=datasource - ) - logger.info("Start Synapse") - pebble.restart_synapse(charm_state, container, self.is_main()) - results["reset-instance"] = True - except (pebble.PebbleServiceError, actions.ResetInstanceError) as exc: - self.model.unit.status = ops.BlockedStatus(str(exc)) - event.fail(str(exc)) - return - event.set_results(results) - self.model.unit.status = ops.ActiveStatus() def _on_register_user_action(self, event: ActionEvent) -> None: - """Reset instance and report action result. 
+ """Register user and report action result. Args: - event: Event triggering the reset instance action. + event: Event triggering the register user instance action. """ container = self.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) if not container.can_connect(): diff --git a/src/charm_state.py b/src/charm_state.py index 1e28d1cf..013f8c8d 100644 --- a/src/charm_state.py +++ b/src/charm_state.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """State of the Charm.""" @@ -13,7 +13,7 @@ import ops # pydantic is causing this no-name-in-module problem -from pydantic import ( # pylint: disable=no-name-in-module,import-error +from pydantic.v1 import ( # pylint: disable=no-name-in-module,import-error AnyHttpUrl, BaseModel, Extra, @@ -160,18 +160,22 @@ class SynapseConfig(BaseModel): # pylint: disable=too-few-public-methods Attributes: allow_public_rooms_over_federation: allow_public_rooms_over_federation config. + block_non_admin_invites: block_non_admin_invites config. enable_email_notifs: enable_email_notifs config. - enable_irc_bridge: creates a registration file in Synapse and starts an irc bridge app. - enable_irc_ident: starts an ident server for the IRC bridge. - irc_bridge_admins: a comma separated list of user IDs who are admins of the IRC bridge. enable_mjolnir: enable_mjolnir config. enable_password_config: enable_password_config config. enable_room_list_search: enable_room_list_search config. federation_domain_whitelist: federation_domain_whitelist config. + invite_checker_blocklist_allowlist_url: invite_checker_blocklist_allowlist_url config. + invite_checker_policy_rooms: invite_checker_policy_rooms config. ip_range_whitelist: ip_range_whitelist config. + limit_remote_rooms_complexity: limit_remote_rooms_complexity config. notif_from: defines the "From" address to use when sending emails. public_baseurl: public_baseurl config. publish_rooms_allowlist: publish_rooms_allowlist config. + experimental_alive_check: experimental_alive_check config. + rc_joins_remote_burst_count: rc_join burst_count config. + rc_joins_remote_per_second: rc_join per_second config. report_stats: report_stats config. server_name: server_name config. trusted_key_servers: trusted_key_servers config. @@ -179,19 +183,24 @@ class SynapseConfig(BaseModel): # pylint: disable=too-few-public-methods """ allow_public_rooms_over_federation: bool = False + block_non_admin_invites: bool = False enable_email_notifs: bool = False - enable_irc_bridge: bool = False - enable_irc_ident: bool = False - irc_bridge_admins: str | None = Field(None) enable_mjolnir: bool = False enable_password_config: bool = True enable_room_list_search: bool = True + experimental_alive_check: str | None = Field(None) federation_domain_whitelist: str | None = Field(None) + invite_checker_blocklist_allowlist_url: str | None = Field(None) + invite_checker_policy_rooms: str | None = Field(None) ip_range_whitelist: str | None = Field(None, regex=r"^[\.:,/\d]+\d+(?:,[:,\d]+)*$") - public_baseurl: str | None = Field(None) + limit_remote_rooms_complexity: float | None = Field(None) + public_baseurl: str = Field(..., min_length=2) publish_rooms_allowlist: str | None = Field(None) + rc_joins_remote_burst_count: int | None = Field(None) + rc_joins_remote_per_second: float | None = Field(None) report_stats: str | None = Field(None) server_name: str = Field(..., min_length=2) + # notif_from should be after server_name because of how the validator is set. 
notif_from: str | None = Field(None) trusted_key_servers: str | None = Field( None, regex=r"^[A-Za-z0-9][A-Za-z0-9-.]*(?:,[A-Za-z0-9][A-Za-z0-9-.]*)*\.\D{2,4}$" @@ -241,7 +250,32 @@ def to_yes_or_no(cls, value: str) -> str: return "yes" return "no" - @validator("irc_bridge_admins", "publish_rooms_allowlist") + @validator("invite_checker_policy_rooms") + @classmethod + def roomids_to_list(cls, value: str) -> typing.List[str]: + """Convert a comma separated list of rooms to list. + + Args: + value: the input value. + + Returns: + The string converted to list. + + Raises: + ValidationError: if rooms is not as expected. + """ + # Based on documentation + # https://spec.matrix.org/v1.10/appendices/#user-identifiers + roomid_regex = r"![a-zA-Z0-9._=/+-]+:[a-zA-Z0-9-.]+" + if value is None: + return [] + value_list = ["!" + room_id.strip() for room_id in value.split(",")] + for room_id in value_list: + if not re.fullmatch(roomid_regex, room_id): + raise ValidationError(f"Invalid room ID format: {room_id}", cls) + return value_list + + @validator("publish_rooms_allowlist") @classmethod def userids_to_list(cls, value: str) -> typing.List[str]: """Convert a comma separated list of users to list. @@ -266,6 +300,45 @@ def userids_to_list(cls, value: str) -> typing.List[str]: raise ValidationError(f"Invalid user ID format: {user_id}", cls) return value_list + @validator("experimental_alive_check") + @classmethod + def to_pebble_check(cls, value: str) -> typing.Dict[str, typing.Union[str, int]]: + """Convert the experimental_alive_check field to pebble check. + + Args: + value: the input value. + + Returns: + The pebble check. + + Raises: + ValidationError: if experimental_alive_check is invalid. + """ + # expected + # period,threshold,timeout + config_values = value.split(",") + if len(config_values) != 3: + raise ValidationError( + f"Invalid experimental_alive_check, less or more than 3 values: {value}", cls + ) + try: + period = config_values[0].strip().lower() + if period[-1] not in ("s", "m", "h"): + raise ValidationError( + f"Invalid experimental_alive_check, period should end in s/m/h: {value}", cls + ) + threshold = int(config_values[1].strip()) + timeout = config_values[2].strip().lower() + if timeout[-1] not in ("s", "m", "h"): + raise ValidationError( + f"Invalid experimental_alive_check, timeout should end in s/m/h: {value}", cls + ) + return {"period": period, "threshold": threshold, "timeout": timeout} + except ValueError as exc: + raise ValidationError( + f"Invalid experimental_alive_check, threshold should be a number: {value}", cls + ) from exc + @dataclasses.dataclass(frozen=True) class CharmState: # pylint: disable=too-many-instance-attributes @@ -274,23 +347,23 @@ class CharmState: # pylint: disable=too-many-instance-attributes Attributes: synapse_config: synapse configuration. datasource: datasource information. - irc_bridge_datasource: irc bridge datasource information. saml_config: saml configuration. smtp_config: smtp configuration. media_config: media configuration. redis_config: redis configuration. proxy: proxy information. instance_map_config: Instance map configuration with main and worker addresses. + registration_secrets: Registration secrets received via matrix-auth integration. 
""" synapse_config: SynapseConfig datasource: typing.Optional[DatasourcePostgreSQL] - irc_bridge_datasource: typing.Optional[DatasourcePostgreSQL] saml_config: typing.Optional[SAMLConfiguration] smtp_config: typing.Optional[SMTPConfiguration] media_config: typing.Optional[MediaConfiguration] redis_config: typing.Optional[RedisConfiguration] instance_map_config: typing.Optional[typing.Dict] + registration_secrets: typing.Optional[typing.List] @property def proxy(self) -> "ProxyConfig": @@ -308,34 +381,31 @@ def proxy(self) -> "ProxyConfig": no_proxy=no_proxy, ) - # pylint: disable=too-many-arguments - # this either needs to be refactored or it's fine as is for now - # the disable stems from the additional datasoure for irc bridge - # and that might end up in a separate charm # from_charm receives configuration from all integration so too many arguments. @classmethod - def from_charm( # pylint: disable=too-many-arguments + # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-locals + def from_charm( cls, charm: ops.CharmBase, datasource: typing.Optional[DatasourcePostgreSQL], - irc_bridge_datasource: typing.Optional[DatasourcePostgreSQL], saml_config: typing.Optional[SAMLConfiguration], smtp_config: typing.Optional[SMTPConfiguration], media_config: typing.Optional[MediaConfiguration], redis_config: typing.Optional[RedisConfiguration], instance_map_config: typing.Optional[typing.Dict], + registration_secrets: typing.Optional[typing.List], ) -> "CharmState": """Initialize a new instance of the CharmState class from the associated charm. Args: charm: The charm instance associated with this state. datasource: datasource information to be used by Synapse. - irc_bridge_datasource: irc bridge datasource information to be used by Synapse. saml_config: saml configuration to be used by Synapse. smtp_config: SMTP configuration to be used by Synapse. media_config: Media configuration to be used by Synapse. redis_config: Redis configuration to be used by Synapse. instance_map_config: Instance map configuration with main and worker addresses. + registration_secrets: Registration secrets received via matrix-auth integration. Return: The CharmState instance created by the provided charm. @@ -344,16 +414,31 @@ def from_charm( # pylint: disable=too-many-arguments CharmConfigInvalidError: if the charm configuration is invalid. 
""" try: + # Compute public_baseurl if not configured + config = dict(charm.config.items()) + if not config.get("public_baseurl"): + # We use HTTPS here as it's the standard used by matrix-auth + public_baseurl = f"https://{config.get('server_name')}" + # We ignore protected-access here because we want to get the ingress url + # pylint: disable=protected-access + if ingress_url := charm._ingress.url: # type: ignore + public_baseurl = ingress_url + config["public_baseurl"] = public_baseurl + # ignoring because mypy fails with: # "has incompatible type "**dict[str, str]"; expected ..."" - valid_synapse_config = SynapseConfig(**dict(charm.config.items())) # type: ignore + valid_synapse_config = SynapseConfig(**config) # type: ignore + logger.info("parsed synapse config: %s", valid_synapse_config) # remove workers from instance_map if instance_map_config and valid_synapse_config.workers_ignore_list: logger.debug( "Removing %s from instance_map", valid_synapse_config.workers_ignore_list ) workers_to_ignore = [ - w.strip() for w in valid_synapse_config.workers_ignore_list.split(",") + # due to pydantic bump, need to refactor + # pylint: disable=no-member + w.strip() + for w in valid_synapse_config.workers_ignore_list.split(",") ] for worker in workers_to_ignore: if worker in instance_map_config: @@ -371,10 +456,10 @@ def from_charm( # pylint: disable=too-many-arguments return cls( synapse_config=valid_synapse_config, datasource=datasource, - irc_bridge_datasource=irc_bridge_datasource, saml_config=saml_config, smtp_config=smtp_config, media_config=media_config, redis_config=redis_config, instance_map_config=instance_map_config, + registration_secrets=registration_secrets, ) diff --git a/src/charm_types.py b/src/charm_types.py index 2b01c48e..810aea61 100644 --- a/src/charm_types.py +++ b/src/charm_types.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Type definitions for the Synapse charm.""" diff --git a/src/database_client.py b/src/database_client.py index 022a67e3..8f806cb7 100644 --- a/src/database_client.py +++ b/src/database_client.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """The DatabaseClient class.""" diff --git a/src/database_observer.py b/src/database_observer.py index 00e8c1d7..f24a9715 100644 --- a/src/database_observer.py +++ b/src/database_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. # Ignoring for the config change call diff --git a/src/exceptions.py b/src/exceptions.py index 41e88c5e..a33fe2bf 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Exceptions used by the Synapse charm.""" diff --git a/src/grafana_dashboards/synapse.json b/src/grafana_dashboards/synapse.json index 163b709c..3349dfed 100644 --- a/src/grafana_dashboards/synapse.json +++ b/src/grafana_dashboards/synapse.json @@ -439,96 +439,109 @@ } }, { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": "${prometheusds}", "fieldConfig": { "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line+area" - } - }, - "links": [], - "mappings": [], - "max": 1.5, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "transparent", - "value": null - }, - { - "color": "red", - "value": 1 - } - ] - }, - "unit": "percentunit" + "displayName": "CPU usage", + "links": [] }, "overrides": [] }, + "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, "y": 10 }, + "hiddenSeries": false, "id": 75, + "legend": { + "avg": true, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 3, "links": [], + "nullPointMode": "null", "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "alertThreshold": true }, + "paceLength": 10, + "percentage": false, "pluginVersion": "9.5.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { "datasource": "${prometheusds}", - "editorMode": "code", - "expr": "rate(process_cpu_user_seconds_total{juju_application=\"synapse\"}[$__rate_interval]) + rate(process_cpu_system_seconds_total{juju_application=\"synapse\"}[$__rate_interval])", - "legendFormat": "__auto", - "range": true, + "expr": "rate(process_cpu_seconds_total{job=~\".*synapse_application.*\",juju_application=~\"$juju_application\",juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\",juju_unit=~\"$juju_unit\"}[$bucket_size])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} ", "refId": "A" } ], + "thresholds": [ + { + "$$hashKey": "object:566", + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 1, + "yaxis": "left" + } + ], + "timeRegions": [], "title": "CPU usage", - "type": "timeseries" + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:538", + "format": "percentunit", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:539", + "format": "short", + "label": "", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, { "datasource": "${prometheusds}", @@ -913,7 +926,7 @@ "uid": "${prometheusds}" }, "editorMode": "code", - "expr": "count_values(\"juju_unit\", 
up{juju_application=\"synapse\"}) by (juju_model)", + "expr": "count(up{juju_application=\"synapse\", job=~\".*prometheus_scrape_synapse_application.*\", juju_model=~\"$juju_model\"}) by (juju_model)", "legendFormat": "{{juju_model}}", "range": true, "refId": "A" diff --git a/src/grafana_dashboards/synapse_statistics.json b/src/grafana_dashboards/synapse_statistics.json index 6dd244f0..9dbba250 100644 --- a/src/grafana_dashboards/synapse_statistics.json +++ b/src/grafana_dashboards/synapse_statistics.json @@ -1,206 +1,417 @@ { - "__inputs": [ + "annotations": { + "list": [ { - "name": "prometheusds", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" + "builtIn": 1, + "datasource": "${prometheusds}", + "enable": false, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" } - ], - "__elements": {}, - "annotations": { - "list": [ + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 375, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prometheusds}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#5e2750", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisGridShow": true, + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "stepBefore", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.5.3", + "targets": [ { - "builtIn": 1, - "datasource": "${prometheusds}", - "enable": false, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "limit": 100, - "name": "Annotations & Alerts", - "showIn": 0, - "type": "dashboard" + "datasource": { + "type": "prometheus", + "uid": "${prometheusds}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "synapse_total_rooms", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" } - ] + ], + "title": "Rooms", + "type": "timeseries" }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${prometheusds}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" + { + "datasource": { + "type": "prometheus", + "uid": "${prometheusds}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#5e2750", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] + "lineInterpolation": "stepBefore", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "id": 1, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${prometheusds}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "synapse_total_rooms", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] } - ], - "title": "Rooms", - "type": "stat" + }, + "overrides": [] }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.5.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prometheusds}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "synapse_total_users{juju_application=~\"$juju_application\",juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\",juju_unit=~\"$juju_unit\"}", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Users", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [ + "matrix" + ], + "templating": { + "list": [ { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "hide": 0, + "includeAll": true, + "label": "Loki datasource", + "multi": true, + "name": "lokids", + "options": [], + "query": "loki", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "hide": 0, + "includeAll": true, + "label": "Prometheus datasource", + "multi": true, + "name": "prometheusds", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": { - "type": "prometheus", "uid": "${prometheusds}" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "id": 2, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${prometheusds}" - }, - 
"editorMode": "builder", - "exemplar": false, - "expr": "synapse_total_users", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - } - ], - "title": "Users", - "type": "stat" + "definition": "label_values(up{juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\",juju_application=~\"$juju_application\"},juju_unit)", + "hide": 0, + "includeAll": true, + "label": "Juju unit", + "multi": true, + "name": "juju_unit", + "options": [], + "query": { + "query": "label_values(up{juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\",juju_application=~\"$juju_application\"},juju_unit)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "uid": "${prometheusds}" + }, + "definition": "label_values(up{juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\"},juju_application)", + "hide": 0, + "includeAll": true, + "label": "Juju application", + "multi": true, + "name": "juju_application", + "options": [], + "query": { + "query": "label_values(up{juju_model=~\"$juju_model\",juju_model_uuid=~\"$juju_model_uuid\"},juju_application)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "uid": "${prometheusds}" + }, + "definition": "label_values(up{juju_model=~\"$juju_model\"},juju_model_uuid)", + "hide": 0, + "includeAll": true, + "label": "Juju model uuid", + "multi": true, + "name": "juju_model_uuid", + "options": [], + "query": { + "query": "label_values(up{juju_model=~\"$juju_model\"},juju_model_uuid)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "uid": "${prometheusds}" + }, + "definition": "label_values(up,juju_model)", + "hide": 0, + "includeAll": true, + "label": "Juju model", + "multi": true, + "name": "juju_model", + "options": [], + "query": { + "query": "label_values(up,juju_model)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } + ] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": { + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" ], - "refresh": false, - "schemaVersion": 37, - "style": "dark", - "tags": [ - "matrix" - ], - "templating": { - "list": [] - }, - "time": { - "from": "now-3h", - "to": "now" - }, - "timepicker": { - "now": true, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "utc", - "title": "Synapse Statistics", - "uid": null, - "version": 1, - "weekStart": "" - } 
+ "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Synapse Statistics", + "version": 2, + "weekStart": "" +} diff --git a/src/irc_bridge.py b/src/irc_bridge.py deleted file mode 100644 index 9a280147..00000000 --- a/src/irc_bridge.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Provide the IRC bridge class to represent the matrix-appservice-app plugin for Synapse.""" - -# disabling due the fact that collect status does many checks -# pylint: disable=too-many-return-statements - -import logging - -import ops -from ops.pebble import APIError, ExecError - -import synapse -from charm_state import CharmState - -logger = logging.getLogger(__name__) - -IRC_SERVICE_NAME = "irc" - - -class PEMCreateError(Exception): - """An exception raised when the PEM file creation fails.""" - - def __init__(self, message: str): - """Initialize a new instance of the PEMCreateError class. - - Args: - message: The error message. - """ - super().__init__(message) - - -def enable_irc_bridge(charm_state: CharmState, container: ops.model.Container) -> None: - """Enable irc service. - - The required steps to enable the IRC bridge are: - - Create the IRC bridge configuration file. - - Generate a PEM file for the IRC bridge. - - Add the IRC bridge application in the homeserver config. - - Args: - charm_state: Instance of CharmState. - container: The container to enable the IRC bridge in. - - """ - if not container.can_connect(): - logger.info("Pebble socket not available. Deferring configuration.") - return - logger.info("Enabling IRC bridge.") - db_connect_string = _get_db_connection(charm_state) - if db_connect_string == "": - logger.info("No database connection string found. Skipping IRC bridge configuration.") - return - synapse.create_irc_bridge_config( - container=container, charm_state=charm_state, db_connect_string=db_connect_string - ) - synapse.create_irc_bridge_app_registration(container=container) - _create_pem_file(container=container) - - -def _create_pem_file(container: ops.model.Container) -> None: - """Create a PEM file for the IRC bridge. - - Args: - container: The container to create the PEM file in. - - Raises: - PEMCreateError: If the PEM file creation fails. - """ - pem_create_command = [ - "/bin/bash", - "-c", - "[[ -f /data/config/irc_passkey.pem ]] || " - + "openssl genpkey -out /data/config/irc_passkey.pem " - + "-outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:2048", - ] - logger.info("Creating PEM file for IRC bridge.") - try: - exec_process = container.exec( - pem_create_command, - ) - stdout, stderr = exec_process.wait_output() - logger.info("PEM create output: %s. %s.", stdout, stderr) - except (APIError, ExecError) as exc: - raise PEMCreateError("PEM creation failed.") from exc - - -def _get_db_connection(charm_state: CharmState) -> str: - """Get the database connection string. - - Args: - charm_state: Instance of CharmState. - - Returns: - The database connection string. 
- """ - if charm_state.irc_bridge_datasource is None: - return "" - db_connect_string = ( - "postgres://" - f"{charm_state.irc_bridge_datasource['user']}" - f":{charm_state.irc_bridge_datasource['password']}" - f"@{charm_state.irc_bridge_datasource['host']}" - f":{charm_state.irc_bridge_datasource['port']}" - f"/{charm_state.irc_bridge_datasource['db']}" - ) - return db_connect_string diff --git a/src/matrix_auth_observer.py b/src/matrix_auth_observer.py new file mode 100644 index 00000000..1e6498f0 --- /dev/null +++ b/src/matrix_auth_observer.py @@ -0,0 +1,168 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The Matrix Auth relation observer.""" + +import logging +import pathlib +from pathlib import Path +from typing import List, NamedTuple, Optional + +import ops +from charms.synapse.v1.matrix_auth import ( + MatrixAuthProviderData, + MatrixAuthProvides, + MatrixAuthRequirerData, +) +from ops.framework import Object + +import synapse +from charm_state import CharmBaseWithState, CharmState, inject_charm_state + +logger = logging.getLogger(__name__) + + +class MatrixAuthObserver(Object): + """The Matrix Auth relation observer.""" + + def __init__(self, charm: CharmBaseWithState): + """Initialize the observer and register event handlers. + + Args: + charm: The parent charm to attach the observer to. + """ + super().__init__(charm, "matrix-auth-observer") + self._charm = charm + self.matrix_auth = MatrixAuthProvides(self._charm) + # matrix_auth_request_received conflicts with on defined by Redis... + self.framework.observe( + self._charm.on["matrix-auth"].relation_changed, self._on_matrix_auth_relation_changed + ) + self.framework.observe( + self._charm.on["matrix-auth"].relation_departed, self._on_matrix_auth_relation_departed + ) + + def get_charm(self) -> CharmBaseWithState: + """Return the current charm. + + Returns: + The current charm + """ + return self._charm + + def update_matrix_auth_integration(self, charm_state: CharmState) -> None: + """Update matrix auth integration relation data. + + Args: + charm_state: The charm state. + """ + for relation in list(self._charm.model.relations["matrix-auth"]): + if not relation.units: + return + provider_data = self._get_matrix_auth_provider_data(charm_state) + if self._matrix_auth_relation_updated(relation, provider_data): + return + self.matrix_auth.update_relation_data(relation, provider_data) + + def get_requirer_registration_secrets(self) -> Optional[List]: + """Get requirers registration secrets (application services). + + Returns: + dict with filepath and content for creating the secret files. + """ + registration_secrets = [] + + class RegistrationSecret(NamedTuple): + """Define a registration secret. + + Attributes: + file_path: secret path in filesystem. + value: secret content. 
+ """ + + file_path: pathlib.Path + value: str + + for relation in list(self._charm.model.relations["matrix-auth"]): + requirer_data = MatrixAuthRequirerData.from_relation(self.model, relation=relation) + if requirer_data and requirer_data.registration: + registration = requirer_data.registration + filename = f"{relation.name}-{relation.id}" + file_path = ( + Path(synapse.SYNAPSE_CONFIG_DIR) / f"appservice-registration-{filename}.yaml" + ) + # get_secret_value is dynamically created + registration_secrets.append( + RegistrationSecret( + file_path, registration.get_secret_value() # pylint: disable=no-member + ) + ) + return registration_secrets + + def _get_matrix_auth_provider_data( + self, charm_state: CharmState + ) -> Optional[MatrixAuthProviderData]: + """Get Synapse configuration as expected by the matrix auth relation. + + The integration will share homeserver URL and registration shared secret. + + Args: + charm_state: The charm state. + + Returns: + MatrixAuthConfiguration instance. + """ + # future refactor victim: this is repeated with saml + homeserver = ( + charm_state.synapse_config.public_baseurl + if charm_state.synapse_config.public_baseurl is not None + else f"https://{charm_state.synapse_config.server_name}" + ) + # assuming that shared secret is always found + container = self._charm.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) + shared_secret = synapse.get_registration_shared_secret(container=container) + return MatrixAuthProviderData(homeserver=homeserver, shared_secret=shared_secret) + + def _matrix_auth_relation_updated( + self, relation: ops.Relation, provider_data: MatrixAuthProviderData + ) -> bool: + """Compare current information with the one in the relation. + + This check is done to prevent triggering relation-changed. + + Args: + relation: The matrix-auth relation. + provider_data: current Synapse configuration as MatrixAuthProviderData. + + Returns: + True if current configuration and relation data are the same. + """ + relation_homeserver = relation.data[self._charm.app].get("homeserver", "") + relation_shared_secret = relation.data[self._charm.app].get("shared_secret", "") + if ( + provider_data.homeserver != relation_homeserver + or provider_data.shared_secret != relation_shared_secret + ): + logger.info("matrix-auth relation ID %s is outdated and will be updated", relation.id) + return False + return True + + @inject_charm_state + def _on_matrix_auth_relation_changed(self, _: ops.EventBase, charm_state: CharmState) -> None: + """Handle matrix-auth request received event. + + Args: + charm_state: The charm state. + """ + logger.debug("_on_matrix_auth_relation_changed emitting reconcile") + self._charm.reconcile(charm_state) + + @inject_charm_state + def _on_matrix_auth_relation_departed(self, _: ops.EventBase, charm_state: CharmState) -> None: + """Handle matrix-auth relation departed event. + + Args: + charm_state: The charm state. + """ + logger.debug("_on_matrix_auth_relation_departed emitting reconcile") + self._charm.reconcile(charm_state) diff --git a/src/media_observer.py b/src/media_observer.py index dae6a544..7687005d 100644 --- a/src/media_observer.py +++ b/src/media_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """The media integrator relation observer.""" diff --git a/src/mjolnir.py b/src/mjolnir.py index d16d5685..0c2ee9b0 100644 --- a/src/mjolnir.py +++ b/src/mjolnir.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. 
+# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Provide the Mjolnir class to represent the Mjolnir plugin for Synapse.""" @@ -80,6 +80,11 @@ def _on_collect_status( # noqa: C901 """ if not charm_state.synapse_config.enable_mjolnir: return + container = self._charm.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) + if not container.can_connect(): + self._charm.unit.status = ops.MaintenanceStatus("Waiting for Synapse pebble") + return + mjolnir_service = container.get_services(MJOLNIR_SERVICE_NAME) # This check is the same done in get_main_unit. It should be refactored # to a place where both Charm and Mjolnir can get it. peer_relation = self._charm.model.relations[synapse.SYNAPSE_PEER_RELATION_NAME] @@ -93,13 +98,12 @@ def _on_collect_status( # noqa: C901 peer_relation[0].data[self._charm.app].get("main_unit_id", self._charm.unit.name) ) if not self._charm.unit.name == main_unit_id: - logger.info("This is not the main unit, skipping Mjolnir configuration") + if mjolnir_service: + logger.info("This is not the main unit, stopping Mjolnir") + container.stop(MJOLNIR_SERVICE_NAME) + else: + logger.info("This is not the main unit, skipping Mjolnir configuration") return - container = self._charm.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME) - if not container.can_connect(): - self._charm.unit.status = ops.MaintenanceStatus("Waiting for Synapse pebble") - return - mjolnir_service = container.get_services(MJOLNIR_SERVICE_NAME) if mjolnir_service: mjolnir_not_active = [ service for service in mjolnir_service.values() if not service.is_running() @@ -212,7 +216,7 @@ def enable_mjolnir(self, charm_state: CharmState, admin_access_token: str) -> No admin_access_token=admin_access_token, room_id=room_id, ) - synapse.create_mjolnir_config( + synapse.generate_mjolnir_config( container=container, access_token=mjolnir_access_token, room_id=room_id ) synapse.override_rate_limit( diff --git a/src/observability.py b/src/observability.py index 8de31f36..fcc83599 100644 --- a/src/observability.py +++ b/src/observability.py @@ -1,9 +1,8 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Provide the Observability class to represent the observability stack for Synapse.""" -import typing import ops from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider @@ -28,14 +27,25 @@ def __init__(self, charm: ops.CharmBase): self._grafana_dashboards = GrafanaDashboardProvider( charm, relation_name="grafana-dashboard" ) - self.targets = [ - f"*:{synapse.PROMETHEUS_MAIN_TARGET_PORT}", + synapse_target = [ + f"*:{synapse.SYNAPSE_EXPORTER_PORT}", + ] + synapse_stats_target = [ f"*:{synapse.STATS_EXPORTER_PORT}", ] self._metrics_endpoint = MetricsEndpointProvider( charm, relation_name="metrics-endpoint", - jobs=[{"static_configs": [{"targets": self.targets}]}], + jobs=[ + { + "job_name": "synapse_application", + "static_configs": [{"targets": synapse_target}], + }, + { + "job_name": "synapse_stats_exporter", + "static_configs": [{"targets": synapse_stats_target}], + }, + ], ) self._logging = LogProxyConsumer( charm, @@ -46,17 +56,3 @@ def __init__(self, charm: ops.CharmBase): }, }, ) - - def update_targets(self, targets: typing.List[str]) -> None: - """Update prometheus targets. - - Args: - targets: new target list. 
- """ - self.targets.sort() - targets.sort() - if targets != self.targets: - self._metrics_endpoint.update_scrape_job_spec( - jobs=[{"static_configs": [{"targets": targets}]}] - ) - self.targets = targets diff --git a/src/pebble.py b/src/pebble.py index 974ee1ee..8070bb90 100644 --- a/src/pebble.py +++ b/src/pebble.py @@ -1,8 +1,11 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. +# Ignoring for the config change call +# mypy: disable-error-code="attr-defined" + """Class to interact with pebble.""" import logging @@ -15,7 +18,6 @@ import synapse from charm_state import CharmState -from irc_bridge import enable_irc_bridge logger = logging.getLogger(__name__) @@ -38,31 +40,41 @@ def __init__(self, msg: str): self.msg = msg -def check_synapse_ready() -> ops.pebble.CheckDict: - """Return the Synapse container ready check. +def check_synapse_alive(charm_state: CharmState) -> ops.pebble.CheckDict: + """Return the Synapse container alive check. + + Args: + charm_state: Instance of CharmState. Returns: Dict: check object converted to its dict representation. """ - check = Check(synapse.CHECK_READY_NAME) + check = Check(synapse.CHECK_ALIVE_NAME) check.override = "replace" - check.level = "ready" - check.timeout = "10s" - check.period = "1m" - check.http = {"url": f"{synapse.SYNAPSE_URL}/health"} + check.level = "alive" + check.tcp = {"port": synapse.SYNAPSE_PORT} + experimental_alive_check = charm_state.synapse_config.experimental_alive_check + if experimental_alive_check: + # The default values will tolerate failure for ~10 minutes before restarting Synapse + check.period = experimental_alive_check.get("period", "2m") + check.threshold = experimental_alive_check.get("threshold", 5) + check.timeout = experimental_alive_check.get("timeout", "20s") return check.to_dict() -def check_synapse_alive() -> ops.pebble.CheckDict: - """Return the Synapse container alive check. +def check_synapse_ready() -> ops.pebble.CheckDict: + """Return the Synapse container ready check. Returns: Dict: check object converted to its dict representation. """ - check = Check(synapse.CHECK_ALIVE_NAME) + check = Check(synapse.CHECK_READY_NAME) check.override = "replace" - check.level = "alive" - check.tcp = {"port": synapse.SYNAPSE_PORT} + check.level = "ready" + check.timeout = "20s" + check.period = "2m" + check.threshold = 5 + check.http = {"url": f"{synapse.SYNAPSE_URL}/health"} return check.to_dict() @@ -111,19 +123,9 @@ def check_mjolnir_ready() -> ops.pebble.CheckDict: check.override = "replace" check.level = "ready" check.http = {"url": f"http://localhost:{synapse.MJOLNIR_HEALTH_PORT}/healthz"} - return check.to_dict() - - -def check_irc_bridge_ready() -> ops.pebble.CheckDict: - """Return the Synapse IRC bridge service check. - - Returns: - Dict: check object converted to its dict representation. - """ - check = Check(synapse.CHECK_IRC_BRIDGE_READY_NAME) - check.override = "replace" - check.level = "ready" - check.http = {"url": f"http://localhost:{synapse.IRC_BRIDGE_HEALTH_PORT}/health"} + check.timeout = "10s" + check.threshold = 5 + check.period = "1m" return check.to_dict() @@ -139,23 +141,26 @@ def restart_nginx(container: ops.model.Container, main_unit_address: str) -> Non container.restart(synapse.SYNAPSE_NGINX_SERVICE_NAME) -def replan_mjolnir(container: ops.model.Container) -> None: - """Replan Synapse Mjolnir service. 
+def restart_federation_sender(container: ops.model.Container, charm_state: CharmState) -> None: + """Restart Synapse federation sender service and regenerate configuration. Args: container: Charm container. + charm_state: Instance of CharmState. """ - container.add_layer("synapse-mjolnir", _mjolnir_pebble_layer(), combine=True) - container.replan() + container.add_layer( + "synapse-federation-sender", _pebble_layer_federation_sender(charm_state), combine=True + ) + container.restart(synapse.SYNAPSE_FEDERATION_SENDER_SERVICE_NAME) -def replan_irc_bridge(container: ops.model.Container) -> None: - """Replan Synapse IRC bridge service. +def replan_mjolnir(container: ops.model.Container) -> None: + """Replan Synapse Mjolnir service. Args: container: Charm container. """ - container.add_layer("synapse-irc", _irc_bridge_pebble_layer(), combine=True) + container.add_layer("synapse-mjolnir", _mjolnir_pebble_layer(), combine=True) container.replan() @@ -186,6 +191,21 @@ def replan_stats_exporter(container: ops.model.Container, charm_state: CharmStat logger.exception(str(e)) +def replan_synapse_federation_sender( + container: ops.model.Container, charm_state: CharmState +) -> None: + """Replan Synapse Federation Sender service. + + Args: + container: Charm container. + charm_state: Instance of CharmState. + """ + container.add_layer( + "synapse-federation-sender", _pebble_layer_federation_sender(charm_state), combine=True + ) + container.replan() + + def _get_synapse_config(container: ops.model.Container) -> dict: """Get the current Synapse configuration. @@ -284,17 +304,43 @@ def reconcile( # noqa: C901 pylint: disable=too-many-branches,too-many-statemen synapse.execute_migrate_config(container=container, charm_state=charm_state) existing_synapse_config = _get_synapse_config(container) current_synapse_config = _get_synapse_config(container) + + synapse.set_public_baseurl(current_synapse_config, charm_state) + if charm_state.synapse_config.block_non_admin_invites: + logger.debug("pebble.change_config: Enabling Block non admin invites") + synapse.block_non_admin_invites(current_synapse_config, charm_state=charm_state) synapse.enable_metrics(current_synapse_config) synapse.enable_forgotten_room_retention(current_synapse_config) synapse.enable_media_retention(current_synapse_config) synapse.enable_stale_devices_deletion(current_synapse_config) + synapse.enable_rc_joins_remote_rate(current_synapse_config, charm_state=charm_state) synapse.enable_serve_server_wellknown(current_synapse_config) synapse.enable_replication(current_synapse_config) + if ( + charm_state.synapse_config.invite_checker_policy_rooms + or charm_state.synapse_config.invite_checker_blocklist_allowlist_url + ): + logger.debug("pebble.change_config: Enabling enable_synapse_invite_checker") + synapse.enable_synapse_invite_checker(current_synapse_config, charm_state=charm_state) + if charm_state.synapse_config.limit_remote_rooms_complexity: + logger.debug("pebble.change_config: Enabling limit_remote_rooms_complexity") + synapse.enable_limit_remote_rooms_complexity( + current_synapse_config, charm_state=charm_state + ) if charm_state.instance_map_config is not None: logger.debug("pebble.change_config: Enabling instance_map") synapse.enable_instance_map(current_synapse_config, charm_state=charm_state) logger.debug("pebble.change_config: Enabling stream_writers") synapse.enable_stream_writers(current_synapse_config, charm_state=charm_state) + # the main unit will have an additional layer for running federation sender worker + if is_main: + 
logging.info("pebble.change_config: Adding Federation Sender layer") + synapse.enable_federation_sender(current_synapse_config) + replan_synapse_federation_sender(container=container, charm_state=charm_state) + if charm_state.registration_secrets: + logger.debug("pebble.change_config: Enabling registration_secrets") + synapse.create_registration_secrets_files(container=container, charm_state=charm_state) + synapse.enable_registration_secrets(current_synapse_config, charm_state=charm_state) if charm_state.saml_config is not None: logger.debug("pebble.change_config: Enabling SAML") synapse.enable_saml(current_synapse_config, charm_state=charm_state) @@ -328,11 +374,6 @@ def reconcile( # noqa: C901 pylint: disable=too-many-branches,too-many-statemen if charm_state.datasource and is_main: logger.info("Synapse Stats Exporter enabled.") replan_stats_exporter(container=container, charm_state=charm_state) - if charm_state.synapse_config.enable_irc_bridge: - logger.info("Synapse IRC bridge enabled.") - enable_irc_bridge(container=container, charm_state=charm_state) - synapse.add_app_service_config_field(current_synapse_config) - replan_irc_bridge(container=container) config_has_changed = DeepDiff( existing_synapse_config, current_synapse_config, @@ -345,47 +386,21 @@ def reconcile( # noqa: C901 pylint: disable=too-many-branches,too-many-statemen # Push worker configuration _push_synapse_config( container, - synapse.generate_worker_config(unit_number), + synapse.generate_worker_config(unit_number, is_main), config_path=synapse.SYNAPSE_WORKER_CONFIG_PATH, ) # Push main configuration _push_synapse_config(container, current_synapse_config) synapse.validate_config(container=container) restart_synapse(container=container, charm_state=charm_state, is_main=is_main) + if is_main and charm_state.instance_map_config is not None: + restart_federation_sender(container=container, charm_state=charm_state) else: logging.info("Configuration has not changed, no action.") except (synapse.WorkloadError, ops.pebble.PathError) as exc: raise PebbleServiceError(str(exc)) from exc -def reset_instance(charm_state: CharmState, container: ops.model.Container) -> None: - """Reset instance. - - Args: - charm_state: Instance of CharmState - container: Charm container. - - Raises: - PebbleServiceError: if something goes wrong while interacting with Pebble. - """ - # This is needed in the case of relation with Postgresql. - # If there is open connections it won't be possible to drop the database. - try: - logger.info("Replan service to not restart") - container.add_layer( - synapse.SYNAPSE_CONTAINER_NAME, - _pebble_layer_without_restart(charm_state), - combine=True, - ) - container.replan() - logger.info("Stop Synapse instance") - container.stop(synapse.SYNAPSE_SERVICE_NAME) - logger.info("Erase Synapse data") - synapse.reset_instance(container) - except ops.pebble.PathError as exc: - raise PebbleServiceError(str(exc)) from exc - - def _pebble_layer(charm_state: CharmState, is_main: bool = True) -> ops.pebble.LayerDict: """Return a dictionary representing a Pebble layer. 
@@ -417,8 +432,8 @@ def _pebble_layer(charm_state: CharmState, is_main: bool = True) -> ops.pebble.L } }, "checks": { + synapse.CHECK_ALIVE_NAME: check_synapse_alive(charm_state), synapse.CHECK_READY_NAME: check_synapse_ready(), - synapse.CHECK_ALIVE_NAME: check_synapse_alive(), }, } return typing.cast(ops.pebble.LayerDict, layer) @@ -515,51 +530,53 @@ def _cron_pebble_layer(charm_state: CharmState) -> ops.pebble.LayerDict: return typing.cast(ops.pebble.LayerDict, layer) -def _irc_bridge_pebble_layer() -> ops.pebble.LayerDict: - """Generate pebble config for the irc bridge service. +def _stats_exporter_pebble_layer() -> ops.pebble.LayerDict: + """Generate pebble config for the Synapse Stats Exporter service. Returns: - The pebble configuration for the irc bridge service. + The pebble configuration for the Synapse Stats Exporter service. """ - command_params = ( - f"-c {synapse.IRC_BRIDGE_CONFIG_PATH}" - f" -f {synapse.IRC_BRIDGE_REGISTRATION_PATH}" - f" -p {synapse.IRC_BRIDGE_HEALTH_PORT}" - ) layer = { - "summary": "Synapse irc layer", - "description": "Synapse irc layer", + "summary": "Synapse Stats Exporter layer", + "description": "Synapse Stats Exporter", "services": { - synapse.IRC_BRIDGE_SERVICE_NAME: { + STATS_EXPORTER_SERVICE_NAME: { "override": "replace", - "summary": "IRC service", - "command": f"/bin/node /app/app.js {command_params}", - "startup": "enabled", - }, - }, - "checks": { - synapse.CHECK_IRC_BRIDGE_READY_NAME: check_irc_bridge_ready(), + "summary": "Synapse Stats Exporter service", + "command": "synapse-stats-exporter", + "startup": "disabled", + "on-failure": "ignore", + } }, } return typing.cast(ops.pebble.LayerDict, layer) -def _stats_exporter_pebble_layer() -> ops.pebble.LayerDict: - """Generate pebble config for the Synapse Stats Exporter service. +def _pebble_layer_federation_sender(charm_state: CharmState) -> ops.pebble.LayerDict: + """Return a dictionary representing a Pebble layer. + + Args: + charm_state: Instance of CharmState Returns: - The pebble configuration for the Synapse Stats Exporter service. + pebble layer for Synapse federation sender """ + command = ( + f"{synapse.SYNAPSE_COMMAND_PATH} run -m synapse.app.generic_worker " + f"--config-path {synapse.SYNAPSE_CONFIG_PATH} " + f"--config-path {synapse.SYNAPSE_WORKER_CONFIG_PATH}" + ) + layer = { - "summary": "Synapse Stats Exporter layer", - "description": "Synapse Stats Exporter", + "summary": "Synapse Federation Sender layer", + "description": "pebble config layer for Synapse", "services": { - STATS_EXPORTER_SERVICE_NAME: { + synapse.SYNAPSE_FEDERATION_SENDER_SERVICE_NAME: { "override": "replace", - "summary": "Synapse Stats Exporter service", - "command": "synapse-stats-exporter", - "startup": "disabled", - "on-failure": "ignore", + "summary": "Synapse Federation Sender application service", + "startup": "enabled", + "command": command, + "environment": synapse.get_environment(charm_state), } }, } diff --git a/src/redis_observer.py b/src/redis_observer.py index 5f703543..e6162f99 100644 --- a/src/redis_observer.py +++ b/src/redis_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. # Ignoring for the is_main call diff --git a/src/s3_parameters.py b/src/s3_parameters.py index 0c47d724..c923c671 100644 --- a/src/s3_parameters.py +++ b/src/s3_parameters.py @@ -1,11 +1,11 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Provides S3 Parameters configuration.""" from typing import Any, Optional -from pydantic import BaseModel, Field, validator +from pydantic.v1 import BaseModel, Field, validator class S3Parameters(BaseModel): diff --git a/src/saml_observer.py b/src/saml_observer.py index 6beeb7b7..03aa68e0 100644 --- a/src/saml_observer.py +++ b/src/saml_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """The SAML integrator relation observer.""" @@ -8,6 +8,7 @@ import ops from charms.saml_integrator.v0.saml import SamlDataAvailableEvent, SamlRequires +from ops.charm import RelationBrokenEvent from ops.framework import Object from charm_state import CharmBaseWithState, CharmState, inject_charm_state @@ -31,6 +32,9 @@ def __init__(self, charm: CharmBaseWithState): self._charm = charm self.saml = SamlRequires(self._charm) self.framework.observe(self.saml.on.saml_data_available, self._on_saml_data_available) + self.framework.observe( + charm.on[self.saml.relation_name].relation_broken, self._on_relation_broken + ) def get_charm(self) -> CharmBaseWithState: """Return the current charm. @@ -51,6 +55,17 @@ def _on_saml_data_available(self, _: SamlDataAvailableEvent, charm_state: CharmS logger.debug("_on_saml_data_available emitting reconcile") self.get_charm().reconcile(charm_state) + @inject_charm_state + def _on_relation_broken(self, _: RelationBrokenEvent, charm_state: CharmState) -> None: + """Handle SAML data available. + + Args: + charm_state: The charm state. + """ + self.model.unit.status = ops.MaintenanceStatus("Reloading homeserver configuration") + logger.debug("_on_relation_broken emitting reconcile") + self.get_charm().reconcile(charm_state) + def get_relation_as_saml_conf(self) -> typing.Optional[SAMLConfiguration]: """Get SAML data from relation. diff --git a/src/smtp_observer.py b/src/smtp_observer.py index 402172d5..d52aec3d 100644 --- a/src/smtp_observer.py +++ b/src/smtp_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """The SMTP integrator relation observer.""" @@ -15,7 +15,7 @@ TransportSecurity, ) from ops.framework import Object -from pydantic import ValidationError +from pydantic.v1 import ValidationError from charm_state import CharmBaseWithState, CharmConfigInvalidError, CharmState, inject_charm_state from charm_types import SMTPConfiguration diff --git a/src/synapse/__init__.py b/src/synapse/__init__.py index 79bba86f..1ba825fc 100644 --- a/src/synapse/__init__.py +++ b/src/synapse/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Synapse package is used to interact with Synapse instance.""" @@ -33,21 +33,13 @@ ) from .workload import ( # noqa: F401 CHECK_ALIVE_NAME, - CHECK_IRC_BRIDGE_READY_NAME, CHECK_MJOLNIR_READY_NAME, CHECK_NGINX_READY_NAME, CHECK_READY_NAME, COMMAND_MIGRATE_CONFIG, - IRC_BRIDGE_CONFIG_PATH, - IRC_BRIDGE_HEALTH_PORT, - IRC_BRIDGE_REGISTRATION_PATH, - IRC_BRIDGE_RELATION_NAME, - IRC_BRIDGE_SERVICE_NAME, MJOLNIR_CONFIG_PATH, MJOLNIR_HEALTH_PORT, MJOLNIR_SERVICE_NAME, - PROMETHEUS_MAIN_TARGET_PORT, - PROMETHEUS_WORKER_TARGET_PORT, STATS_EXPORTER_PORT, SYNAPSE_COMMAND_PATH, SYNAPSE_CONFIG_DIR, @@ -56,8 +48,9 @@ SYNAPSE_CRON_SERVICE_NAME, SYNAPSE_DATA_DIR, SYNAPSE_DB_RELATION_NAME, + SYNAPSE_EXPORTER_PORT, + SYNAPSE_FEDERATION_SENDER_SERVICE_NAME, SYNAPSE_GROUP, - SYNAPSE_NGINX_CONTAINER_NAME, SYNAPSE_NGINX_PORT, SYNAPSE_NGINX_SERVICE_NAME, SYNAPSE_PEER_RELATION_NAME, @@ -66,21 +59,33 @@ SYNAPSE_WORKER_CONFIG_PATH, ExecResult, WorkloadError, - add_app_service_config_field, - create_irc_bridge_app_registration, - create_irc_bridge_config, - create_mjolnir_config, + create_registration_secrets_files, + execute_migrate_config, + generate_mjolnir_config, + generate_nginx_config, + generate_worker_config, + get_environment, + get_media_store_path, + get_registration_shared_secret, + validate_config, +) +from .workload_configuration import ( # noqa: F401 + block_non_admin_invites, disable_password_config, disable_room_list_search, enable_allow_public_rooms_over_federation, enable_federation_domain_whitelist, + enable_federation_sender, enable_forgotten_room_retention, enable_instance_map, enable_ip_range_whitelist, + enable_limit_remote_rooms_complexity, enable_media, enable_media_retention, enable_metrics, + enable_rc_joins_remote_rate, enable_redis, + enable_registration_secrets, enable_replication, enable_room_list_publication_rules, enable_saml, @@ -88,13 +93,7 @@ enable_smtp, enable_stale_devices_deletion, enable_stream_writers, + enable_synapse_invite_checker, enable_trusted_key_servers, - execute_migrate_config, - generate_nginx_config, - generate_worker_config, - get_environment, - get_media_store_path, - get_registration_shared_secret, - reset_instance, - validate_config, + set_public_baseurl, ) diff --git a/src/synapse/admin.py b/src/synapse/admin.py index e4d8784f..b67b3949 100644 --- a/src/synapse/admin.py +++ b/src/synapse/admin.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Helper module used to manage admin tasks involving Synapse API and Workload.""" diff --git a/src/synapse/api.py b/src/synapse/api.py index 751568d5..04293c20 100644 --- a/src/synapse/api.py +++ b/src/synapse/api.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Helper module used to manage interactions with Synapse API.""" @@ -234,7 +234,7 @@ def register_user( raise RegisterUserError(str(exc)) from exc -def _generate_mac( +def _generate_mac( # pylint: disable=too-many-positional-arguments shared_secret: str, nonce: str, user: str, @@ -373,7 +373,7 @@ def override_rate_limit(user: User, admin_access_token: str, charm_state: CharmS f"{SYNAPSE_URL}/_synapse/admin/v1/users/" f"@{user.username}:{server_name}/override_ratelimit" ) - _do_request("DELETE", rate_limit_url, admin_access_token=admin_access_token) + _do_request("POST", rate_limit_url, admin_access_token=admin_access_token) def get_room_id( diff --git a/src/synapse/workload.py b/src/synapse/workload.py index c0aa4ff7..b4d40079 100644 --- a/src/synapse/workload.py +++ b/src/synapse/workload.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Helper module used to manage interactions with Synapse.""" @@ -28,15 +28,7 @@ MJOLNIR_CONFIG_PATH = f"{SYNAPSE_CONFIG_DIR}/config/production.yaml" MJOLNIR_HEALTH_PORT = 7777 MJOLNIR_SERVICE_NAME = "mjolnir" -IRC_BRIDGE_CONFIG_PATH = f"{SYNAPSE_CONFIG_DIR}/config/irc_bridge.yaml" -IRC_BRIDGE_REGISTRATION_PATH = f"{SYNAPSE_CONFIG_DIR}/config/appservice-registration-irc.yaml" -IRC_BRIDGE_HEALTH_PORT = "5446" -IRC_BRIDGE_SERVICE_NAME = "irc" -IRC_BRIDGE_BOT_NAME = "irc_bot" -IRC_BRIDGE_RELATION_NAME = "irc-bridge-database" -CHECK_IRC_BRIDGE_READY_NAME = "synapse-irc-ready" -PROMETHEUS_MAIN_TARGET_PORT = "9000" -PROMETHEUS_WORKER_TARGET_PORT = 9101 +SYNAPSE_EXPORTER_PORT = "9000" STATS_EXPORTER_PORT = "9877" SYNAPSE_COMMAND_PATH = "/start.py" SYNAPSE_CONFIG_PATH = f"{SYNAPSE_CONFIG_DIR}/homeserver.yaml" @@ -44,8 +36,8 @@ SYNAPSE_CRON_SERVICE_NAME = "synapse-cron" SYNAPSE_DATA_DIR = "/data" SYNAPSE_DEFAULT_MEDIA_STORE_PATH = "/media_store" +SYNAPSE_FEDERATION_SENDER_SERVICE_NAME = "synapse-federation-sender" SYNAPSE_GROUP = "synapse" -SYNAPSE_NGINX_CONTAINER_NAME = "synapse-nginx" SYNAPSE_NGINX_PORT = 8080 SYNAPSE_NGINX_SERVICE_NAME = "synapse-nginx" SYNAPSE_PEER_RELATION_NAME = "synapse-peers" @@ -89,14 +81,6 @@ class CreateMjolnirConfigError(WorkloadError): """Exception raised when something goes wrong while creating mjolnir config.""" -class CreateIRCBridgeConfigError(WorkloadError): - """Exception raised when something goes wrong while creating irc bridge config.""" - - -class CreateIRCBridgeRegistrationError(WorkloadError): - """Exception raised when something goes wrong while creating irc bridge registration.""" - - class EnableSAMLError(WorkloadError): """Exception raised when something goes wrong while enabling SAML.""" @@ -197,9 +181,7 @@ def _check_server_name(container: ops.Container, charm_state: CharmState) -> Non ): msg = ( f"server_name {charm_state.synapse_config.server_name} is different from the existing " - f"one {configured_server_name}. Please revert the config or run the action " - "reset-instance if you want to erase the existing instance and start a new " - "one." + f"one {configured_server_name}. Please revert the config." ) logger.error(msg) raise ServerNameModifiedError( @@ -232,6 +214,41 @@ def _exec( ) +def get_environment(charm_state: CharmState) -> typing.Dict[str, str]: + """Generate a environment dictionary from the charm configurations. + + Args: + charm_state: Instance of CharmState. + + Returns: + A dictionary representing the Synapse environment variables. 
+ """ + environment = { + "SYNAPSE_CONFIG_DIR": SYNAPSE_CONFIG_DIR, + "SYNAPSE_CONFIG_PATH": SYNAPSE_CONFIG_PATH, + "SYNAPSE_DATA_DIR": SYNAPSE_DATA_DIR, + "SYNAPSE_REPORT_STATS": f"{charm_state.synapse_config.report_stats}", + "SYNAPSE_SERVER_NAME": f"{charm_state.synapse_config.server_name}", + # TLS disabled so the listener is HTTP. HTTPS will be handled by Traefik. + # TODO verify support to HTTPS backend before changing this # pylint: disable=fixme + "SYNAPSE_NO_TLS": str(True), + "LD_PRELOAD": "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2", + } + datasource = charm_state.datasource + if datasource is not None: + environment["POSTGRES_DB"] = datasource["db"] + environment["POSTGRES_HOST"] = datasource["host"] + environment["POSTGRES_PORT"] = datasource["port"] + environment["POSTGRES_USER"] = datasource["user"] + environment["POSTGRES_PASSWORD"] = datasource["password"] + for proxy_variable in ("http_proxy", "https_proxy", "no_proxy"): + proxy_value = getattr(charm_state.proxy, proxy_variable) + if proxy_value: + environment[proxy_variable] = str(proxy_value) + environment[proxy_variable.upper()] = str(proxy_value) + return environment + + def execute_migrate_config(container: ops.Container, charm_state: CharmState) -> None: """Run the Synapse command migrate_config. @@ -282,217 +299,78 @@ def validate_config(container: ops.Container) -> None: raise WorkloadError("Validate config failed, please check the logs") -def enable_metrics(current_yaml: dict) -> None: - """Change the Synapse configuration to enable metrics. - - Args: - current_yaml: current configuration. - - Raises: - EnableMetricsError: something went wrong enabling metrics. - """ - try: - metric_listener = { - "port": int(PROMETHEUS_MAIN_TARGET_PORT), - "type": "metrics", - "bind_addresses": ["::"], - } - current_yaml["listeners"].extend([metric_listener]) - current_yaml["enable_metrics"] = True - except KeyError as exc: - raise EnableMetricsError(str(exc)) from exc - - -def enable_replication(current_yaml: dict) -> None: - """Change the Synapse configuration to enable replication. - - Args: - current_yaml: current configuration. - - Raises: - WorkloadError: something went wrong enabling replication. - """ - try: - resources = {"names": ["replication"]} - metric_listener = { - "port": 8034, - "type": "http", - "bind_addresses": ["::"], - "resources": [resources], - } - current_yaml["listeners"].extend([metric_listener]) - except KeyError as exc: - raise WorkloadError(str(exc)) from exc - - -def enable_forgotten_room_retention(current_yaml: dict) -> None: - """Change the Synapse configuration to enable forgotten_room_retention_period. - - Args: - current_yaml: current configuration. - """ - current_yaml["forgotten_room_retention_period"] = "28d" - - -def enable_media_retention(current_yaml: dict) -> None: - """Change the Synapse configuration to enable media retention. - - Args: - current_yaml: current configuration. - """ - current_yaml["media_retention"] = { - "remote_media_lifetime": "14d", - "local_media_lifetime": "28d", - } - - -def enable_stale_devices_deletion(current_yaml: dict) -> None: - """Change the Synapse configuration to delete stale devices. - - Args: - current_yaml: current configuration. - """ - current_yaml["delete_stale_devices_after"] = "1y" - - -def disable_password_config(current_yaml: dict) -> None: - """Change the Synapse configuration to disable password config. - - Args: - current_yaml: current configuration. 
- """ - current_yaml["password_config"] = {"enabled": False} - - -def disable_room_list_search(current_yaml: dict) -> None: - """Change the Synapse configuration to disable room_list_search. - - Args: - current_yaml: current configuration. - """ - current_yaml["enable_room_list_search"] = False - - -def enable_serve_server_wellknown(current_yaml: dict) -> None: - """Change the Synapse configuration to enable server wellknown file. - - Args: - current_yaml: current configuration. - """ - current_yaml["serve_server_wellknown"] = True - - -def enable_instance_map(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to instance_map config. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - """ - current_yaml["instance_map"] = charm_state.instance_map_config - - -def enable_stream_writers(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to stream_writers config. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - """ - persisters = [] - if charm_state.instance_map_config is not None: - persisters = [key for key in charm_state.instance_map_config.keys() if key != "main"] - persisters.sort() - if persisters is not None: - current_yaml["stream_writers"] = {"events": persisters} - else: - logger.error("Enable stream writers called but no persisters found. Verify peer relation.") - - -def enable_federation_domain_whitelist(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable federation_domain_whitelist. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling configuration. - """ - try: - federation_domain_whitelist = charm_state.synapse_config.federation_domain_whitelist - if federation_domain_whitelist is not None: - current_yaml["federation_domain_whitelist"] = _create_tuple_from_string_list( - federation_domain_whitelist - ) - except KeyError as exc: - raise WorkloadError(str(exc)) from exc - +def generate_nginx_config(container: ops.Container, main_unit_address: str) -> None: + """Generate NGINX configuration based on templates. -def enable_trusted_key_servers(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to set trusted_key_servers. + 1. Copy template files as configuration files to be used. + 2. Run sed command to replace string main-unit in configuration files. + 3. Reload NGINX. Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling configuration. + container: Container of the charm. + main_unit_address: Main unit address to be used in configuration. """ - try: - trusted_key_servers = charm_state.synapse_config.trusted_key_servers - if trusted_key_servers is not None: - current_yaml["trusted_key_servers"] = tuple( - {"server_name": f"{item}"} - for item in _create_tuple_from_string_list(trusted_key_servers) - ) - except KeyError as exc: - raise WorkloadError(str(exc)) from exc - + file_loader = FileSystemLoader(Path("./templates"), followlinks=True) + env = Environment(loader=file_loader, autoescape=True) -def enable_allow_public_rooms_over_federation(current_yaml: dict) -> None: - """Change the Synapse configuration to allow public rooms in federation. 
+ # List of templates and their corresponding output files + templates = [ + ("main_location.conf.j2", "main_location.conf"), + ("abuse_report_location.conf.j2", "abuse_report_location.conf"), + ] - Args: - current_yaml: current configuration. - """ - current_yaml["allow_public_rooms_over_federation"] = True + for template_name, output_file in templates: + template = env.get_template(template_name) + output = template.render(main_unit_address=main_unit_address) + container.push(f"/etc/nginx/{output_file}", output, make_dirs=True) -def _create_tuple_from_string_list(string_list: str) -> tuple[str, ...]: - """Format IP range whitelist. +def generate_worker_config(unit_number: str, is_main: bool) -> dict: + """Generate worker configuration. Args: - string_list: comma separated list configuration. + unit_number: Unit number to be used in the worker_name field. + is_main: if unit is main. Returns: - Tuple as expected by Synapse. - """ - return tuple(item.strip() for item in string_list.split(",")) - - -def enable_ip_range_whitelist(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable ip_range_whitelist. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling configuration. + Worker configuration. """ - try: - ip_range_whitelist = charm_state.synapse_config.ip_range_whitelist - if ip_range_whitelist is None: - logger.warning("enable_ip_range_whitelist called but config is empty") - return - current_yaml["ip_range_whitelist"] = _create_tuple_from_string_list(ip_range_whitelist) - except KeyError as exc: - raise WorkloadError(str(exc)) from exc + worker_listeners = [ + { + "type": "http", + "bind_addresses": ["::"], + "port": 8034, + "resources": [{"names": ["replication"]}], + } + ] + if not is_main: + worker_listeners.extend( + [ + { + "type": "http", + "bind_addresses": ["::"], + "port": 8008, + "x_forwarded": True, + "resources": [{"names": ["client", "federation"]}], + }, + { + "type": "metrics", + "bind_addresses": ["::"], + "port": int(SYNAPSE_EXPORTER_PORT), + }, + ] + ) + worker_config = { + "worker_app": "synapse.app.generic_worker", + "worker_name": "federationsender1" if is_main else f"worker{unit_number}", + "worker_listeners": worker_listeners, + "worker_log_config": "/data/log.config", + } + return worker_config def _get_mjolnir_config(access_token: str, room_id: str) -> typing.Dict: - """Create config as expected by mjolnir. + """Get config as expected by mjolnir. Args: access_token: access token to be used by the mjolnir bot. @@ -510,8 +388,8 @@ def _get_mjolnir_config(access_token: str, room_id: str) -> typing.Dict: return config -def create_mjolnir_config(container: ops.Container, access_token: str, room_id: str) -> None: - """Create mjolnir configuration. +def generate_mjolnir_config(container: ops.Container, access_token: str, room_id: str) -> None: + """Generate mjolnir configuration. Args: container: Container of the charm. @@ -528,446 +406,14 @@ def create_mjolnir_config(container: ops.Container, access_token: str, room_id: raise CreateMjolnirConfigError(str(exc)) from exc -def _get_irc_bridge_config(charm_state: CharmState, db_connect_string: str) -> typing.Dict: - """Create config as expected by irc bridge. - - Args: - charm_state: Instance of CharmState. - db_connect_string: database connection string. 
- - Returns: - IRC Bridge configuration - """ - irc_config_file = Path("templates/irc_bridge_production.yaml").read_text(encoding="utf-8") - config = yaml.safe_load(irc_config_file) - config["homeserver"]["url"] = f"https://{charm_state.synapse_config.server_name}" - config["homeserver"]["domain"] = charm_state.synapse_config.server_name - config["database"]["connectionString"] = db_connect_string - if charm_state.synapse_config.irc_bridge_admins: - config["ircService"]["permissions"] = {} - for admin in charm_state.synapse_config.irc_bridge_admins: - config["ircService"]["permissions"][admin] = "admin" - if charm_state.synapse_config.enable_irc_ident: - config["ircService"]["ident"]["enabled"] = charm_state.synapse_config.enable_irc_ident - return config - - -def create_irc_bridge_config( - container: ops.Container, charm_state: CharmState, db_connect_string: str -) -> None: - """Create irc bridge configuration. +def create_registration_secrets_files(container: ops.Container, charm_state: CharmState) -> None: + """Create registration secrets files. Args: container: Container of the charm. charm_state: Instance of CharmState. - db_connect_string: database connection string. - - Raises: - CreateIRCBridgeConfigError: something went wrong creating irc bridge config. """ - try: - config = _get_irc_bridge_config( - charm_state=charm_state, db_connect_string=db_connect_string - ) - container.push(IRC_BRIDGE_CONFIG_PATH, yaml.safe_dump(config), make_dirs=True) - except ops.pebble.PathError as exc: - raise CreateIRCBridgeConfigError(str(exc)) from exc - - -def _get_irc_bridge_app_registration(container: ops.Container) -> None: # pragma: no cover - # the functionality is tested already in unit tests creating files - """Create registration file as expected by irc bridge. - - Args: - container: Container of the charm. - - Raises: - WorkloadError: something went wrong creating irc bridge registration. - """ - registration_result = _exec( - container, - [ - "/bin/bash", - "-c", - f"[[ -f {IRC_BRIDGE_REGISTRATION_PATH} ]] || " - f"/bin/node /app/app.js -r -f {IRC_BRIDGE_REGISTRATION_PATH} " - f"-u http://localhost:{IRC_BRIDGE_HEALTH_PORT} " - f"-c {IRC_BRIDGE_CONFIG_PATH} -l {IRC_BRIDGE_BOT_NAME}", - ], - ) - if registration_result.exit_code: - logger.error( - "creating irc app registration failed, stdout: %s, stderr: %s", - registration_result.stdout, - registration_result.stderr, - ) - raise WorkloadError("Creating irc app registration failed, please check the logs") - - -def create_irc_bridge_app_registration(container: ops.Container) -> None: # pragma: no cover - # the functionality is tested already in unit tests creating files - """Create irc bridge app registration. - - Args: - container: Container of the charm. - - Raises: - CreateIRCBridgeRegistrationError: error creating irc bridge app registration. - """ - try: - _get_irc_bridge_app_registration(container=container) - except ops.pebble.PathError as exc: - raise CreateIRCBridgeRegistrationError(str(exc)) from exc - - -def add_app_service_config_field(current_yaml: dict) -> None: - """Add app_service_config_files to the Synapse configuration. - - Args: - current_yaml: current configuration. - """ - current_yaml["app_service_config_files"] = [IRC_BRIDGE_REGISTRATION_PATH] - - -def _create_pysaml2_config(charm_state: CharmState) -> typing.Dict: - """Create config as expected by pysaml2. - - Args: - charm_state: Instance of CharmState. - - Returns: - Pysaml2 configuration. - - Raises: - EnableSAMLError: if SAML configuration is not found. 
- """ - if charm_state.saml_config is None: - raise EnableSAMLError( - "SAML Configuration not found. " - "Please verify the integration between SAML Integrator and Synapse." - ) - - saml_config = charm_state.saml_config - entity_id = ( - charm_state.synapse_config.public_baseurl - if charm_state.synapse_config.public_baseurl is not None - else f"https://{charm_state.synapse_config.server_name}" - ) - sp_config = { - "metadata": { - "remote": [ - { - "url": saml_config["metadata_url"], - }, - ], - }, - "allow_unknown_attributes": True, - "service": { - "sp": { - "entityId": entity_id, - "allow_unsolicited": True, - }, - }, - } - # login.staging.ubuntu.com and login.ubuntu.com - # dont send uid in SAMLResponse so this will map - # as expected - if "ubuntu.com" in saml_config["metadata_url"]: - sp_config["attribute_map_dir"] = "/usr/local/attributemaps" - - return sp_config - - -def enable_saml(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable SAML. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - EnableSAMLError: something went wrong enabling SAML. - """ - try: - if charm_state.synapse_config.public_baseurl is not None: - current_yaml["public_baseurl"] = charm_state.synapse_config.public_baseurl - # enable x_forwarded to pass expected headers - current_listeners = current_yaml["listeners"] - updated_listeners = [ - { - **item, - "x_forwarded": ( - True - if "x_forwarded" in item and not item["x_forwarded"] - else item.get("x_forwarded", False) - ), - } - for item in current_listeners - ] - current_yaml["listeners"] = updated_listeners - current_yaml["saml2_enabled"] = True - current_yaml["saml2_config"] = {} - current_yaml["saml2_config"]["sp_config"] = _create_pysaml2_config(charm_state) - user_mapping_provider_config = { - "config": { - "mxid_source_attribute": "uid", - "grandfathered_mxid_source_attribute": "uid", - "mxid_mapping": "dotreplace", - }, - } - current_yaml["saml2_config"]["user_mapping_provider"] = user_mapping_provider_config - except KeyError as exc: - raise EnableSAMLError(str(exc)) from exc - - -def enable_smtp(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable SMTP. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - EnableSMTPError: something went wrong enabling SMTP. - """ - try: - current_yaml["email"] = {} - current_yaml["email"]["enable_notifs"] = charm_state.synapse_config.enable_email_notifs - current_yaml["email"]["notif_from"] = charm_state.synapse_config.notif_from - - if charm_state.smtp_config is None: - raise EnableSMTPError( - "SMTP Configuration not found. " - "Please verify the integration between SMTP Integrator and Synapse." 
- ) - - smtp_config = charm_state.smtp_config - current_yaml["email"]["smtp_host"] = smtp_config["host"] - current_yaml["email"]["smtp_port"] = smtp_config["port"] - if charm_state.smtp_config["user"] is not None: - current_yaml["email"]["smtp_user"] = smtp_config["user"] - if charm_state.smtp_config["password"] is not None: - current_yaml["email"]["smtp_pass"] = smtp_config["password"] - current_yaml["email"]["enable_tls"] = smtp_config["enable_tls"] - current_yaml["email"]["force_tls"] = smtp_config["force_tls"] - current_yaml["email"]["require_transport_security"] = smtp_config[ - "require_transport_security" - ] - except KeyError as exc: - raise EnableSMTPError(str(exc)) from exc - - -def enable_media(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable S3. - - Args: - current_yaml: Current Configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling S3. - """ - try: - if charm_state.media_config is None: - raise WorkloadError( - "Media Configuration not found. " - "Please verify the integration between Media and Synapse." - ) - current_yaml["media_storage_providers"] = [ - { - "module": "s3_storage_provider.S3StorageProviderBackend", - "store_local": True, - "store_remote": True, - "store_synchronous": True, - "config": { - "bucket": charm_state.media_config["bucket"], - "region_name": charm_state.media_config["region_name"], - "endpoint_url": charm_state.media_config["endpoint_url"], - "access_key_id": charm_state.media_config["access_key_id"], - "secret_access_key": charm_state.media_config["secret_access_key"], - "prefix": charm_state.media_config["prefix"], - }, - }, - ] - except KeyError as exc: - raise WorkloadError(str(exc)) from exc - - -def enable_redis(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable Redis. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling SMTP. - """ - try: - current_yaml["redis"] = {} - - if charm_state.redis_config is None: - raise WorkloadError( - "Redis Configuration not found. " - "Please verify the integration between Redis and Synapse." - ) - - redis_config = charm_state.redis_config - current_yaml["redis"]["enabled"] = True - current_yaml["redis"]["host"] = redis_config["host"] - current_yaml["redis"]["port"] = redis_config["port"] - except KeyError as exc: - raise WorkloadError(str(exc)) from exc - - -def enable_room_list_publication_rules(current_yaml: dict, charm_state: CharmState) -> None: - """Change the Synapse configuration to enable room_list_publication_rules. - - This configuration is based on publish_rooms_allowlist charm configuration. - Once is set, a deny rule is added to prevent any other user to publish rooms. - - Args: - current_yaml: current configuration. - charm_state: Instance of CharmState. - - Raises: - WorkloadError: something went wrong enabling room_list_publication_rules. - """ - room_list_publication_rules = [] - # checking publish_rooms_allowlist to fix union-attr mypy error - publish_rooms_allowlist = charm_state.synapse_config.publish_rooms_allowlist - if publish_rooms_allowlist: - for user in publish_rooms_allowlist: - rule = {"user_id": user, "alias": "*", "room_id": "*", "action": "allow"} - room_list_publication_rules.append(rule) - - if len(room_list_publication_rules) == 0: - raise WorkloadError("publish_rooms_allowlist has unexpected value. 
Please, verify it.") - - last_rule = {"user_id": "*", "alias": "*", "room_id": "*", "action": "deny"} - room_list_publication_rules.append(last_rule) - current_yaml["room_list_publication_rules"] = room_list_publication_rules - - -def reset_instance(container: ops.Container) -> None: - """Erase data and config server_name. - - Args: - container: Container of the charm. - - Raises: - PathError: if somethings goes wrong while erasing the Synapse directory. - """ - logging.debug("Erasing directory %s", SYNAPSE_CONFIG_DIR) - try: - container.remove_path(SYNAPSE_CONFIG_DIR, recursive=True) - except PathError as path_error: - # The error "unlinkat //data: device or resource busy" is expected - # when removing the entire directory because it's a volume mount. - # The files will be removed but SYNAPSE_CONFIG_DIR directory will - # remain. - if "device or resource busy" in str(path_error): - pass - else: - logger.exception( - "exception while erasing directory %s: %r", SYNAPSE_CONFIG_DIR, path_error - ) - raise - - -def get_environment(charm_state: CharmState) -> typing.Dict[str, str]: - """Generate a environment dictionary from the charm configurations. - - Args: - charm_state: Instance of CharmState. - - Returns: - A dictionary representing the Synapse environment variables. - """ - environment = { - "SYNAPSE_CONFIG_DIR": SYNAPSE_CONFIG_DIR, - "SYNAPSE_CONFIG_PATH": SYNAPSE_CONFIG_PATH, - "SYNAPSE_DATA_DIR": SYNAPSE_DATA_DIR, - "SYNAPSE_REPORT_STATS": f"{charm_state.synapse_config.report_stats}", - "SYNAPSE_SERVER_NAME": f"{charm_state.synapse_config.server_name}", - # TLS disabled so the listener is HTTP. HTTPS will be handled by Traefik. - # TODO verify support to HTTPS backend before changing this # pylint: disable=fixme - "SYNAPSE_NO_TLS": str(True), - "LD_PRELOAD": "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2", - } - datasource = charm_state.datasource - if datasource is not None: - environment["POSTGRES_DB"] = datasource["db"] - environment["POSTGRES_HOST"] = datasource["host"] - environment["POSTGRES_PORT"] = datasource["port"] - environment["POSTGRES_USER"] = datasource["user"] - environment["POSTGRES_PASSWORD"] = datasource["password"] - for proxy_variable in ("http_proxy", "https_proxy", "no_proxy"): - proxy_value = getattr(charm_state.proxy, proxy_variable) - if proxy_value: - environment[proxy_variable] = str(proxy_value) - environment[proxy_variable.upper()] = str(proxy_value) - return environment - - -def generate_nginx_config(container: ops.Container, main_unit_address: str) -> None: - """Generate NGINX configuration based on templates. - - 1. Copy template files as configuration files to be used. - 2. Run sed command to replace string main-unit in configuration files. - 3. Reload NGINX. - - Args: - container: Container of the charm. - main_unit_address: Main unit address to be used in configuration. - """ - file_loader = FileSystemLoader(Path("./templates"), followlinks=True) - env = Environment(loader=file_loader, autoescape=True) - - # List of templates and their corresponding output files - templates = [ - ("main_location.conf.j2", "main_location.conf"), - ("abuse_report_location.conf.j2", "abuse_report_location.conf"), - ] - - for template_name, output_file in templates: - template = env.get_template(template_name) - output = template.render(main_unit_address=main_unit_address) - container.push(f"/etc/nginx/{output_file}", output, make_dirs=True) - - -def generate_worker_config(unit_number: str) -> dict: - """Generate worker configuration. 
-
-    Args:
-        unit_number: Unit number to be used in the worker_name field.
-
-    Returns:
-        Worker configuration.
-    """
-    worker_config = {
-        "worker_app": "synapse.app.generic_worker",
-        "worker_name": f"worker{unit_number}",
-        "worker_listeners": [
-            {
-                "type": "http",
-                "bind_addresses": ["::"],
-                "port": 8008,
-                "x_forwarded": True,
-                "resources": [{"names": ["client", "federation"]}],
-            },
-            {
-                "type": "http",
-                "bind_addresses": ["::"],
-                "port": 8034,
-                "resources": [{"names": ["replication"]}],
-            },
-            {
-                "type": "metrics",
-                "bind_addresses": ["::"],
-                "port": PROMETHEUS_WORKER_TARGET_PORT,
-            },
-        ],
-        "worker_log_config": "/data/log.config",
-    }
-    return worker_config
+    # Run through a shell: Pebble exec does not expand globs by itself.
+    container.exec(["bash", "-c", f"rm -f {SYNAPSE_CONFIG_DIR}/appservice-registration-*.yaml"])
+    if charm_state.registration_secrets:
+        for registration_secret in charm_state.registration_secrets:
+            registration_secret.file_path.write_text(registration_secret.value, encoding="utf-8")
diff --git a/src/synapse/workload_configuration.py b/src/synapse/workload_configuration.py
new file mode 100644
index 00000000..9e61381e
--- /dev/null
+++ b/src/synapse/workload_configuration.py
@@ -0,0 +1,556 @@
+#!/usr/bin/env python3

+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Helper module used to manage interactions with Synapse homeserver configuration."""
+
+import logging
+import typing
+
+from charm_state import CharmState
+
+from .workload import (
+    SYNAPSE_EXPORTER_PORT,
+    EnableMetricsError,
+    EnableSAMLError,
+    EnableSMTPError,
+    WorkloadError,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _create_tuple_from_string_list(string_list: str) -> tuple[str, ...]:
+    """Format IP range whitelist.
+
+    Args:
+        string_list: comma separated list configuration.
+
+    Returns:
+        Tuple as expected by Synapse.
+    """
+    return tuple(item.strip() for item in string_list.split(","))
+
+
+def set_public_baseurl(current_yaml: dict, charm_state: CharmState) -> None:
+    """Set the homeserver's public address.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+    """
+    current_yaml["public_baseurl"] = charm_state.synapse_config.public_baseurl
+
+
+def disable_password_config(current_yaml: dict) -> None:
+    """Change the Synapse configuration to disable password config.
+
+    Args:
+        current_yaml: current configuration.
+    """
+    current_yaml["password_config"] = {"enabled": False}
+
+
+def disable_room_list_search(current_yaml: dict) -> None:
+    """Change the Synapse configuration to disable room_list_search.
+
+    Args:
+        current_yaml: current configuration.
+    """
+    current_yaml["enable_room_list_search"] = False
+
+
+def block_non_admin_invites(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to block non-admin room invitations.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+    """
+    current_yaml["block_non_admin_invites"] = charm_state.synapse_config.block_non_admin_invites
+
+
+def enable_allow_public_rooms_over_federation(current_yaml: dict) -> None:
+    """Change the Synapse configuration to allow public rooms in federation.
+
+    Args:
+        current_yaml: current configuration.
+    """
+    current_yaml["allow_public_rooms_over_federation"] = True
+
+
+def enable_federation_domain_whitelist(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to enable federation_domain_whitelist.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+ + Raises: + WorkloadError: something went wrong enabling configuration. + """ + try: + federation_domain_whitelist = charm_state.synapse_config.federation_domain_whitelist + if federation_domain_whitelist is not None: + current_yaml["federation_domain_whitelist"] = _create_tuple_from_string_list( + federation_domain_whitelist + ) + except KeyError as exc: + raise WorkloadError(str(exc)) from exc + + +def enable_federation_sender(current_yaml: dict) -> None: + """Change the Synapse configuration to federation sender config. + + Args: + current_yaml: current configuration. + """ + current_yaml["send_federation"] = True + current_yaml["federation_sender_instances"] = ["federationsender1"] + + +def enable_forgotten_room_retention(current_yaml: dict) -> None: + """Change the Synapse configuration to enable forgotten_room_retention_period. + + Args: + current_yaml: current configuration. + """ + current_yaml["forgotten_room_retention_period"] = "28d" + + +def enable_instance_map(current_yaml: dict, charm_state: CharmState) -> None: + """Change the Synapse configuration to instance_map config. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + """ + current_yaml["instance_map"] = charm_state.instance_map_config + + +def enable_ip_range_whitelist(current_yaml: dict, charm_state: CharmState) -> None: + """Change the Synapse configuration to enable ip_range_whitelist. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + + Raises: + WorkloadError: something went wrong enabling configuration. + """ + try: + ip_range_whitelist = charm_state.synapse_config.ip_range_whitelist + if ip_range_whitelist is None: + logger.warning("enable_ip_range_whitelist called but config is empty") + return + current_yaml["ip_range_whitelist"] = _create_tuple_from_string_list(ip_range_whitelist) + except KeyError as exc: + raise WorkloadError(str(exc)) from exc + + +def enable_limit_remote_rooms_complexity(current_yaml: dict, charm_state: CharmState) -> None: + """Enable limit_remote_rooms complexity. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + """ + limit_remote_rooms = { + "enabled": True, + "complexity": charm_state.synapse_config.limit_remote_rooms_complexity, + } + current_yaml["limit_remote_rooms"] = limit_remote_rooms + + +def enable_media(current_yaml: dict, charm_state: CharmState) -> None: + """Change the Synapse configuration to enable S3. + + Args: + current_yaml: Current Configuration. + charm_state: Instance of CharmState. + + Raises: + WorkloadError: something went wrong enabling S3. + """ + try: + if charm_state.media_config is None: + raise WorkloadError( + "Media Configuration not found. " + "Please verify the integration between Media and Synapse." 
+ ) + current_yaml["media_storage_providers"] = [ + { + "module": "s3_storage_provider.S3StorageProviderBackend", + "store_local": True, + "store_remote": True, + "store_synchronous": True, + "config": { + "bucket": charm_state.media_config["bucket"], + "region_name": charm_state.media_config["region_name"], + "endpoint_url": charm_state.media_config["endpoint_url"], + "access_key_id": charm_state.media_config["access_key_id"], + "secret_access_key": charm_state.media_config["secret_access_key"], + "prefix": charm_state.media_config["prefix"], + }, + }, + ] + except KeyError as exc: + raise WorkloadError(str(exc)) from exc + + +def enable_media_retention(current_yaml: dict) -> None: + """Change the Synapse configuration to enable media retention. + + Args: + current_yaml: current configuration. + """ + current_yaml["media_retention"] = { + "remote_media_lifetime": "14d", + "local_media_lifetime": "28d", + } + + +def enable_metrics(current_yaml: dict) -> None: + """Change the Synapse configuration to enable metrics. + + Args: + current_yaml: current configuration. + + Raises: + EnableMetricsError: something went wrong enabling metrics. + """ + try: + metric_listener = { + "port": int(SYNAPSE_EXPORTER_PORT), + "type": "metrics", + "bind_addresses": ["::"], + } + current_yaml["listeners"].extend([metric_listener]) + current_yaml["enable_metrics"] = True + except KeyError as exc: + raise EnableMetricsError(str(exc)) from exc + + +def enable_rc_joins_remote_rate(current_yaml: dict, charm_state: CharmState) -> None: + """Enable rc_joins remote rate. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + """ + rc_joins = { + "remote": { + "per_second": charm_state.synapse_config.rc_joins_remote_per_second, + "burst_count": charm_state.synapse_config.rc_joins_remote_burst_count, + } + } + current_yaml["rc_joins"] = rc_joins + + +def enable_redis(current_yaml: dict, charm_state: CharmState) -> None: + """Change the Synapse configuration to enable Redis. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + + Raises: + WorkloadError: something went wrong enabling Redis. + """ + try: + current_yaml["redis"] = {} + + if charm_state.redis_config is None: + raise WorkloadError( + "Redis Configuration not found. " + "Please verify the integration between Redis and Synapse." + ) + + redis_config = charm_state.redis_config + current_yaml["redis"]["enabled"] = True + current_yaml["redis"]["host"] = redis_config["host"] + current_yaml["redis"]["port"] = redis_config["port"] + except KeyError as exc: + raise WorkloadError(str(exc)) from exc + + +def enable_registration_secrets(current_yaml: dict, charm_state: CharmState) -> None: + """Change the Synapse configuration to enable registration secrets. + + Args: + current_yaml: current configuration. + charm_state: Instance of CharmState. + + Raises: + WorkloadError: something went wrong enabling registration secrets. + """ + try: + if charm_state.registration_secrets is None: + return + current_yaml["app_service_config_files"] = [ + str(registration_secret.file_path) + for registration_secret in charm_state.registration_secrets + ] + except KeyError as exc: + raise WorkloadError(str(exc)) from exc + + +def enable_replication(current_yaml: dict) -> None: + """Change the Synapse configuration to enable replication. + + Args: + current_yaml: current configuration. + + Raises: + WorkloadError: something went wrong enabling replication. 
+    """
+    try:
+        resources = {"names": ["replication"]}
+        replication_listener = {
+            "port": 8035,
+            "type": "http",
+            "bind_addresses": ["::"],
+            "resources": [resources],
+        }
+        current_yaml["listeners"].extend([replication_listener])
+    except KeyError as exc:
+        raise WorkloadError(str(exc)) from exc
+
+
+def enable_room_list_publication_rules(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to enable room_list_publication_rules.
+
+    This configuration is based on the publish_rooms_allowlist charm configuration.
+    Once it is set, a deny rule is added to prevent any other user from publishing rooms.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+
+    Raises:
+        WorkloadError: something went wrong enabling room_list_publication_rules.
+    """
+    room_list_publication_rules = []
+    # checking publish_rooms_allowlist to fix union-attr mypy error
+    publish_rooms_allowlist = charm_state.synapse_config.publish_rooms_allowlist
+    if publish_rooms_allowlist:
+        for user in publish_rooms_allowlist:
+            rule = {"user_id": user, "alias": "*", "room_id": "*", "action": "allow"}
+            room_list_publication_rules.append(rule)
+
+    if len(room_list_publication_rules) == 0:
+        raise WorkloadError("publish_rooms_allowlist has an unexpected value. Please verify it.")
+
+    last_rule = {"user_id": "*", "alias": "*", "room_id": "*", "action": "deny"}
+    room_list_publication_rules.append(last_rule)
+    current_yaml["room_list_publication_rules"] = room_list_publication_rules
+
+
+def enable_synapse_invite_checker(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to enable synapse_invite_checker.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+
+    Raises:
+        WorkloadError: something went wrong enabling synapse_invite_checker.
+    """
+    try:
+        if "modules" not in current_yaml:
+            current_yaml["modules"] = []
+        config = {}
+        if charm_state.synapse_config.invite_checker_blocklist_allowlist_url:
+            config["blocklist_allowlist_url"] = (
+                charm_state.synapse_config.invite_checker_blocklist_allowlist_url
+            )
+        if charm_state.synapse_config.invite_checker_policy_rooms:
+            config["policy_room_ids"] = charm_state.synapse_config.invite_checker_policy_rooms
+        current_yaml["modules"].append(
+            {"module": "synapse_invite_checker.InviteChecker", "config": config},
+        )
+    except KeyError as exc:
+        raise WorkloadError(str(exc)) from exc
+
+
+def _create_pysaml2_config(charm_state: CharmState) -> typing.Dict:
+    """Create config as expected by pysaml2.
+
+    Args:
+        charm_state: Instance of CharmState.
+
+    Returns:
+        Pysaml2 configuration.
+
+    Raises:
+        EnableSAMLError: if SAML configuration is not found.
+    """
+    if charm_state.saml_config is None:
+        raise EnableSAMLError(
+            "SAML Configuration not found. "
+            "Please verify the integration between SAML Integrator and Synapse."
+        )
+
+    saml_config = charm_state.saml_config
+    entity_id = charm_state.synapse_config.public_baseurl
+    sp_config = {
+        "metadata": {
+            "remote": [
+                {
+                    "url": saml_config["metadata_url"],
+                },
+            ],
+        },
+        "allow_unknown_attributes": True,
+        "service": {
+            "sp": {
+                "entityId": entity_id,
+                "allow_unsolicited": True,
+            },
+        },
+    }
+    # login.staging.ubuntu.com and login.ubuntu.com
+    # don't send uid in SAMLResponse, so this will map
+    # as expected
+    if "ubuntu.com" in saml_config["metadata_url"]:
+        sp_config["attribute_map_dir"] = "/usr/local/attributemaps"
+
+    return sp_config
+
+
+def enable_saml(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to enable SAML.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+
+    Raises:
+        EnableSAMLError: something went wrong enabling SAML.
+    """
+    try:
+        # enable x_forwarded to pass expected headers
+        current_listeners = current_yaml["listeners"]
+        updated_listeners = [
+            {
+                **item,
+                "x_forwarded": (
+                    True
+                    if "x_forwarded" in item and not item["x_forwarded"]
+                    else item.get("x_forwarded", False)
+                ),
+            }
+            for item in current_listeners
+        ]
+        current_yaml["listeners"] = updated_listeners
+        current_yaml["saml2_enabled"] = True
+        current_yaml["saml2_config"] = {}
+        current_yaml["saml2_config"]["sp_config"] = _create_pysaml2_config(charm_state)
+        user_mapping_provider_config = {
+            "config": {
+                "mxid_source_attribute": "uid",
+                "grandfathered_mxid_source_attribute": "uid",
+                "mxid_mapping": "dotreplace",
+            },
+        }
+        current_yaml["saml2_config"]["user_mapping_provider"] = user_mapping_provider_config
+    except KeyError as exc:
+        raise EnableSAMLError(str(exc)) from exc
+
+
+def enable_serve_server_wellknown(current_yaml: dict) -> None:
+    """Change the Synapse configuration to enable the server well-known file.
+
+    Args:
+        current_yaml: current configuration.
+    """
+    current_yaml["serve_server_wellknown"] = True
+
+
+def enable_smtp(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to enable SMTP.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+
+    Raises:
+        EnableSMTPError: something went wrong enabling SMTP.
+    """
+    try:
+        current_yaml["email"] = {}
+        current_yaml["email"]["enable_notifs"] = charm_state.synapse_config.enable_email_notifs
+        current_yaml["email"]["notif_from"] = charm_state.synapse_config.notif_from
+
+        if charm_state.smtp_config is None:
+            raise EnableSMTPError(
+                "SMTP Configuration not found. "
+                "Please verify the integration between SMTP Integrator and Synapse."
+            )
+
+        smtp_config = charm_state.smtp_config
+        current_yaml["email"]["smtp_host"] = smtp_config["host"]
+        current_yaml["email"]["smtp_port"] = smtp_config["port"]
+        if charm_state.smtp_config["user"] is not None:
+            current_yaml["email"]["smtp_user"] = smtp_config["user"]
+        if charm_state.smtp_config["password"] is not None:
+            current_yaml["email"]["smtp_pass"] = smtp_config["password"]
+        current_yaml["email"]["enable_tls"] = smtp_config["enable_tls"]
+        current_yaml["email"]["force_tls"] = smtp_config["force_tls"]
+        current_yaml["email"]["require_transport_security"] = smtp_config[
+            "require_transport_security"
+        ]
+    except KeyError as exc:
+        raise EnableSMTPError(str(exc)) from exc
+
+
+def enable_stale_devices_deletion(current_yaml: dict) -> None:
+    """Change the Synapse configuration to delete stale devices.
+
+    Args:
+        current_yaml: current configuration.
+    """
+    current_yaml["delete_stale_devices_after"] = "1y"
+
+
+def enable_stream_writers(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to set the stream_writers config.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+    """
+    persisters = []
+    if charm_state.instance_map_config is not None:
+        persisters = [
+            key
+            for key in charm_state.instance_map_config.keys()
+            if key not in ["main", "federationsender1"]
+        ]
+        persisters.sort()
+    if persisters:
+        current_yaml["stream_writers"] = {"events": persisters}
+    else:
+        logger.error("Enable stream writers called but no persisters found. Verify peer relation.")
+
+
+def enable_trusted_key_servers(current_yaml: dict, charm_state: CharmState) -> None:
+    """Change the Synapse configuration to set trusted_key_servers.
+
+    Args:
+        current_yaml: current configuration.
+        charm_state: Instance of CharmState.
+
+    Raises:
+        WorkloadError: something went wrong enabling configuration.
+    """
+    try:
+        trusted_key_servers = charm_state.synapse_config.trusted_key_servers
+        if trusted_key_servers is not None:
+            current_yaml["trusted_key_servers"] = tuple(
+                {"server_name": f"{item}"}
+                for item in _create_tuple_from_string_list(trusted_key_servers)
+            )
+    except KeyError as exc:
+        raise WorkloadError(str(exc)) from exc
diff --git a/src/user.py b/src/user.py
index b2ff06b6..bb678644 100644
--- a/src/user.py
+++ b/src/user.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
 """User class."""
@@ -10,7 +10,11 @@
 import string
 
 # pydantic is causing this no-name-in-module problem
-from pydantic import BaseModel, Field, validator  # pylint: disable=no-name-in-module,import-error
+from pydantic.v1 import (  # pylint: disable=no-name-in-module,import-error
+    BaseModel,
+    Field,
+    validator,
+)
 
 logger = logging.getLogger(__name__)
diff --git a/synapse_rock/attributemaps/login_ubuntu.py b/synapse_rock/attributemaps/login_ubuntu.py
index 012798e7..76eaa304 100644
--- a/synapse_rock/attributemaps/login_ubuntu.py
+++ b/synapse_rock/attributemaps/login_ubuntu.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
 
 MAP = {
diff --git a/synapse_rock/cron/cron.weekly/remote_content_empty_directory_cleanup.py b/synapse_rock/cron/cron.weekly/remote_content_empty_directory_cleanup.py
index 9952627a..a3780885 100644
--- a/synapse_rock/cron/cron.weekly/remote_content_empty_directory_cleanup.py
+++ b/synapse_rock/cron/cron.weekly/remote_content_empty_directory_cleanup.py
@@ -1,5 +1,5 @@
 #!/usr/bin/python3
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
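# A quick illustration of the enable_stream_writers helper shown earlier in
# this diff, assuming the function is in scope: every instance_map entry
# except "main" and "federationsender1" becomes an event stream writer,
# sorted by name. The CharmState stand-in below is made up for the example;
# only its instance_map_config attribute is consulted.
class _FakeState:
    """Hypothetical stand-in for CharmState."""

    instance_map_config = {
        "main": {"host": "synapse-0.synapse-endpoints", "port": 8035},
        "federationsender1": {"host": "synapse-0.synapse-endpoints", "port": 8034},
        "worker1": {"host": "synapse-1.synapse-endpoints", "port": 8034},
    }

config: dict = {}
enable_stream_writers(config, _FakeState())
assert config == {"stream_writers": {"events": ["worker1"]}}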
""" @@ -9,8 +9,8 @@ Related: https://github.com/matrix-org/synapse/issues/7690 """ -import os import json +import os import time # We assume that pyyaml is present thanks to synapse diff --git a/nginx_rock/etc/abuse_report_location.conf b/synapse_rock/etc/abuse_report_location.conf similarity index 100% rename from nginx_rock/etc/abuse_report_location.conf rename to synapse_rock/etc/abuse_report_location.conf diff --git a/nginx_rock/etc/main_location.conf b/synapse_rock/etc/main_location.conf similarity index 100% rename from nginx_rock/etc/main_location.conf rename to synapse_rock/etc/main_location.conf diff --git a/nginx_rock/etc/nginx.conf b/synapse_rock/etc/nginx.conf similarity index 100% rename from nginx_rock/etc/nginx.conf rename to synapse_rock/etc/nginx.conf diff --git a/nginx_rock/etc/worker_location.conf b/synapse_rock/etc/worker_location.conf similarity index 100% rename from nginx_rock/etc/worker_location.conf rename to synapse_rock/etc/worker_location.conf diff --git a/synapse_rock/rockcraft.yaml b/synapse_rock/rockcraft.yaml index ed2eb7eb..1773fbf3 100644 --- a/synapse_rock/rockcraft.yaml +++ b/synapse_rock/rockcraft.yaml @@ -1,14 +1,12 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. name: synapse summary: Synapse rock description: Synapse OCI image for the Synapse charm -version: "1.0" -base: ubuntu@22.04 -# renovate: base: ubuntu:22.04@sha256:19478ce7fc2ffbce89df29fea5725a8d12e57de52eb9ea570890dc5852aac1ac -build-base: ubuntu@22.04 -# renovate: build-base: ubuntu:22.04@sha256:19478ce7fc2ffbce89df29fea5725a8d12e57de52eb9ea570890dc5852aac1ac +version: "3.0" +base: ubuntu@24.04 +build-base: ubuntu@24.04 license: Apache-2.0 platforms: amd64: @@ -41,6 +39,31 @@ parts: overlay-script: | groupadd -R $CRAFT_OVERLAY --gid 991 synapse useradd -R $CRAFT_OVERLAY --system --gid 991 --uid 991 --home /srv/synapse -m synapse + chmod 755 $CRAFT_OVERLAY/etc + groupadd -R $CRAFT_OVERLAY --gid 2000 nginx + useradd -R $CRAFT_OVERLAY --system --gid 2000 --uid 2000 --no-create-home nginx + nginx-conf: + plugin: dump + source: etc + organize: + nginx.conf: etc/nginx/nginx.conf + worker_location.conf: etc/nginx/worker_location.conf + abuse_report_location.conf.template: etc/nginx/abuse_report_location.conf.template + abuse_report_location.conf: etc/nginx/abuse_report_location.conf + main_location.conf.template: etc/nginx/main_location.conf.template + main_location.conf: etc/nginx/main_location.conf + nginx: + stage-packages: + - logrotate + - nginx + - sed + plugin: nil + override-build: | + craftctl default + rm $CRAFT_PART_INSTALL/etc/nginx/nginx.conf + override-prime: | + craftctl default + mkdir run synapse: build-packages: - build-essential @@ -59,16 +82,16 @@ parts: - python3-dev - python3-pip - python3-setuptools + - python3-venv - zlib1g-dev stage-packages: - - bash - coreutils - curl - gosu - gpg - gpg-agent - libffi-dev - - libicu70 + - libicu74 - libjemalloc2 - libjpeg-turbo8 - libpq5 @@ -76,15 +99,15 @@ parts: - libwebp7 - openssl - python3 + - python3-pip - xmlsec1 stage-snaps: - aws-cli - mjolnir/latest/edge - - matrix-appservice-irc/latest/edge plugin: nil source: https://github.com/element-hq/synapse/ source-type: git - source-tag: v1.101.0 + source-tag: v1.121.1 build-environment: - RUST_VERSION: "1.76.0" - POETRY_VERSION: "1.7.1" @@ -97,30 +120,47 @@ parts: mkdir -p /rust /cargo /synapse /install curl -m 30 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain=$RUST_VERSION --profile minimal 
/rust/toolchains/$RUST_VERSION-x86_64-unknown-linux-gnu/bin/rustc -V > $CRAFT_PART_INSTALL/rust-version - pip3 install -U pip setuptools - pip3 install --root-user-action=ignore "poetry==$POETRY_VERSION" + # + # install synapse requirements + pip3 install --break-system-packages --root-user-action=ignore "poetry==$POETRY_VERSION" cp pyproject.toml poetry.lock /synapse/ - /usr/local/bin/poetry export --extras all -o /synapse/requirements.txt - pip3 install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt + $CRAFT_PART_INSTALL/usr/local/bin/poetry export --extras all -o /synapse/requirements.txt + pip3 install --break-system-packages --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt + # + # install synapse stats exporter git clone https://github.com/canonical/synapse_stats_exporter.git cd synapse_stats_exporter - pip3 install --prefix="/install" --no-warn-script-location -U . + pip3 --python=/usr/bin/python3 install --break-system-packages --prefix="/install" --no-warn-script-location -U . cd .. + # + # install s3 storage provider git clone --branch v1.4.0 --depth 1 https://github.com/matrix-org/synapse-s3-storage-provider cd synapse-s3-storage-provider - pip3 install --prefix="/install" --no-warn-script-location -U . + pip3 install --break-system-packages --prefix="/install" --no-warn-script-location -U . + cd .. + # + # install synapse invite checker + git clone https://git.buechner.me/nbuechner/synapse-invite-checker + cd synapse-invite-checker + git checkout ab6c8b78d78c4cbf31e1a30981ae45c09285b34a + pip3 install --break-system-packages --prefix="/install" --no-warn-script-location -U . cd .. + # + # install synapse cp -r synapse /synapse/ cp -r rust /synapse/ cp pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/ - pip3 install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; + pip3 install --break-system-packages --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; + # fix issue while creating file + # https://github.com/element-hq/synapse/issues/17882 + pip3 install --break-system-packages --prefix="/install" --force-reinstall -v "Twisted==24.7.0" cp docker/start.py $CRAFT_PART_INSTALL/ chmod 755 $CRAFT_PART_INSTALL/start.py cp -r docker/conf $CRAFT_PART_INSTALL/ - cp -r /usr/local $CRAFT_PART_INSTALL/usr/ cp -r /install/local/* $CRAFT_PART_INSTALL/usr/local/ mkdir -p $CRAFT_PART_INSTALL/usr/local/attributemaps chmod 755 $CRAFT_PART_INSTALL/usr/local/attributemaps + rm -rf $CRAFT_PART_INSTALL/aws/dist/awscli/examples/ overlay-packages: - ca-certificates - libjemalloc2 diff --git a/synapse_rock/scripts/run_cron.py b/synapse_rock/scripts/run_cron.py index a52f9c20..d4751e17 100644 --- a/synapse_rock/scripts/run_cron.py +++ b/synapse_rock/scripts/run_cron.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
""" diff --git a/templates/irc_bridge_production.yaml b/templates/irc_bridge_production.yaml deleted file mode 100644 index a4fbc40d..00000000 --- a/templates/irc_bridge_production.yaml +++ /dev/null @@ -1,136 +0,0 @@ -homeserver: - url: "https://ubuntu.com" - domain: "ubuntu.com" - enablePresence: true -ircService: - servers: - irc.eu.libera.chat: - name: "LiberaChat" - onlyAdditionalAddresses: false - networkId: "libera" - port: 6697 - ssl: true - sslselfsign: true - sasl: true - allowExpiredCerts: false - sendConnectionMessages: true - quitDebounce: - enabled: false - quitsPerSecond: 5 - delayMinMs: 3600000 # 1h - delayMaxMs: 7200000 # 2h - modePowerMap: - o: 50 - v: 0 - botConfig: - enabled: true - nick: "UbuntuLiberaBot" - username: "ubunbtuliberabot" - joinChannelsIfNoUsers: true - privateMessages: - enabled: true - federate: true - dynamicChannels: - enabled: true - createAlias: true - published: false - useHomeserverDirectory: true - joinRule: public - federate: true - aliasTemplate: "#libera_$CHANNEL" - membershipLists: - enabled: true - floodDelayMs: 10000 - global: - ircToMatrix: - initial: false - incremental: true - requireMatrixJoined: false - matrixToIrc: - initial: false - incremental: true - ignoreIdleUsersOnStartup: - enabled: false - idleForHours: 720 - exclude: "foobar" - matrixClients: - userTemplate: "@libera_$NICK" - displayName: "$NICK" - joinAttempts: -1 - ircClients: - nickTemplate: "$DISPLAY[m]" - allowNickChanges: true - maxClients: 30 - ipv6: - only: false - idleTimeout: 0 - reconnectIntervalMs: 5000 - concurrentReconnectLimit: 50 - lineLimit: 3 - realnameFormat: "mxid" - kickOn: - channelJoinFailure: true - ircConnectionFailure: true - userQuit: true - bridgeInfoState: - enabled: false - initial: false - ident: - enabled: false - port: 1113 - address: "::" - logging: - level: "debug" - logfile: "debug.log" - errfile: "errors.log" - toConsole: true - maxFiles: 5 - metrics: - enabled: false - port: 7001 - host: 127.0.0.1 - userActivityThresholdHours: 72 # 3 days - remoteUserAgeBuckets: - - "1h" - - "1d" - - "1w" - debugApi: - enabled: false - port: 11100 - provisioning: - enabled: false - widget: false - requestTimeoutSeconds: 300 - rules: - userIds: - exempt: - - "@doubleagent:example.com" - conflict: - - "@.*:example.com" - roomLimit: 50 - http: - port: 7700 - host: 127.0.0.1 - passwordEncryptionKeyPath: "/data/config/irc_passkey.pem" - matrixHandler: - eventCacheSize: 4096 - shortReplyTemplate: "$NICK: $REPLY" - longReplyTemplate: "<$NICK> \"$ORIGINAL\" <- $REPLY" - shortReplyTresholdSeconds: 300 - userActivity: - minUserActiveDays: 1 - inactiveAfterDays: 30 - ircHandler: - mapIrcMentionsToMatrix: "on" # This can be "on", "off", "force-off". - powerLevelGracePeriod: 1000 - perRoomConfig: - enabled: false -advanced: - maxHttpSockets: 1000 - maxTxnSize: 10000000 -sentry: - enabled: false - dsn: "https://@sentry.io/" -database: - engine: "postgres" - connectionString: "" diff --git a/tests/__init__.py b/tests/__init__.py index 289a5245..073ccfce 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Tests module.""" diff --git a/tests/conftest.py b/tests/conftest.py index 74449228..b9ce60be 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Fixtures for Synapse charm tests.""" @@ -6,7 +6,6 @@ from pytest import Parser SYNAPSE_IMAGE_PARAM = "--synapse-image" -SYNAPSE_NGINX_IMAGE_PARAM = "--synapse-nginx-image" def pytest_addoption(parser: Parser) -> None: @@ -16,9 +15,6 @@ def pytest_addoption(parser: Parser) -> None: parser: Pytest parser. """ parser.addoption(SYNAPSE_IMAGE_PARAM, action="store", help="Synapse image to be deployed") - parser.addoption( - SYNAPSE_NGINX_IMAGE_PARAM, action="store", help="Synapse NGINX image to be deployed" - ) parser.addoption("--charm-file", action="store", help="Charm file to be deployed") parser.addoption( "--use-existing", diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e3979c0f..dddb292a 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/any_charm.py b/tests/integration/any_charm.py new file mode 100644 index 00000000..b36bc2ac --- /dev/null +++ b/tests/integration/any_charm.py @@ -0,0 +1,62 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +# pylint: disable=import-error,consider-using-with,no-member,too-few-public-methods + +"""This code should be loaded into any-charm which is used for integration tests.""" + +import logging +import typing + +from any_charm_base import AnyCharmBase +from matrix_auth import MatrixAuthRequirerData, MatrixAuthRequires +from ops.framework import Object +from pydantic import SecretStr + +logger = logging.getLogger(__name__) + + +class AnyCharm(AnyCharmBase): + """Execute a simple charm to test the relation.""" + + def __init__(self, *args, **kwargs): + """Initialize the charm and observe the relation events. + + Args: + args: Arguments to pass to the parent class. + kwargs: Keyword arguments to pass to the parent class + """ + super().__init__(*args, **kwargs) + + self.plugin_auth = MatrixAuthRequires(self, relation_name="require-matrix-auth") + self.framework.observe( + self.plugin_auth.on.matrix_auth_request_processed, + self._on_matrix_auth_request_processed, + ) + + def _on_matrix_auth_request_processed(self, _: Object) -> None: + """Handle the matrix auth request processed event.""" + logger.info("Matrix auth request processed") + content = """id: irc +hs_token: 82c7a893d020b5f28eaf7ba31e1d1091b12ebafc5ceb1b6beac2b93defc1b301 +as_token: a66ae41f82b05bebfc9c259135ce1ce35c856000d542ab5d1f01e0212439d534 +namespaces: + users: + - exclusive: true + regex: '@irc_.*:yourhomeserverdomain' + aliases: + - exclusive: true + regex: '#irc_.*:yourhomeserverdomain' +url: 'http://localhost:8090' +sender_localpart: appservice-irc +rate_limited: false +protocols: + - irc""" + registration = typing.cast(SecretStr, content) + any_charm_data = MatrixAuthRequirerData(registration=registration) + relation = self.model.get_relation(self.plugin_auth.relation_name) + if relation: + logger.info("Matrix auth request setting relation data") + self.plugin_auth.update_relation_data( + relation=relation, matrix_auth_requirer_data=any_charm_data + ) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 8b8bcaab..a7e69df9 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Fixtures for Synapse charm integration tests.""" @@ -19,7 +19,7 @@ from pytest import Config from pytest_operator.plugin import OpsTest -from tests.conftest import SYNAPSE_IMAGE_PARAM, SYNAPSE_NGINX_IMAGE_PARAM +from tests.conftest import SYNAPSE_IMAGE_PARAM from tests.integration.helpers import get_access_token, register_user # caused by pytest fixtures, mark does not work in fixtures @@ -27,6 +27,7 @@ # mypy has trouble to inferred types for variables that are initialized in subclasses. ACTIVE_STATUS_NAME = typing.cast(str, ActiveStatus.name) # type: ignore +WAITING_STATUS_NAME = "waiting" @pytest_asyncio.fixture(scope="module", name="server_name") @@ -75,16 +76,6 @@ def synapse_image_fixture(pytestconfig: Config): return synapse_image -@pytest_asyncio.fixture(scope="module", name="synapse_nginx_image") -def synapse_nginx_image_fixture(pytestconfig: Config): - """Get value from parameter synapse-nginx-image.""" - synapse_nginx_image = pytestconfig.getoption(SYNAPSE_NGINX_IMAGE_PARAM) - use_existing = pytestconfig.getoption("--use-existing", default=False) - if not use_existing: - assert synapse_nginx_image, f"{SYNAPSE_NGINX_IMAGE_PARAM} must be set" - return synapse_nginx_image - - @pytest_asyncio.fixture(scope="module", name="synapse_app_name") def synapse_app_name_fixture() -> str: """Get Synapse application name.""" @@ -97,13 +88,13 @@ def synapse_app_charmhub_name_fixture() -> str: return "synapse-charmhub" +# pylint: disable=too-many-positional-arguments @pytest_asyncio.fixture(scope="module", name="synapse_app") async def synapse_app_fixture( ops_test: OpsTest, synapse_app_name: str, synapse_app_charmhub_name: str, synapse_image: str, - synapse_nginx_image: str, model: Model, server_name: str, synapse_charm: str, @@ -117,7 +108,6 @@ async def synapse_app_fixture( return model.applications[synapse_app_name] resources = { "synapse-image": synapse_image, - "synapse-nginx-image": synapse_nginx_image, } app = await model.deploy( f"./{synapse_charm}", @@ -217,30 +207,16 @@ async def nginx_integrator_app_fixture( "nginx-ingress-integrator", application_name=nginx_integrator_app_name, trust=True, - channel="latest/edge", + channel="latest/stable", + revision=121, + ) + # The nginx-ingress-integrator charm goes into "waiting" when waiting for relation + await model.wait_for_idle( + apps=[nginx_integrator_app_name], raise_on_blocked=True, status=WAITING_STATUS_NAME ) - await model.wait_for_idle(raise_on_blocked=True, status=ACTIVE_STATUS_NAME) return app -@pytest_asyncio.fixture(scope="function", name="another_synapse_app") -async def another_synapse_app_fixture( - model: Model, synapse_app: Application, server_name: str, another_server_name: str -): - """Change server_name.""" - # First we guarantee that the first server_name is set - # Then change it. 
- await synapse_app.set_config({"server_name": server_name}) - - await model.wait_for_idle() - - await synapse_app.set_config({"server_name": another_server_name}) - - await model.wait_for_idle() - - yield synapse_app - - @pytest.fixture(scope="module", name="postgresql_app_name") def postgresql_app_name_app_name_fixture() -> str: """Return the name of the postgresql application deployed for tests.""" @@ -260,88 +236,6 @@ async def postgresql_app_fixture( yield model.applications.get(postgresql_app_name) -@pytest.fixture(scope="module", name="irc_postgresql_app_name") -def irc_postgresql_app_name_app_name_fixture() -> str: - """Return the name of the postgresql application deployed for irc bridge tests.""" - return "irc-postgresql-k8s" - - -@pytest_asyncio.fixture(scope="module", name="irc_postgresql_app") -async def irc_postgresql_app_fixture( - ops_test: OpsTest, - model: Model, - postgresql_app_name: str, - irc_postgresql_app_name: str, - pytestconfig: Config, -): - """Deploy postgresql.""" - use_existing = pytestconfig.getoption("--use-existing", default=False) - if use_existing: - return model.applications[irc_postgresql_app_name] - async with ops_test.fast_forward(): - app = await model.deploy( - postgresql_app_name, - application_name=irc_postgresql_app_name, - channel="14/stable", - trust=True, - ) - await model.wait_for_idle(status=ACTIVE_STATUS_NAME) - return app - - -@pytest.fixture(scope="module", name="grafana_app_name") -def grafana_app_name_fixture() -> str: - """Return the name of the grafana application deployed for tests.""" - return "grafana-k8s" - - -@pytest_asyncio.fixture(scope="module", name="grafana_app") -async def grafana_app_fixture( - ops_test: OpsTest, - model: Model, - grafana_app_name: str, -): - """Deploy grafana.""" - async with ops_test.fast_forward(): - app = await model.deploy( - grafana_app_name, - application_name=grafana_app_name, - channel="latest/edge", - trust=True, - ) - await model.wait_for_idle(raise_on_blocked=True, status=ACTIVE_STATUS_NAME) - - return app - - -@pytest.fixture(scope="module", name="prometheus_app_name") -def prometheus_app_name_fixture() -> str: - """Return the name of the prometheus application deployed for tests.""" - return "prometheus-k8s" - - -@pytest_asyncio.fixture(scope="module", name="prometheus_app") -async def deploy_prometheus_fixture( - ops_test: OpsTest, - model: Model, - prometheus_app_name: str, -): - """Deploy prometheus.""" - async with ops_test.fast_forward(): - app = await model.deploy( - prometheus_app_name, - application_name=prometheus_app_name, - channel="latest/edge", - trust=True, - ) - # Sometimes it comes back after an error. - await model.wait_for_idle( - raise_on_error=False, raise_on_blocked=True, status=ACTIVE_STATUS_NAME - ) - - return app - - @pytest.fixture(scope="module", name="user_username") def user_username_fixture() -> typing.Generator[str, None, None]: """Return the a username to be created for tests.""" diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 1ade47a6..38195bb6 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Helper functions for integration tests.""" diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 54aa8261..ec5594dc 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Core integration tests for Synapse charm.""" @@ -67,6 +67,28 @@ async def test_synapse_validate_configuration(synapse_app: Application): ) +async def test_synapse_configure_roomids(synapse_app: Application): + """ + arrange: build and deploy the Synapse charm. + act: configure invite_checker_policy_rooms with valid room ids. + assert: the Synapse application should be active after setting and + reverting the config. + """ + await synapse_app.set_config( + {"invite_checker_policy_rooms": "a1b2c3d4e5f6g7h8i9j:foo.bar,w1x2y3z4A5B6C7D8E9F:xyz.org"} + ) + + await synapse_app.model.wait_for_idle( + idle_period=30, timeout=120, apps=[synapse_app.name], status="active" + ) + + await synapse_app.reset_config(["invite_checker_policy_rooms"]) + + await synapse_app.model.wait_for_idle( + idle_period=30, timeout=120, apps=[synapse_app.name], status="active" + ) + + async def test_enable_stats_exporter( synapse_app: Application, synapse_app_name: str, @@ -110,31 +132,6 @@ async def test_synapse_scale_blocked(synapse_app: Application): ) -async def test_reset_instance_action( - model: Model, another_synapse_app: Application, another_server_name: str -): - """ - arrange: a deployed Synapse charm in a blocked state due to a server_name change. - act: call the reset_instance action. - assert: the old instance is deleted and the new one configured. - """ - unit = model.applications[another_synapse_app.name].units[0] - # Status string defined in Juju - # https://github.com/juju/juju/blob/2.9/core/status/status.go#L150 - assert unit.workload_status == "blocked" - assert "server_name modification is not allowed" in unit.workload_status_message - action_reset_instance: Action = await another_synapse_app.units[0].run_action( # type: ignore - "reset-instance" - ) - await action_reset_instance.wait() - assert action_reset_instance.status == "completed" - assert action_reset_instance.results["reset-instance"] - assert unit.workload_status == "active" - config = await model.applications[another_synapse_app.name].get_config() - current_server_name = config.get("server_name", {}).get("value") - assert current_server_name == another_server_name - - @pytest.mark.asyncio async def test_workload_version( ops_test: OpsTest, @@ -349,7 +346,9 @@ async def test_nginx_route_integration( act: relate the nginx-integrator charm with the Synapse charm. assert: requesting the charm through nginx-integrator should return a correct response. 
""" - await model.add_relation(f"{synapse_app_name}", f"{nginx_integrator_app_name}") + await model.add_relation( + f"{synapse_app_name}:nginx-route", f"{nginx_integrator_app_name}:nginx-route" + ) await nginx_integrator_app.set_config({"service-hostname": synapse_app_name}) await model.wait_for_idle(idle_period=30, status=ACTIVE_STATUS_NAME) @@ -390,50 +389,7 @@ async def test_synapse_enable_mjolnir( assert res.status_code == 200 -@pytest.mark.irc -@pytest.mark.usefixtures("synapse_app", "irc_postgresql_app") -async def test_synapse_irc_bridge_is_up( - ops_test: OpsTest, - model: Model, - pytestconfig: pytest.Config, - synapse_app: Application, - irc_postgresql_app: Application, - get_unit_ips: typing.Callable[[str], typing.Awaitable[tuple[str, ...]]], -): - """ - arrange: Build and deploy the Synapse charm. - act: Enable the IRC bridge. - assert: Synapse and IRC bridge health points should return correct responses. - """ - use_existing = pytestconfig.getoption("--use-existing", default=False) - if not use_existing: - await model.add_relation( - irc_postgresql_app.name, f"{synapse_app.name}:irc-bridge-database" - ) - await model.wait_for_idle(apps=[irc_postgresql_app.name], status=ACTIVE_STATUS_NAME) - await synapse_app.set_config({"enable_irc_bridge": "true"}) - await synapse_app.model.wait_for_idle( - idle_period=30, timeout=120, apps=[synapse_app.name], status="active" - ) - synapse_ip = (await get_unit_ips(synapse_app.name))[0] - async with ops_test.fast_forward(): - # using fast_forward otherwise would wait for model config update-status-hook-interval - await synapse_app.model.wait_for_idle( - idle_period=30, apps=[synapse_app.name], status="active" - ) - - response = requests.get( - f"http://{synapse_ip}:{synapse.SYNAPSE_NGINX_PORT}/_matrix/static/", timeout=5 - ) - assert response.status_code == 200 - assert "Welcome to the Matrix" in response.text - - irc_bridge_response = requests.get( - f"http://{synapse_ip}:{synapse.IRC_BRIDGE_HEALTH_PORT}/health", timeout=5 - ) - assert irc_bridge_response.status_code == 200 - - +# pylint: disable=too-many-positional-arguments @pytest.mark.mjolnir async def test_synapse_with_mjolnir_from_refresh_is_up( ops_test: OpsTest, @@ -442,7 +398,6 @@ async def test_synapse_with_mjolnir_from_refresh_is_up( get_unit_ips: typing.Callable[[str], typing.Awaitable[tuple[str, ...]]], synapse_charm: str, synapse_image: str, - synapse_nginx_image: str, ): """ arrange: build and deploy the Synapse charm from charmhub and enable Mjolnir. @@ -463,7 +418,6 @@ async def test_synapse_with_mjolnir_from_refresh_is_up( resources = { "synapse-image": synapse_image, - "synapse-nginx-image": synapse_nginx_image, } await synapse_charmhub_app.refresh(path=f"./{synapse_charm}", resources=resources) async with ops_test.fast_forward(): @@ -483,58 +437,3 @@ async def test_synapse_with_mjolnir_from_refresh_is_up( f"http://{synapse_ip}:{synapse.MJOLNIR_HEALTH_PORT}/healthz", timeout=5 ) assert mjolnir_response.status_code == 200 - - -async def test_admin_token_refresh(model: Model, synapse_app: Application): - """ - arrange: Build and deploy the Synapse charm from charmhub. - Create a user. - Promote it to admin (forces to get the admin token). - Reset the instance (wipes database and so admin token is invalid). - Create another user. - act: Promote the second user to admin. - assert: It should not fail as the admin token is refreshed. 
-    """
-    action_register_initial_user: Action = await synapse_app.units[0].run_action(
-        "register-user", username="initial_user", admin=False
-    )
-    await action_register_initial_user.wait()
-    assert action_register_initial_user.status == "completed"
-    assert action_register_initial_user.results.get("register-user")
-    password = action_register_initial_user.results.get("user-password")
-    assert password
-    action_promote_initial_user: Action = await synapse_app.units[0].run_action(  # type: ignore
-        "promote-user-admin", username="initial_user"
-    )
-    await action_promote_initial_user.wait()
-    assert action_promote_initial_user.status == "completed"
-
-    new_server_name = f"test-admin-token-refresh{token_hex(6)}"
-    await synapse_app.set_config({"server_name": new_server_name})
-    await model.wait_for_idle()
-
-    unit = model.applications[synapse_app.name].units[0]
-    assert unit.workload_status == "blocked"
-    assert "server_name modification is not allowed" in unit.workload_status_message
-    action_reset_instance: Action = await synapse_app.units[0].run_action(  # type: ignore
-        "reset-instance"
-    )
-    await action_reset_instance.wait()
-    assert action_reset_instance.status == "completed"
-    assert action_reset_instance.results["reset-instance"]
-    assert unit.workload_status == "active"
-
-    action_register_after_reset: Action = await synapse_app.units[0].run_action(
-        "register-user", username="user2", admin=False
-    )
-    await action_register_after_reset.wait()
-    assert action_register_after_reset.status == "completed"
-    assert action_register_after_reset.results.get("register-user")
-    password = action_register_after_reset.results.get("user-password")
-    assert password
-
-    action_promote_after_reset: Action = await synapse_app.units[0].run_action(  # type: ignore
-        "promote-user-admin", username="user2"
-    )
-    await action_promote_after_reset.wait()
-    assert action_promote_after_reset.status == "completed"
diff --git a/tests/integration/test_matrix_auth.py b/tests/integration/test_matrix_auth.py
new file mode 100644
index 00000000..f2cf2968
--- /dev/null
+++ b/tests/integration/test_matrix_auth.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Matrix-auth integration tests for Synapse charm."""
+import json
+import logging
+import pathlib
+import typing
+
+from juju.application import Application
+from juju.controller import Controller  # type: ignore
+from juju.model import Model
+from juju.unit import Unit
+from ops.model import ActiveStatus
+from pytest_operator.plugin import OpsTest
+
+# caused by pytest fixtures, mark does not work in fixtures
+# pylint: disable=too-many-arguments, unused-argument
+
+# mypy has trouble inferring types for variables that are initialized in subclasses.
+ACTIVE_STATUS_NAME = typing.cast(str, ActiveStatus.name)  # type: ignore
+
+logger = logging.getLogger(__name__)
+
+
+async def test_synapse_cmr_matrix_auth(
+    ops_test: OpsTest,
+    model: Model,
+    synapse_app: Application,
+):
+    """
+    arrange: deploy the Synapse charm, create offer, deploy any-charm as consumer
+        in a different model and consume offer.
+    act: integrate them via matrix-auth offer.
+    assert: Synapse sets the registration file received via matrix-auth.
+    """
+    await model.wait_for_idle(idle_period=10, status=ACTIVE_STATUS_NAME)
+    # This workaround was extracted from prometheus-k8s charm.
+    # Without it, the offer creation fails.
+ # https://github.com/canonical/prometheus-k8s-operator/blob/5779ecc749ee1582c6be20030a83472d024cd24f/tests/integration/test_remote_write_with_zinc.py#L103 + controller = Controller() + await controller.connect() + await controller.create_offer( + model.uuid, + f"{synapse_app.name}:matrix-auth", + ) + offers = await controller.list_offers(model.name) + await model.block_until( + lambda: all(offer.application_name == synapse_app.name for offer in offers.results) + ) + await model.wait_for_idle(idle_period=10, status=ACTIVE_STATUS_NAME) + await ops_test.track_model( + "consumer", + ) + with ops_test.model_context("consumer") as consumer_model: + any_charm_content = pathlib.Path("tests/integration/any_charm.py").read_text( + encoding="utf-8" + ) + matrix_auth_content = pathlib.Path("lib/charms/synapse/v1/matrix_auth.py").read_text( + encoding="utf-8" + ) + any_charm_src_overwrite = { + "any_charm.py": any_charm_content, + "matrix_auth.py": matrix_auth_content, + } + any_charm_app = await consumer_model.deploy( + "any-charm", + application_name="any-charm1", + channel="beta", + config={ + "python-packages": "pydantic\ncryptography", + "src-overwrite": json.dumps(any_charm_src_overwrite), + }, + ) + await consumer_model.wait_for_idle(apps=[any_charm_app.name]) + await consumer_model.consume(f"admin/{model.name}.{synapse_app.name}", "synapse") + + await consumer_model.relate(any_charm_app.name, "synapse") + await consumer_model.wait_for_idle(idle_period=30, status=ACTIVE_STATUS_NAME) + + unit: Unit = synapse_app.units[0] + ret_code, _, stderr = await ops_test.juju( + "exec", + "--unit", + unit.name, + "grep appservice-irc /data/appservice-registration-matrix-auth-*.yaml", + ) + assert not ret_code, f"Failed to check for application service file, {stderr}" diff --git a/tests/integration/test_nginx.py b/tests/integration/test_nginx.py index 912ac7e5..4070a60c 100644 --- a/tests/integration/test_nginx.py +++ b/tests/integration/test_nginx.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Integration tests for Synapse charm needing the nginx_integrator_app fixture.""" diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 87522f69..93d49428 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -1,9 +1,8 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
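# The cross-model steps exercised by test_synapse_cmr_matrix_auth above,
# condensed into one helper. This sketch reuses the same python-libjuju calls
# as the test; the application names ("synapse", "any-charm1") follow the
# test and are otherwise illustrative.
async def relate_cross_model(controller, offerer_model, consumer_model):
    """Offer synapse:matrix-auth, consume it, and relate the consumer charm."""
    await controller.create_offer(offerer_model.uuid, "synapse:matrix-auth")
    await consumer_model.consume(f"admin/{offerer_model.name}.synapse", "synapse")
    await consumer_model.relate("any-charm1", "synapse")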
"""Integration tests for Synapse charm needing the s3_backup_bucket fixture.""" -import io import logging import typing from secrets import token_hex @@ -101,7 +100,11 @@ async def test_synapse_create_backup_correct( object_key = f"{path}/{backup_action.results['backup-id']}" s3objresp = boto_s3_client.get_object(Bucket=bucket_name, Key=object_key) objbuf = s3objresp["Body"].read() - assert "GPG symmetrically encrypted data (AES256 cipher)" in magic.from_buffer(objbuf) + # GnuPG 2.2.x and earlier outputs "GPG symmetrically encrypted data (AES256 cipher)" + assert ( + "PGP symmetric key encrypted data - AES with 256-bit key salted & iterated - SHA512" + in magic.from_buffer(objbuf) + ) @pytest.mark.s3 @@ -247,7 +250,7 @@ async def test_synapse_backup_delete( @pytest.mark.s3 @pytest.mark.usefixtures("s3_media_bucket") -async def test_synapse_enable_media( +async def test_synapse_enable_media( # pylint: disable=too-many-positional-arguments model: Model, synapse_app: Application, get_unit_ips: typing.Callable[[str], typing.Awaitable[tuple[str, ...]]], @@ -271,15 +274,19 @@ async def test_synapse_enable_media( ) synapse_ip = (await get_unit_ips(synapse_app.name))[0] - headers = {"Authorization": f"Bearer {access_token}"} + headers = { + "Authorization": f"Bearer {access_token}", + "Content-Type": "application/octet-stream", + } media_file = "test_media_file.txt" # boto_s3_media_client.create_bucket(Bucket=s3_media_configuration["bucket"]) # Upload media file response = requests.post( - f"http://{synapse_ip}:8080/_matrix/media/v3/upload?filename={media_file}", + f"http://{synapse_ip}:8080/_matrix/media/v3/upload", headers=headers, - files={"file": (media_file, io.BytesIO(b""))}, + params={"filename": media_file}, + data=b"", timeout=5, ) assert response.status_code == 200 diff --git a/tests/integration/test_scaling.py b/tests/integration/test_scaling.py index 6319a85a..2b082aa0 100644 --- a/tests/integration/test_scaling.py +++ b/tests/integration/test_scaling.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Integration tests for Synapse charm integrated with Redis.""" @@ -13,6 +13,8 @@ from ops.model import ActiveStatus from pytest_operator.plugin import OpsTest +import synapse + # mypy has trouble to inferred types for variables that are initialized in subclasses. 
 ACTIVE_STATUS_NAME = typing.cast(str, ActiveStatus.name)  # type: ignore

@@ -41,15 +43,13 @@ async def test_synapse_scaling_nginx_configured(
     )
     assert ops_test.model
     status = await ops_test.model.get_status()
-    unit = list(status.applications[synapse_app.name].units)[1]
+    application = typing.cast(Application, status.applications[synapse_app.name])
+    unit = list(application.units)[1]
     address = status["applications"][synapse_app.name]["units"][unit]["address"]
-    logger.info("Units: %s", list(status.applications[synapse_app.name].units))
-    logger.info("Requesting %s", f"http://{address}:8008/")
     response_worker = requests.get(
         f"http://{address}:8008/", headers={"Host": synapse_app.name}, timeout=5
     )
-    logger.info("Requesting %s", f"http://{address}:8080/")
     response_nginx = requests.get(
         f"http://{address}:8080/", headers={"Host": synapse_app.name}, timeout=5
     )
@@ -81,7 +81,8 @@ async def test_synapse_scaling_down(
     )
     assert ops_test.model
     status = await ops_test.model.get_status()
-    for unit in list(status.applications[synapse_app.name].units):
+    application = typing.cast(Application, status.applications[synapse_app.name])
+    for unit in list(application.units):
         address = status["applications"][synapse_app.name]["units"][unit]["address"]
         response_worker = requests.get(
             f"http://{address}:8080/", headers={"Host": synapse_app.name}, timeout=5
@@ -97,9 +98,39 @@ async def test_synapse_scaling_down(
     )
     assert ops_test.model
     status = await ops_test.model.get_status()
-    for unit in list(status.applications[synapse_app.name].units):
+    application = typing.cast(Application, status.applications[synapse_app.name])
+    for unit in list(application.units):
         address = status["applications"][synapse_app.name]["units"][unit]["address"]
         response_worker = requests.get(
             f"http://{address}:8080/", headers={"Host": synapse_app.name}, timeout=5
         )
         assert response_worker.status_code == 200
+
+
+@pytest.mark.redis
+async def test_synapse_prometheus_configured(
+    model: Model,
+    synapse_app: Application,
+    redis_app: Application,
+    get_unit_ips: typing.Callable[[str], typing.Awaitable[tuple[str, ...]]],
+):
+    """
+    arrange: integrate Synapse with Redis and scale up by one unit.
+    act: get all unit IPs and do an HTTP request via port 9000.
+    assert: collecting metrics should work for all units.
+    """
+    await model.wait_for_idle(
+        idle_period=30,
+        apps=[synapse_app.name, redis_app.name],
+        status=ACTIVE_STATUS_NAME,
+    )
+    await synapse_app.scale(2)
+    await model.wait_for_idle(
+        idle_period=30,
+        apps=[synapse_app.name, redis_app.name],
+        status=ACTIVE_STATUS_NAME,
+    )
+    for unit_ip in await get_unit_ips(synapse_app.name):
+        response = requests.get(f"http://{unit_ip}:{synapse.SYNAPSE_EXPORTER_PORT}/", timeout=5)
+        assert response.status_code == 200
+        assert "python_gc_objects_collected_total" in response.text
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index e3979c0f..dddb292a 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -1,2 +1,2 @@
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 9ff1cf28..80a5bf9a 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Canonical Ltd.
+# Copyright 2025 Canonical Ltd.
 # See LICENSE file for licensing details.
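# Recap of the media upload change in tests/integration/test_s3.py above:
# Synapse's /_matrix/media/v3/upload endpoint takes the file as the raw
# request body, so the test now sends bytes with an explicit Content-Type
# instead of a multipart form field. Host and token below are placeholders.
import requests

resp = requests.post(
    "http://synapse.local:8080/_matrix/media/v3/upload",  # placeholder host
    headers={
        "Authorization": "Bearer <access-token>",  # placeholder token
        "Content-Type": "application/octet-stream",
    },
    params={"filename": "test_media_file.txt"},
    data=b"",
    timeout=5,
)
media_uri = resp.json()["content_uri"]  # mxc://<server>/<media-id>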
"""pytest fixtures for the unit test.""" @@ -121,15 +121,14 @@ def harness_fixture(request, monkeypatch) -> typing.Generator[Harness, None, Non monkeypatch.setattr(synapse, "create_admin_user", lambda *_args, **_kwargs: "") monkeypatch.setattr(time, "sleep", lambda *_args, **_kwargs: "") harness = Harness(SynapseCharm) + # Necessary for traefik-k8s.v2.ingress library as it calls binding.network.bind_address + harness.add_network("10.0.0.10") harness.update_config({"server_name": TEST_SERVER_NAME}) harness.set_model_name("testmodel") # needed for testing Traefik synapse_container: ops.Container = harness.model.unit.get_container( synapse.SYNAPSE_CONTAINER_NAME ) harness.set_can_connect(synapse.SYNAPSE_CONTAINER_NAME, True) - harness.set_can_connect( - harness.model.unit.containers[synapse.SYNAPSE_NGINX_CONTAINER_NAME], True - ) synapse_container.make_dir("/data", make_parents=True) synapse_container.push(f"/data/{TEST_SERVER_NAME}.signing.key", "123") # unused-variable disabled to pass constants values to inner function @@ -178,19 +177,21 @@ def start_cmd_handler(argv: list[str]) -> synapse.ExecResult: executable="/usr/bin/python3", handler=lambda _: synapse.ExecResult(0, "", ""), ) - synapse_nginx_container: ops.Container = harness.model.unit.get_container( - synapse.SYNAPSE_NGINX_CONTAINER_NAME - ) harness.register_command_handler( # type: ignore # pylint: disable=no-member - container=synapse_nginx_container, + container=synapse_container, executable="cp", handler=lambda _: synapse.ExecResult(0, "", ""), ) harness.register_command_handler( # type: ignore # pylint: disable=no-member - container=synapse_nginx_container, + container=synapse_container, executable="sed", handler=lambda _: synapse.ExecResult(0, "", ""), ) + harness.register_command_handler( # type: ignore # pylint: disable=no-member + container=synapse_container, + executable="rm", + handler=lambda _: synapse.ExecResult(0, "", ""), + ) yield harness harness.cleanup() diff --git a/tests/unit/test_admin_access_token.py b/tests/unit/test_admin_access_token.py index ba3a3206..f498fdb6 100644 --- a/tests/unit/test_admin_access_token.py +++ b/tests/unit/test_admin_access_token.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Admin Access Token unit tests.""" diff --git a/tests/unit/test_admin_create_user.py b/tests/unit/test_admin_create_user.py index a43f1310..0f6810bd 100644 --- a/tests/unit/test_admin_create_user.py +++ b/tests/unit/test_admin_create_user.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Tests for the create_user function in the synapse.admin module.""" diff --git a/tests/unit/test_anonymize_user_action.py b/tests/unit/test_anonymize_user_action.py index 7d55a1f3..6d9b9c67 100644 --- a/tests/unit/test_anonymize_user_action.py +++ b/tests/unit/test_anonymize_user_action.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Register user action unit tests.""" diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py index 95310e8a..8c0bc377 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/test_backup.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Synapse backup unit tests.""" diff --git a/tests/unit/test_backup_observer.py b/tests/unit/test_backup_observer.py index b1f4637d..304bf05d 100644 --- a/tests/unit/test_backup_observer.py +++ b/tests/unit/test_backup_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Synapse backup observer unit tests.""" @@ -74,7 +74,7 @@ ), ], ) -def test_on_s3_credentials_changed( +def test_on_s3_credentials_changed( # pylint: disable=too-many-positional-arguments harness: Harness, monkeypatch: pytest.MonkeyPatch, relation_data: dict, diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 09b53a72..d5cf0d31 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Synapse charm unit tests.""" @@ -7,6 +7,7 @@ import io import json +import typing from unittest.mock import MagicMock import ops @@ -30,9 +31,11 @@ def test_synapse_pebble_layer(harness: Harness) -> None: harness.set_leader(True) harness.begin_with_initial_hooks() - synapse_layer = harness.get_container_pebble_plan(synapse.SYNAPSE_CONTAINER_NAME).to_dict()[ - "services" - ][synapse.SYNAPSE_SERVICE_NAME] + pebble_plan = harness.get_container_pebble_plan(synapse.SYNAPSE_CONTAINER_NAME).to_dict() + synapse_layer = pebble_plan["services"][synapse.SYNAPSE_SERVICE_NAME] + assert pebble_plan["checks"]["synapse-ready"]["period"] == "2m" + assert pebble_plan["checks"]["synapse-ready"]["threshold"] == 5 + assert pebble_plan["checks"]["synapse-ready"]["timeout"] == "20s" assert isinstance(harness.model.unit.status, ops.ActiveStatus) assert synapse_layer == { "override": "replace", @@ -49,6 +52,33 @@ def test_synapse_pebble_layer(harness: Harness) -> None: }, "startup": "enabled", } + container = harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME] + root = harness.get_filesystem_root(container) + synapse_configuration = (root / "data" / "homeserver.yaml").read_text() + assert f"public_baseurl: https://{TEST_SERVER_NAME}" in synapse_configuration + + +@pytest.mark.skip(reason="harness does not reproduce checks changes") +def test_synapse_pebble_layer_change(harness: Harness) -> None: + """ + arrange: charm deployed. + act: change experimental_alive_check config. + assert: Synapse charm should submit the correct Synapse pebble layer to pebble. + """ + harness.set_leader(True) + harness.container_pebble_ready("synapse") + harness.begin_with_initial_hooks() + pebble_plan = harness.get_container_pebble_plan(synapse.SYNAPSE_CONTAINER_NAME).to_dict() + assert pebble_plan["checks"]["synapse-ready"]["period"] == "2m" + assert pebble_plan["checks"]["synapse-ready"]["threshold"] == 5 + assert pebble_plan["checks"]["synapse-ready"]["timeout"] == "20s" + + harness.update_config({"experimental_alive_check": "1m,3,30s"}) + + pebble_plan = harness.get_container_pebble_plan(synapse.SYNAPSE_CONTAINER_NAME).to_dict() + assert pebble_plan["checks"]["synapse-ready"]["period"] == "1m" + assert pebble_plan["checks"]["synapse-ready"]["threshold"] == 3 + assert pebble_plan["checks"]["synapse-ready"]["timeout"] == "30s" @pytest.mark.parametrize( @@ -98,9 +128,7 @@ def test_restart_nginx_container_down(harness: Harness) -> None: assert: Synapse charm should submit the correct status. 
""" harness.begin() - harness.set_can_connect( - harness.model.unit.containers[synapse.SYNAPSE_NGINX_CONTAINER_NAME], False - ) + harness.set_can_connect(harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME], False) harness.update_config({"report_stats": True}) assert isinstance(harness.model.unit.status, ops.MaintenanceStatus) assert "Waiting for" in str(harness.model.unit.status) @@ -144,11 +172,9 @@ def test_traefik_integration(harness: Harness) -> None: app_data = harness.get_relation_data(relation_id, app_name) assert app_data == { - "host": f"{app_name}-endpoints.{model_name}.svc.cluster.local", - "model": model_name, - "name": app_name, + "model": f'"{model_name}"', + "name": f'"{app_name}"', "port": str(synapse.SYNAPSE_NGINX_PORT), - "strip-prefix": "true", } @@ -253,8 +279,10 @@ def test_enable_federation_domain_whitelist_is_called( config = io.StringIO(config_content) harness.update_config({"federation_domain_whitelist": "foo"}) harness.begin() + monkeypatch.setattr(synapse, "set_public_baseurl", MagicMock()) monkeypatch.setattr(synapse, "execute_migrate_config", MagicMock()) monkeypatch.setattr(synapse, "enable_metrics", MagicMock()) + monkeypatch.setattr(synapse, "enable_rc_joins_remote_rate", MagicMock()) monkeypatch.setattr(synapse, "enable_replication", MagicMock()) monkeypatch.setattr(synapse, "enable_forgotten_room_retention", MagicMock()) monkeypatch.setattr(synapse, "enable_serve_server_wellknown", MagicMock()) @@ -286,8 +314,10 @@ def test_disable_password_config_is_called( """ harness.update_config({"enable_password_config": False}) harness.begin() + monkeypatch.setattr(synapse, "set_public_baseurl", MagicMock()) monkeypatch.setattr(synapse, "execute_migrate_config", MagicMock()) monkeypatch.setattr(synapse, "enable_metrics", MagicMock()) + monkeypatch.setattr(synapse, "enable_rc_joins_remote_rate", MagicMock()) monkeypatch.setattr(synapse, "enable_replication", MagicMock()) monkeypatch.setattr(synapse, "enable_forgotten_room_retention", MagicMock()) monkeypatch.setattr(synapse, "enable_serve_server_wellknown", MagicMock()) @@ -318,7 +348,6 @@ def test_nginx_replan(harness: Harness, monkeypatch: pytest.MonkeyPatch) -> None monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) harness.container_pebble_ready(synapse.SYNAPSE_CONTAINER_NAME) - harness.container_pebble_ready(synapse.SYNAPSE_NGINX_CONTAINER_NAME) restart_nginx_mock.assert_called_once() @@ -333,10 +362,10 @@ def test_nginx_replan_failure(harness: Harness, monkeypatch: pytest.MonkeyPatch) restart_nginx_mock = MagicMock() monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) - container = harness.model.unit.containers[synapse.SYNAPSE_NGINX_CONTAINER_NAME] + container = harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME] harness.set_can_connect(container, False) # harness.container_pebble_ready cannot be used as it sets the set_can_connect to True - harness.charm.on[synapse.SYNAPSE_NGINX_CONTAINER_NAME].pebble_ready.emit(container) + harness.charm.on[synapse.SYNAPSE_CONTAINER_NAME].pebble_ready.emit(container) restart_nginx_mock.assert_not_called() assert isinstance(harness.model.unit.status, ops.MaintenanceStatus) @@ -351,52 +380,9 @@ def test_nginx_replan_sets_status_to_active(harness: Harness) -> None: harness.begin() harness.container_pebble_ready(synapse.SYNAPSE_CONTAINER_NAME) - harness.container_pebble_ready(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - assert harness.model.unit.status == ops.ActiveStatus() -def test_nginx_replan_with_synapse_container_down( - harness: 
Harness, monkeypatch: pytest.MonkeyPatch -) -> None: - """ - arrange: start Synapse charm with Synapse container as down, and mock restart_nginx. - act: Fire that NGINX container is ready. - assert: Pebble Service replan NGINX is called but unit is in maintenance - waiting for Synapse pebble. - """ - harness.begin() - restart_nginx_mock = MagicMock() - monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) - - container = harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME] - harness.set_can_connect(container, False) - - harness.container_pebble_ready(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - - restart_nginx_mock.assert_called_once() - assert harness.model.unit.status == ops.MaintenanceStatus("Waiting for Synapse pebble") - - -def test_nginx_replan_with_synapse_service_not_existing( - harness: Harness, monkeypatch: pytest.MonkeyPatch -) -> None: - """ - arrange: start Synapse charm with Synapse container but without synapse service, - and mock restart_nginx. - act: Fire that NGINX container is ready. - assert: Pebble Service replan NGINX is called but unit is in maintenance waiting for Synapse. - """ - harness.begin() - restart_nginx_mock = MagicMock() - monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) - - harness.container_pebble_ready(synapse.SYNAPSE_NGINX_CONTAINER_NAME) - - restart_nginx_mock.assert_called_once() - assert harness.model.unit.status == ops.MaintenanceStatus("Waiting for Synapse") - - def test_redis_relation_success(redis_configured: Harness, monkeypatch: pytest.MonkeyPatch): """ arrange: start the Synapse charm, set server_name, mock synapse.enable_redis. @@ -494,3 +480,22 @@ def test_redis_enabled_reconcile_pebble_error( assert isinstance(harness.model.unit.status, ops.BlockedStatus) assert error_message in str(harness.model.unit.status) + + +def test_saml_on_relation_broken( + saml_configured: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: start the Synapse charm with saml integration, set server_name, mock pebble. + act: remove the saml integration. + assert: Synapse charm should correctly reconcile. + """ + harness = saml_configured + harness.begin() + reconcile_mock = MagicMock() + monkeypatch.setattr(pebble, "reconcile", reconcile_mock) + + relation = typing.cast(ops.model.Relation, harness.model.get_relation("saml")) + harness.remove_relation(relation.id) + + reconcile_mock.assert_called_once() diff --git a/tests/unit/test_charm_scaling.py b/tests/unit/test_charm_scaling.py index 0acf065e..2ffbff54 100644 --- a/tests/unit/test_charm_scaling.py +++ b/tests/unit/test_charm_scaling.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Synapse charm scaling unit tests.""" @@ -144,8 +144,6 @@ def test_scaling_instance_map_configured(harness: Harness) -> None: harness.add_relation("redis", "redis", unit_data={"hostname": "redis-host", "port": "1010"}) harness.set_leader(True) - harness.charm.on.config_changed.emit() - root = harness.get_filesystem_root(synapse.SYNAPSE_CONTAINER_NAME) config_path = root / synapse.SYNAPSE_CONFIG_PATH[1:] with open(config_path, encoding="utf-8") as config_file: @@ -153,6 +151,10 @@ def test_scaling_instance_map_configured(harness: Harness) -> None: assert "instance_map" in content assert content["instance_map"] == { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -161,6 +163,33 @@ def test_scaling_instance_map_configured(harness: Harness) -> None: "port": 8034, }, } + worker_config_path = root / synapse.SYNAPSE_WORKER_CONFIG_PATH[1:] + with open(worker_config_path, encoding="utf-8") as config_file: + content = yaml.safe_load(config_file) + assert content["worker_name"] == "federationsender1" + + +def test_scaling_instance_restarts_federation_service( + harness: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: charm deployed, integrated with Redis, one more unit in peer relation + and set as leader. + act: emit config-changed event. + assert: Synapse charm is configured with instance_map and the federation service is restarted. + """ + rel_id = harness.add_relation(synapse.SYNAPSE_PEER_RELATION_NAME, "synapse") + harness.add_relation_unit(rel_id, "synapse/1") + harness.begin_with_initial_hooks() + federation_container = harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME] + harness.set_can_connect(federation_container, True) + harness.add_relation("redis", "redis", unit_data={"hostname": "redis-host", "port": "1010"}) + harness.set_leader(True) + + restart_federation_mock = MagicMock() + monkeypatch.setattr(pebble, "restart_federation_sender", restart_federation_mock) + harness.update_config({"workers_ignore_list": "worker1"}) + assert restart_federation_mock.called def test_scaling_instance_map_not_configured(harness: Harness) -> None: @@ -221,16 +250,13 @@ def test_scaling_main_unit_changed_nginx_reconfigured( "synapse", app_data={"main_unit_id": "synapse/0"}, ) - harness.begin_with_initial_hooks() - nginx_container = harness.model.unit.containers[synapse.SYNAPSE_NGINX_CONTAINER_NAME] + restart_nginx_mock = MagicMock() + monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) + nginx_container = harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME] harness.set_can_connect(nginx_container, True) + harness.begin_with_initial_hooks() harness.add_relation("redis", "redis", unit_data={"hostname": "redis-host", "port": "1010"}) harness.set_leader(False) - # emit nginx ready - # assert was called with synapse/0 - restart_nginx_mock = MagicMock() - monkeypatch.setattr(pebble, "restart_nginx", restart_nginx_mock) - harness.charm.on.synapse_nginx_pebble_ready.emit(MagicMock()) restart_nginx_mock.assert_called_with(nginx_container, "synapse-0.synapse-endpoints") harness.update_relation_data( @@ -407,6 +433,10 @@ def test_scaling_signing_not_found(harness: Harness, monkeypatch: pytest.MonkeyP "worker1, worker2", { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -424,6 +454,10 @@ def test_scaling_signing_not_found(harness: Harness, 
monkeypatch: pytest.MonkeyP "worker1 ,worker2", { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -441,6 +475,10 @@ def test_scaling_signing_not_found(harness: Harness, monkeypatch: pytest.MonkeyP " worker1,worker3 ", { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -458,6 +496,10 @@ def test_scaling_signing_not_found(harness: Harness, monkeypatch: pytest.MonkeyP "worker4", { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -479,6 +521,10 @@ def test_scaling_signing_not_found(harness: Harness, monkeypatch: pytest.MonkeyP "workerfake", { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, @@ -527,6 +573,10 @@ def test_scaling_instance_map_configured_ignoring_workers( assert "instance_map" in content assert content["instance_map"] == { "main": { + "host": "synapse-0.synapse-endpoints", + "port": 8035, + }, + "federationsender1": { "host": "synapse-0.synapse-endpoints", "port": 8034, }, diff --git a/tests/unit/test_charm_state.py b/tests/unit/test_charm_state.py index fc822f09..0c2a23ad 100644 --- a/tests/unit/test_charm_state.py +++ b/tests/unit/test_charm_state.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Synapse charm state unit tests.""" @@ -28,16 +28,18 @@ def build_charm_state(self) -> CharmState: Returns: A valid charm state """ - synapse_config = SynapseConfig(server_name="example.com") # type: ignore[call-arg] + synapse_config = SynapseConfig( + server_name="example.com", public_baseurl="https://example.com" + ) # type: ignore[call-arg] return CharmState( synapse_config=synapse_config, datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=None, media_config=None, redis_config=None, instance_map_config=None, + registration_secrets=None, ) def reconcile(self, charm_state) -> None: diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 8b3141a7..2dbea8bf 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Database unit tests.""" diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py index 3807cc97..d4a57ba4 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Exceptions unit tests.""" diff --git a/tests/unit/test_irc_bridge.py b/tests/unit/test_irc_bridge.py deleted file mode 100644 index 7bf18c74..00000000 --- a/tests/unit/test_irc_bridge.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. 
- -"""Tests for the irc_bridge module.""" - -from secrets import token_hex -from unittest.mock import MagicMock - -import pytest -from ops.model import Container - -import irc_bridge -import synapse -from charm_state import CharmState, SynapseConfig -from charm_types import DatasourcePostgreSQL -from irc_bridge import enable_irc_bridge -from synapse import ExecResult - - -@pytest.fixture(name="state") -def charm_state_fixture(): - """Construct a CharmState object.""" - - def charm_state_with_db(with_db_config: bool) -> CharmState: - """ - Create a CharmState object with a SynapseConfig and optional DatasourcePostgreSQL object. - - Args: - with_db_config: Whether to include a DatasourcePostgreSQL object in the CharmState. - - Returns: - A CharmState object. - """ - synapse_config = SynapseConfig( # type: ignore - server_name="foo", - ) - db_config = None - if with_db_config: - db_config = DatasourcePostgreSQL( - user="bar", - password=token_hex(16), - host="qux", - port="quux", - db="quuz", - ) - return CharmState( - synapse_config=synapse_config, - datasource=None, - irc_bridge_datasource=db_config, - saml_config=None, - smtp_config=None, - redis_config=None, - media_config=None, - instance_map_config=None, - ) - - return charm_state_with_db - - -@pytest.fixture(name="container_mock") -def container_fixture(): - """Construct a Container object.""" - return MagicMock(spec=Container) - - -def test_enable_irc_bridge_with_pebble_socket_available(state, container_mock, monkeypatch): - """Test enabling the IRC bridge when the Pebble socket is available. - Arrange: - - A container mock with a Pebble socket available. - - A charm state with a SynapseConfig and DatasourcePostgreSQL object. - Act: - - Enable the IRC bridge. - Assert: - - The irc files are pushed to the container. - """ - container_mock.can_connect.return_value = True - monkeypatch.setattr( - synapse.workload, - "_exec", - MagicMock(return_value=ExecResult(exit_code=0, stdout="stdout", stderr="stderr")), - ) - monkeypatch.setattr( - irc_bridge, - "_create_pem_file", - MagicMock(return_value=ExecResult(exit_code=0, stdout="stdout", stderr="stderr")), - ) - - charm = state(True) - enable_irc_bridge(charm, container_mock) - - container_mock.can_connect.assert_called_once() - container_mock.push.assert_called_once() - container_mock.push.assert_called_once() - - -def test_enable_irc_bridge_with_pebble_socket_unavailable(state, container_mock): - """Test enabling the IRC bridge when the Pebble socket is unavailable. - Arrange: - - A container mock with a Pebble socket unavailable. - - A charm state with a SynapseConfig and DatasourcePostgreSQL object. - Act: - - Enable the IRC bridge. - Assert: - - The irc files are not pushed to the container. - """ - container_mock.can_connect.return_value = False - - enable_irc_bridge(state, container_mock) - - container_mock.can_connect.assert_called_once() - container_mock.push.assert_not_called() - - -def test_enable_irc_bridge_with_no_db_connection_string(state, container_mock): - """Test enabling the IRC bridge when there is no db connection string. - Arrange: - - A container mock with a Pebble socket available. - - A charm state with a SynapseConfig and no DatasourcePostgreSQL object. - Act: - - Enable the IRC bridge. - Assert: - - The irc files are not pushed to the container. 
- """ - container_mock.can_connect.return_value = True - - charm = state(False) - enable_irc_bridge(charm, container_mock) - - container_mock.can_connect.assert_called_once() - container_mock.push.assert_not_called() diff --git a/tests/unit/test_matrix_auth_integration.py b/tests/unit/test_matrix_auth_integration.py new file mode 100644 index 00000000..4f13d076 --- /dev/null +++ b/tests/unit/test_matrix_auth_integration.py @@ -0,0 +1,171 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Synapse charm matrix-auth integration unit tests.""" + +# pylint: disable=protected-access + +from unittest.mock import ANY, MagicMock + +import pytest +import yaml +from charms.synapse.v1.matrix_auth import MatrixAuthRequirerData, encrypt_string +from ops.testing import Harness +from pydantic import SecretStr + +import synapse + +from .conftest import TEST_SERVER_NAME + + +def test_matrix_auth_update_success(harness: Harness, monkeypatch: pytest.MonkeyPatch): + """ + arrange: start the Synapse charm. + act: integrate via matrix-auth. + assert: update_relation_data is called and homeserver has same value as + server_name. + """ + harness.update_config({"server_name": TEST_SERVER_NAME}) + harness.set_can_connect(synapse.SYNAPSE_CONTAINER_NAME, True) + harness.set_leader(True) + harness.begin() + update_relation_data = MagicMock() + monkeypatch.setattr( + harness.charm._matrix_auth.matrix_auth, "update_relation_data", update_relation_data + ) + monkeypatch.setattr( + synapse, "get_registration_shared_secret", MagicMock(return_value="shared_secret") + ) + + rel_id = harness.add_relation("matrix-auth", "maubot") + harness.add_relation_unit(rel_id, "maubot/0") + harness.update_relation_data(rel_id, "maubot", {"foo": "foo"}) + + relation = harness.charm.framework.model.get_relation("matrix-auth", rel_id) + update_relation_data.assert_called_with(relation, ANY) + + assert update_relation_data.call_args[0][1].homeserver == f"https://{TEST_SERVER_NAME}" + + +def test_matrix_auth_update_public_baseurl_success( + harness: Harness, monkeypatch: pytest.MonkeyPatch +): + """ + arrange: start the Synapse charm with public_baseurl set. + act: integrate via matrix-auth. + assert: update_relation_data is called and homeserver has same value as + public_baseurl. + """ + base_url_value = "https://new-server" + harness.update_config({"server_name": TEST_SERVER_NAME, "public_baseurl": base_url_value}) + harness.set_can_connect(synapse.SYNAPSE_CONTAINER_NAME, True) + harness.set_leader(True) + harness.begin() + update_relation_data = MagicMock() + monkeypatch.setattr( + harness.charm._matrix_auth.matrix_auth, "update_relation_data", update_relation_data + ) + monkeypatch.setattr( + synapse, "get_registration_shared_secret", MagicMock(return_value="shared_secret") + ) + + rel_id = harness.add_relation("matrix-auth", "maubot") + harness.add_relation_unit(rel_id, "maubot/0") + harness.update_relation_data(rel_id, "maubot", {"foo": "foo"}) + + relation = harness.charm.framework.model.get_relation("matrix-auth", rel_id) + update_relation_data.assert_called_with(relation, ANY) + + assert update_relation_data.call_args[0][1].homeserver == base_url_value + + +def test_matrix_auth_registration_secret_success( + harness: Harness, monkeypatch: pytest.MonkeyPatch +): + """ + arrange: start the Synapse charm with public_base url set. + act: integrate via matrix-auth with maubot and add registration as relation + data. 
+    assert: update_relation_data is called, homeserver has the same value as
+    public_baseurl and app_service_config_files is set.
+    """
+    base_url_value = "https://new-server"
+    harness.update_config({"server_name": TEST_SERVER_NAME, "public_baseurl": base_url_value})
+    harness.set_can_connect(synapse.SYNAPSE_CONTAINER_NAME, True)
+    harness.set_leader(True)
+    harness.begin_with_initial_hooks()
+    update_relation_data = MagicMock()
+    monkeypatch.setattr(
+        harness.charm._matrix_auth.matrix_auth, "update_relation_data", update_relation_data
+    )
+    encryption_key = b"DXnflqjmmM8-UASxTl9oWeM7PWKQoclMFVb_bp9zLGY="
+    monkeypatch.setattr(
+        MatrixAuthRequirerData, "get_encryption_key_secret", MagicMock(return_value=encryption_key)
+    )
+    monkeypatch.setattr(
+        synapse, "get_registration_shared_secret", MagicMock(return_value="shared_secret")
+    )
+    create_registration_secrets_files_mock = MagicMock()
+    monkeypatch.setattr(
+        synapse, "create_registration_secrets_files", create_registration_secrets_files_mock
+    )
+
+    rel_id = harness.add_relation("matrix-auth", "maubot")
+    harness.add_relation_unit(rel_id, "maubot/0")
+    encrypted_text = encrypt_string(key=encryption_key, plaintext=SecretStr("foo"))
+    harness.update_relation_data(rel_id, "maubot", {"registration_secret": encrypted_text})
+
+    relation = harness.charm.framework.model.get_relation("matrix-auth", rel_id)
+    update_relation_data.assert_called_with(relation, ANY)
+    assert update_relation_data.call_args[0][1].homeserver == base_url_value
+    create_registration_secrets_files_mock.assert_called_once()
+    root = harness.get_filesystem_root(synapse.SYNAPSE_CONTAINER_NAME)
+    config_path = root / synapse.SYNAPSE_CONFIG_PATH[1:]
+    with open(config_path, encoding="utf-8") as config_file:
+        content = yaml.safe_load(config_file)
+        assert "app_service_config_files" in content
+        assert content["app_service_config_files"] == [
+            "/data/appservice-registration-matrix-auth-1.yaml"
+        ]
+
+
+def test_matrix_auth_registration_secret_empty(harness: Harness, monkeypatch: pytest.MonkeyPatch):
+    """
+    arrange: start the Synapse charm with public_baseurl set.
+    act: integrate via matrix-auth with maubot and add registration as relation
+    data.
+    assert: update_relation_data is called, homeserver has the same value as
+    public_baseurl and since registration is empty there are no registration
+    files.
+ """ + base_url_value = "https://new-server" + harness.update_config({"server_name": TEST_SERVER_NAME, "public_baseurl": base_url_value}) + harness.set_can_connect(synapse.SYNAPSE_CONTAINER_NAME, True) + harness.set_leader(True) + harness.begin_with_initial_hooks() + update_relation_data = MagicMock() + monkeypatch.setattr( + harness.charm._matrix_auth.matrix_auth, "update_relation_data", update_relation_data + ) + encryption_key = b"DXnflqjmmM8-UASxTl9oWeM7PWKQoclMFVb_bp9zLGY=" + monkeypatch.setattr( + MatrixAuthRequirerData, "get_encryption_key_secret", MagicMock(return_value=encryption_key) + ) + monkeypatch.setattr( + synapse, "get_registration_shared_secret", MagicMock(return_value="shared_secret") + ) + create_registration_secrets_files_mock = MagicMock() + monkeypatch.setattr( + synapse, "create_registration_secrets_files", create_registration_secrets_files_mock + ) + + rel_id = harness.add_relation("matrix-auth", "maubot") + harness.add_relation_unit(rel_id, "maubot/0") + relation = harness.charm.framework.model.get_relation("matrix-auth", rel_id) + harness.charm.on["matrix-auth"].relation_changed.emit( + relation, harness.charm.app, harness.charm.unit + ) + + update_relation_data.assert_called_with(relation, ANY) + assert update_relation_data.call_args[0][1].homeserver == base_url_value + create_registration_secrets_files_mock.assert_not_called() diff --git a/tests/unit/test_matrix_plugins_lib.py b/tests/unit/test_matrix_plugins_lib.py new file mode 100644 index 00000000..1b8fcb2d --- /dev/null +++ b/tests/unit/test_matrix_plugins_lib.py @@ -0,0 +1,334 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +"""MatrixAuth library unit tests""" + +from secrets import token_hex + +import ops +import pytest +from charms.synapse.v0.matrix_auth import ( + MatrixAuthProviderData, + MatrixAuthProvides, + MatrixAuthRequestProcessed, + MatrixAuthRequestReceived, + MatrixAuthRequirerData, + MatrixAuthRequires, +) +from ops.testing import Harness + +REQUIRER_METADATA = """ +name: matrix-auth-consumer +requires: + matrix-auth: + interface: matrix-auth +""" + +PROVIDER_METADATA = """ +name: matrix-auth-producer +provides: + matrix-auth: + interface: matrix-auth +""" + +SAMPLE_PROVIDER_DATA = { + "homeserver": "https://example.com", + "shared_secret_id": "test-secret-id", +} + +SAMPLE_REQUIRER_DATA = { + "registration_secret_id": "test-registration-id", +} + + +class MatrixAuthRequirerCharm(ops.CharmBase): + """Class for requirer charm testing.""" + + def __init__(self, *args): + """Initialise the MatrixAuthRequirerCharm class. + + Args: + args: Arguments to pass to the parent class. + """ + super().__init__(*args) + self.matrix_auth = MatrixAuthRequires(self) + self.events = [] + self.framework.observe( + self.matrix_auth.on.matrix_auth_request_processed, self._record_event + ) + + def _record_event(self, event: ops.EventBase) -> None: + """Record the event. + + Args: + event: The event to record. + """ + self.events.append(event) + + +class MatrixAuthProviderCharm(ops.CharmBase): + """Class for provider charm testing.""" + + def __init__(self, *args): + """Initialise the MatrixAuthProviderCharm class. + + Args: + args: Arguments to pass to the parent class. + """ + super().__init__(*args) + self.matrix_auth = MatrixAuthProvides(self) + self.events = [] + self.framework.observe( + self.matrix_auth.on.matrix_auth_request_received, self._record_event + ) + + def _record_event(self, event: ops.EventBase) -> None: + """Record the event. 
+ + Args: + event: The event to record. + """ + self.events.append(event) + + +# Tests for MatrixAuthRequires + + +def test_matrix_auth_requirer_does_not_emit_event_when_no_data(): + """ + arrange: set up a charm with no relation data to be populated. + act: add a matrix-auth relation. + assert: no events are emitted. + """ + harness = Harness(MatrixAuthRequirerCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.set_leader(True) + harness.add_relation("matrix-auth", "matrix-auth-provider") + relation = harness.charm.framework.model.get_relation("matrix-auth", 0) + harness.charm.on.matrix_auth_relation_changed.emit(relation) + assert len(harness.charm.events) == 0 + + +@pytest.mark.parametrize("is_leader", [True, False]) +def test_matrix_auth_requirer_with_valid_relation_data_emits_event(is_leader, monkeypatch): + """ + arrange: set up a charm. + act: add a matrix-auth relation with valid data. + assert: a MatrixAuthRequestProcessed event containing the relation data is emitted. + """ + harness = Harness(MatrixAuthRequirerCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.set_leader(is_leader) + + # Mock the get_shared_secret method to return a test secret + def mock_get_shared_secret(*args): # pylint: disable=unused-argument + """Mock get_shared_secret method. + + Args: + args: Arguments passed to the method. + + Returns: + str: The shared secret. + """ + return "test-shared-secret" + + monkeypatch.setattr(MatrixAuthProviderData, "get_shared_secret", mock_get_shared_secret) + + harness.add_relation("matrix-auth", "matrix-auth-provider", app_data=SAMPLE_PROVIDER_DATA) + + assert len(harness.charm.events) == 1 + event = harness.charm.events[0] + assert isinstance(event, MatrixAuthRequestProcessed) + + relation_data = event.get_matrix_auth_provider_relation_data() + assert relation_data.homeserver == SAMPLE_PROVIDER_DATA["homeserver"] + assert relation_data.shared_secret.get_secret_value() == "test-shared-secret" + + +@pytest.mark.parametrize("is_leader", [True, False]) +def test_matrix_auth_requirer_with_invalid_relation_data_doesnt_emit_event(is_leader): + """ + arrange: set up a charm. + act: add a matrix-auth relation with invalid data. + assert: a MatrixAuthRequestProcessed event is not emitted. + """ + invalid_relation_data = { + "homeserver": "https://example.com", + # Missing shared_secret_id + } + + harness = Harness(MatrixAuthRequirerCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.set_leader(is_leader) + harness.add_relation("matrix-auth", "matrix-auth-provider", app_data=invalid_relation_data) + + assert len(harness.charm.events) == 0 + + +def test_matrix_auth_requirer_get_remote_relation_data_without_relation(): + """ + arrange: set up a charm without any matrix-auth relation. + act: call get_remote_relation_data function. + assert: get_remote_relation_data should return None. + """ + harness = Harness(MatrixAuthRequirerCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.set_leader(True) + assert harness.charm.matrix_auth.get_remote_relation_data() is None + + +def test_matrix_auth_requirer_get_remote_relation_data_with_valid_data(monkeypatch): + """ + arrange: set up a charm with matrix-auth relation with valid relation data. + act: call get_remote_relation_data function. + assert: get_remote_relation_data should return a valid MatrixAuthProviderData object. 
+ """ + harness = Harness(MatrixAuthRequirerCharm, meta=REQUIRER_METADATA) + harness.begin() + harness.set_leader(True) + + # Mock the get_shared_secret method to return a test secret + def mock_get_shared_secret(*args): # pylint: disable=unused-argument + """Mock get_shared_secret method. + + Args: + args: Arguments passed to the method. + + Returns: + str: The shared secret. + """ + return "test-shared-secret" + + monkeypatch.setattr(MatrixAuthProviderData, "get_shared_secret", mock_get_shared_secret) + + harness.add_relation("matrix-auth", "matrix-auth-provider", app_data=SAMPLE_PROVIDER_DATA) + + relation_data = harness.charm.matrix_auth.get_remote_relation_data() + assert relation_data is not None + assert relation_data.homeserver == SAMPLE_PROVIDER_DATA["homeserver"] + assert relation_data.shared_secret.get_secret_value() == "test-shared-secret" + + +# Tests for MatrixAuthProvides + + +def test_matrix_auth_provider_does_not_emit_event_when_no_data(): + """ + arrange: set up a charm with no relation data to be populated. + act: add a matrix-auth relation. + assert: no events are emitted. + """ + harness = Harness(MatrixAuthProviderCharm, meta=PROVIDER_METADATA) + harness.begin() + harness.set_leader(True) + harness.add_relation("matrix-auth", "matrix-auth-consumer") + relation = harness.charm.framework.model.get_relation("matrix-auth", 0) + harness.charm.on.matrix_auth_relation_changed.emit(relation) + assert len(harness.charm.events) == 0 + + +@pytest.mark.parametrize("is_leader", [True, False]) +def test_matrix_auth_provider_with_valid_relation_data_emits_event(is_leader, monkeypatch): + """ + arrange: set up a charm. + act: add a matrix-auth relation with valid data. + assert: a MatrixAuthRequestReceived event is emitted. + """ + harness = Harness(MatrixAuthProviderCharm, meta=PROVIDER_METADATA) + harness.begin() + harness.set_leader(is_leader) + + # Mock the get_registration method to return a test registration + def mock_get_registration(*args): # pylint: disable=unused-argument + """Mock get_registration method. + + Args: + args: Arguments passed to the method. + + Returns: + str: The registration. + """ + return "test-registration" + + monkeypatch.setattr(MatrixAuthRequirerData, "get_registration", mock_get_registration) + + harness.add_relation("matrix-auth", "matrix-auth-consumer", app_data=SAMPLE_REQUIRER_DATA) + + assert len(harness.charm.events) == 1 + event = harness.charm.events[0] + assert isinstance(event, MatrixAuthRequestReceived) + + +@pytest.mark.parametrize("is_leader", [True, False]) +def test_matrix_auth_provider_with_invalid_relation_data_doesnt_emit_event(is_leader): + """ + arrange: set up a charm. + act: add a matrix-auth relation with invalid data. + assert: a MatrixAuthRequestReceived event is not emitted. + """ + invalid_relation_data: dict[str, str] | None = { + # Missing registration_secret_id + } + + harness = Harness(MatrixAuthProviderCharm, meta=PROVIDER_METADATA) + harness.begin() + harness.set_leader(is_leader) + harness.add_relation("matrix-auth", "matrix-auth-consumer", app_data=invalid_relation_data) + + assert len(harness.charm.events) == 0 + + +def test_matrix_auth_provider_update_relation_data(): + """ + arrange: set up a charm with a matrix-auth relation. + act: update the relation data. + assert: the relation data is updated correctly. 
+ """ + harness = Harness(MatrixAuthProviderCharm, meta=PROVIDER_METADATA) + harness.begin() + harness.set_leader(True) + rel_id = harness.add_relation("matrix-auth", "matrix-auth-consumer") + relation = harness.model.get_relation("matrix-auth", rel_id) + secret = token_hex(16) + + provider_data = MatrixAuthProviderData( + homeserver="https://example.com", + shared_secret=secret, + ) + + harness.charm.matrix_auth.update_relation_data(relation, provider_data) + + relation_data = harness.get_relation_data(rel_id, harness.charm.app.name) + assert relation_data["homeserver"] == "https://example.com" + assert "shared_secret_id" in relation_data # The actual ID will be generated + + +def test_matrix_auth_provider_get_remote_relation_data(monkeypatch): + """ + arrange: set up a charm with a matrix-auth relation and valid requirer data. + act: call get_remote_relation_data function. + assert: get_remote_relation_data returns a valid MatrixAuthRequirerData object. + """ + harness = Harness(MatrixAuthProviderCharm, meta=PROVIDER_METADATA) + harness.begin() + harness.set_leader(True) + + # Mock the get_registration method to return a test registration + def mock_get_registration(*args): # pylint: disable=unused-argument + """Mock get_registration method. + + Args: + args: Arguments passed to the method. + + Returns: + str: The registration. + """ + return "test-registration" + + monkeypatch.setattr(MatrixAuthRequirerData, "get_registration", mock_get_registration) + + harness.add_relation("matrix-auth", "matrix-auth-consumer", app_data=SAMPLE_REQUIRER_DATA) + + relation_data = harness.charm.matrix_auth.get_remote_relation_data() + assert relation_data is not None + assert relation_data.registration.get_secret_value() == "test-registration" diff --git a/tests/unit/test_media_observer.py b/tests/unit/test_media_observer.py index d8bbb161..c969c66b 100644 --- a/tests/unit/test_media_observer.py +++ b/tests/unit/test_media_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Synapse media unit tests.""" diff --git a/tests/unit/test_mjolnir.py b/tests/unit/test_mjolnir.py index ead62a59..05f1dbfe 100644 --- a/tests/unit/test_mjolnir.py +++ b/tests/unit/test_mjolnir.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Mjolnir unit tests.""" @@ -226,8 +226,8 @@ def test_enable_mjolnir(harness: Harness, monkeypatch: pytest.MonkeyPatch) -> No monkeypatch.setattr(synapse, "get_room_id", get_room_id) make_room_admin = MagicMock() monkeypatch.setattr(synapse, "make_room_admin", make_room_admin) - create_mjolnir_config = MagicMock() - monkeypatch.setattr(synapse, "create_mjolnir_config", create_mjolnir_config) + generate_mjolnir_config = MagicMock() + monkeypatch.setattr(synapse, "generate_mjolnir_config", generate_mjolnir_config) override_rate_limit = MagicMock() monkeypatch.setattr(synapse, "override_rate_limit", override_rate_limit) @@ -240,7 +240,7 @@ def test_enable_mjolnir(harness: Harness, monkeypatch: pytest.MonkeyPatch) -> No make_room_admin.assert_called_once_with( user=ANY, server=ANY, admin_access_token=admin_access_token, room_id=room_id ) - create_mjolnir_config.assert_called_once_with( + generate_mjolnir_config.assert_called_once_with( container=ANY, access_token=mjolnir_access_token, room_id=room_id ) override_rate_limit.assert_called_once_with( @@ -272,8 +272,8 @@ def test_enable_mjolnir_room_none(harness: Harness, monkeypatch: pytest.MonkeyPa monkeypatch.setattr(synapse, "create_management_room", create_management_room) make_room_admin = MagicMock() monkeypatch.setattr(synapse, "make_room_admin", make_room_admin) - create_mjolnir_config = MagicMock() - monkeypatch.setattr(synapse, "create_mjolnir_config", create_mjolnir_config) + generate_mjolnir_config = MagicMock() + monkeypatch.setattr(synapse, "generate_mjolnir_config", generate_mjolnir_config) override_rate_limit = MagicMock() monkeypatch.setattr(synapse, "override_rate_limit", override_rate_limit) @@ -288,7 +288,7 @@ def test_enable_mjolnir_room_none(harness: Harness, monkeypatch: pytest.MonkeyPa make_room_admin.assert_called_once_with( user=ANY, server=ANY, admin_access_token=admin_access_token, room_id=room_id ) - create_mjolnir_config.assert_called_once_with( + generate_mjolnir_config.assert_called_once_with( container=ANY, access_token=mjolnir_access_token, room_id=room_id ) override_rate_limit.assert_called_once_with( @@ -465,3 +465,33 @@ def test_on_collect_status_not_main_unit( event_mock.add_status.assert_not_called() enable_mjolnir_mock.assert_not_called() + + +def test_on_collect_status_not_main_unit_and_is_started( + harness: Harness, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + arrange: start the Synapse charm, set server_name, unit has Mjolnir started. + act: call _on_collect_status. + assert: Mjolnir is stopped since this is not the main unit. + """ + harness.add_relation( + synapse.SYNAPSE_PEER_RELATION_NAME, + "synapse", + app_data={"main_unit_id": "synapse/1"}, + ) + harness.update_config({"enable_mjolnir": True}) + harness.begin_with_initial_hooks() + enable_mjolnir_mock = MagicMock() + monkeypatch.setattr(Mjolnir, "enable_mjolnir", enable_mjolnir_mock) + container_mock = MagicMock(spec=ops.Container) + monkeypatch.setattr( + harness.charm.unit, "get_container", MagicMock(return_value=container_mock) + ) + + event_mock = MagicMock() + harness.charm._mjolnir._on_collect_status(event_mock) + + event_mock.add_status.assert_not_called() + enable_mjolnir_mock.assert_not_called() + container_mock.stop.assert_called_with("mjolnir") diff --git a/tests/unit/test_observability.py b/tests/unit/test_observability.py index 6c794449..cc292c4a 100644 --- a/tests/unit/test_observability.py +++ b/tests/unit/test_observability.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. 
# See LICENSE file for licensing details. """Synapse observability unit tests.""" @@ -10,36 +10,37 @@ import synapse -def test_main_prometheus_target(prometheus_configured: Harness) -> None: +def test_prometheus_target(prometheus_configured: Harness) -> None: """ arrange: charm deployed, integrated with Redis and set as a leader. act: start the Synapse charm. - assert: Synapse charm is the main_unit so targets are 9000 (main) and 9877 (stats exporter). + assert: Synapse charm has Prometheus targets 9000 (Synapse) and 9877 (Stats exporter). """ harness = prometheus_configured harness.set_leader(True) harness.begin_with_initial_hooks() assert harness.charm._observability._metrics_endpoint._scrape_jobs == [ - {"metrics_path": "/metrics", "static_configs": [{"targets": ["*:9000", "*:9877"]}]} - ] - - -def test_worker_prometheus_target(prometheus_configured: Harness) -> None: - """ - arrange: charm deployed. - act: start the Synapse charm, set Synapse container to be ready and set server_name. - assert: Synapse charm is worker so target is 9101 (worker). - """ - harness = prometheus_configured - harness.begin() - harness.set_leader(False) - harness.add_relation( - synapse.SYNAPSE_PEER_RELATION_NAME, - harness.charm.app.name, - app_data={"main_unit_id": "synapse/1"}, - ) - - assert harness.charm._observability._metrics_endpoint._scrape_jobs == [ - {"metrics_path": "/metrics", "static_configs": [{"targets": ["*:9101"]}]} + { + "job_name": "synapse_application", + "metrics_path": "/metrics", + "static_configs": [ + { + "targets": [ + f"*:{synapse.SYNAPSE_EXPORTER_PORT}", + ] + } + ], + }, + { + "job_name": "synapse_stats_exporter", + "metrics_path": "/metrics", + "static_configs": [ + { + "targets": [ + f"*:{synapse.STATS_EXPORTER_PORT}", + ] + } + ], + }, ] diff --git a/tests/unit/test_promote_user_admin_action.py b/tests/unit/test_promote_user_admin_action.py index 45c63dc1..c4315e69 100644 --- a/tests/unit/test_promote_user_admin_action.py +++ b/tests/unit/test_promote_user_admin_action.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Register user action unit tests.""" diff --git a/tests/unit/test_register_user_action.py b/tests/unit/test_register_user_action.py index a0137090..1f37bbdb 100644 --- a/tests/unit/test_register_user_action.py +++ b/tests/unit/test_register_user_action.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Register user action unit tests.""" @@ -21,7 +21,7 @@ def test_register_user_action(harness: Harness, monkeypatch: pytest.MonkeyPatch) """ arrange: start the Synapse charm, set Synapse container to be ready and set server_name. act: run register-user action. - assert: Synapse charm should reset the instance. + assert: User is created and the charm is active. """ harness.begin_with_initial_hooks() get_registration_mock = unittest.mock.Mock(return_value="shared_secret") diff --git a/tests/unit/test_reset_instance_action.py b/tests/unit/test_reset_instance_action.py deleted file mode 100644 index 34e98a20..00000000 --- a/tests/unit/test_reset_instance_action.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. 
- -"""Reset instance action unit tests.""" - -# pylint: disable=protected-access - -import io -import unittest.mock - -import ops -import pytest -from ops.testing import Harness - -import actions -import synapse -from database_client import DatabaseClient - -from .conftest import TEST_SERVER_NAME - - -def test_reset_instance_action(harness: Harness) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: run reset-instance action. - assert: Synapse charm should reset the instance. - """ - harness.set_leader(True) - harness.begin() - event = unittest.mock.Mock() - - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - # Disable no-member to allow tests on generated mock attributes - # pylint: disable=no-member - assert event.set_results.call_count == 1 - event.set_results.assert_called_with({"reset-instance": True}) - assert isinstance(harness.model.unit.status, ops.ActiveStatus) - - -def test_reset_instance_action_container_down(harness: Harness) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: run reset-instance action. - assert: Synapse charm should reset the instance. - """ - harness.set_leader(True) - harness.begin() - harness.set_can_connect(harness.model.unit.containers[synapse.SYNAPSE_CONTAINER_NAME], False) - event = unittest.mock.Mock() - - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - assert event.set_results.call_count == 0 - assert event.fail.call_count == 1 - assert "Failed to connect to the container" == event.fail.call_args[0][0] - - -@pytest.mark.parametrize( - "harness", - [ - pytest.param(1, id="harness_exit_code"), - ], - indirect=True, -) -def test_reset_instance_action_failed(harness: Harness) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should be blocked by error on migrate_config command. - """ - harness.set_leader(True) - harness.begin() - event = unittest.mock.Mock() - - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - assert event.set_results.call_count == 0 - assert isinstance(harness.model.unit.status, ops.BlockedStatus) - assert "Migrate config failed" in str(harness.model.unit.status) - - -def test_reset_instance_action_path_error_blocked( - container_with_path_error_blocked: unittest.mock.MagicMock, - harness: Harness, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should be blocked by error on remove_path. 
- """ - harness.set_leader(True) - harness.begin() - harness.charm.unit.get_container = unittest.mock.MagicMock( - return_value=container_with_path_error_blocked - ) - event = unittest.mock.MagicMock() - monkeypatch.setattr(DatabaseClient, "erase", unittest.mock.MagicMock()) - - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - assert container_with_path_error_blocked.remove_path.call_count == 1 - assert isinstance(harness.model.unit.status, ops.BlockedStatus) - assert "Error erasing" in str(harness.model.unit.status) - - -def test_reset_instance_action_path_error_pass( - container_with_path_error_pass: unittest.mock.MagicMock, - harness: Harness, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should reset the instance. - """ - harness.set_leader(True) - harness.begin() - content = io.StringIO(f'server_name: "{TEST_SERVER_NAME}"') - pull_mock = unittest.mock.MagicMock(return_value=content) - monkeypatch.setattr(container_with_path_error_pass, "pull", pull_mock) - harness.charm.unit.get_container = unittest.mock.MagicMock( - return_value=container_with_path_error_pass - ) - event = unittest.mock.MagicMock() - monkeypatch.setattr(DatabaseClient, "erase", unittest.mock.MagicMock()) - - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - assert container_with_path_error_pass.remove_path.call_count == 1 - assert isinstance(harness.model.unit.status, ops.ActiveStatus) - - -def test_reset_instance_action_no_leader( - harness: Harness, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: change server_name and run reset-instance action. - assert: Synapse charm should take no action if no leader. - """ - harness.begin() - harness.set_leader(False) - - event = unittest.mock.MagicMock() - # Calling to test the action since is not possible calling via harness - harness.charm._on_reset_instance_action(event) - - # Disable no-member to allow tests on generated mock attributes - # pylint: disable=no-member - assert event.fail.call_count == 1 - assert "Only the juju leader unit can run reset instance action" == event.fail.call_args[0][0] - - -def test_reset_instance_action_erase_database( - harness: Harness, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """ - arrange: start the Synapse charm, set Synapse container to be ready and set server_name. - act: run reset-instance action. - assert: since there is a datasource, erase should be called. - """ - harness.begin() - db_erase_mock = unittest.mock.MagicMock() - monkeypatch.setattr(DatabaseClient, "erase", db_erase_mock) - monkeypatch.setattr("synapse.execute_migrate_config", unittest.mock.MagicMock()) - - actions.reset_instance( - container=unittest.mock.MagicMock(), - charm_state=harness.charm.build_charm_state(), - datasource=unittest.mock.MagicMock(), - ) - - db_erase_mock.assert_called_once() diff --git a/tests/unit/test_smtp_observer.py b/tests/unit/test_smtp_observer.py index 2eefad8e..2147211c 100644 --- a/tests/unit/test_smtp_observer.py +++ b/tests/unit/test_smtp_observer.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""SMTPObserver unit tests.""" diff --git a/tests/unit/test_synapse_api.py b/tests/unit/test_synapse_api.py index 4dd19e16..45aa8427 100644 --- a/tests/unit/test_synapse_api.py +++ b/tests/unit/test_synapse_api.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. """Synapse API unit tests.""" @@ -220,16 +220,18 @@ def test_override_rate_limit_success(monkeypatch: pytest.MonkeyPatch): admin_access_token = token_hex(16) server = token_hex(16) # while using Pydantic, mypy ignores default values - synapse_config = SynapseConfig(server_name=server) # type: ignore[call-arg] + synapse_config = SynapseConfig( + server_name=server, public_baseurl="https://example.com" + ) # type: ignore[call-arg] charm_state = CharmState( synapse_config=synapse_config, datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=None, media_config=None, redis_config=None, instance_map_config=None, + registration_secrets=None, ) expected_url = ( f"http://localhost:8008/_synapse/admin/v1/users/@any-user:{server}/override_ratelimit" @@ -242,7 +244,7 @@ def test_override_rate_limit_success(monkeypatch: pytest.MonkeyPatch): ) do_request_mock.assert_called_once_with( - "DELETE", expected_url, admin_access_token=admin_access_token + "POST", expected_url, admin_access_token=admin_access_token ) @@ -257,16 +259,18 @@ def test_override_rate_limit_error(monkeypatch: pytest.MonkeyPatch): admin_access_token = token_hex(16) server = token_hex(16) # while using Pydantic, mypy ignores default values - synapse_config = SynapseConfig(server_name=server) # type: ignore[call-arg] + synapse_config = SynapseConfig( + server_name=server, public_baseurl="https://example.com" + ) # type: ignore[call-arg] charm_state = CharmState( synapse_config=synapse_config, datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=None, media_config=None, redis_config=None, instance_map_config=None, + registration_secrets=None, ) expected_error_msg = "Failed to connect" do_request_mock = mock.MagicMock(side_effect=synapse.APIError(expected_error_msg)) diff --git a/tests/unit/test_synapse_workload.py b/tests/unit/test_synapse_workload.py index 15ad7f9d..6036357d 100644 --- a/tests/unit/test_synapse_workload.py +++ b/tests/unit/test_synapse_workload.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Synapse workload unit tests.""" @@ -15,7 +15,7 @@ import pytest import yaml from ops.testing import Harness -from pydantic import ValidationError +from pydantic.v1 import ValidationError import synapse from charm import SynapseCharm @@ -241,20 +241,23 @@ def test_enable_trusted_key_servers_no_action(config_content: dict[str, typing.A """ content = config_content - config = {"server_name": "foo"} + config = { + "server_name": "foo", + "public_baseurl": "https://foo", + } synapse_config = SynapseConfig(**config) # type: ignore[arg-type] synapse.enable_trusted_key_servers( content, CharmState( # pylint: disable=duplicate-code datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=None, media_config=None, redis_config=None, synapse_config=synapse_config, instance_map_config=None, + registration_secrets=None, ), ) @@ -381,7 +384,6 @@ def test_enable_saml_success(): "listeners": [ {"type": "http", "x_forwarded": True, "port": 8080, "bind_addresses": ["::"]} ], - "public_baseurl": TEST_SERVER_NAME, "saml2_enabled": True, "saml2_config": { "sp_config": { @@ -446,7 +448,6 @@ def test_enable_saml_success_no_ubuntu_url(): "listeners": [ {"type": "http", "x_forwarded": True, "port": 8080, "bind_addresses": ["::"]} ], - "public_baseurl": TEST_SERVER_NAME, "saml2_enabled": True, "saml2_config": { "sp_config": { @@ -486,10 +487,10 @@ def test_get_mjolnir_config_success(): assert config["managementRoom"] == room_id -def test_create_mjolnir_config_success(monkeypatch: pytest.MonkeyPatch): +def test_generate_mjolnir_config_success(monkeypatch: pytest.MonkeyPatch): """ arrange: set container, access token and room id parameters. - act: call create_mjolnir_config. + act: call generate_mjolnir_config. assert: file is pushed as expected. """ access_token = token_hex(16) @@ -498,7 +499,7 @@ def test_create_mjolnir_config_success(monkeypatch: pytest.MonkeyPatch): container_mock = MagicMock() monkeypatch.setattr(container_mock, "push", push_mock) - synapse.create_mjolnir_config( + synapse.generate_mjolnir_config( container=container_mock, access_token=access_token, room_id=room_id ) @@ -530,17 +531,18 @@ def test_enable_smtp_success(config_content: dict[str, typing.Any]): synapse_with_notif_config = { "notif_from": "noreply@example.com", "server_name": "example.com", + "public_baseurl": "https://example.com", } synapse_config = SynapseConfig(**synapse_with_notif_config) # type: ignore[arg-type] charm_state = CharmState( datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=SMTP_CONFIGURATION, media_config=None, redis_config=None, instance_map_config=None, synapse_config=synapse_config, + registration_secrets=None, ) synapse.enable_smtp(config_content, charm_state) @@ -697,6 +699,41 @@ def test_http_proxy( assert env.get(env_name) == env.get(env_name.upper()) == env_value +def test_block_non_admin_invites(config_content: dict[str, typing.Any]): + """ + arrange: set mock container with file. + act: update block_non_admin_invites config to true. + assert: new configuration file is pushed and block_non_admin_invites is enabled. 
+ """ + block_non_admin_invites = { + "block_non_admin_invites": True, + "server_name": "example.com", + "public_baseurl": "https://example.com", + } + synapse_config = SynapseConfig(**block_non_admin_invites) # type: ignore[arg-type] + charm_state = CharmState( + datasource=None, + saml_config=None, + smtp_config=SMTP_CONFIGURATION, + redis_config=None, + synapse_config=synapse_config, + media_config=None, + instance_map_config=None, + registration_secrets=None, + ) + + synapse.block_non_admin_invites(config_content, charm_state) + + expected_config_content = { + "block_non_admin_invites": True, + "listeners": [ + {"type": "http", "port": 8080, "bind_addresses": ["::"]}, + ], + } + + assert yaml.safe_dump(config_content) == yaml.safe_dump(expected_config_content) + + def test_publish_rooms_allowlist_success(config_content: dict[str, typing.Any]): """ arrange: mock Synapse current configuration with config_content and @@ -707,17 +744,18 @@ def test_publish_rooms_allowlist_success(config_content: dict[str, typing.Any]): synapse_with_notif_config = { "publish_rooms_allowlist": "user1:domainX.com,user2:domainY.com", "server_name": "example.com", + "public_baseurl": "https://example.com", } synapse_config = SynapseConfig(**synapse_with_notif_config) # type: ignore[arg-type] charm_state = CharmState( datasource=None, - irc_bridge_datasource=None, saml_config=None, smtp_config=SMTP_CONFIGURATION, redis_config=None, synapse_config=synapse_config, media_config=None, instance_map_config=None, + registration_secrets=None, ) synapse.enable_room_list_publication_rules(config_content, charm_state) @@ -755,8 +793,140 @@ def test_publish_rooms_allowlist_error(invalid_config): synapse_with_notif_config = { "publish_rooms_allowlist": invalid_config, "server_name": "example.com", + "public_baseurl": "https://example.com", } with pytest.raises(ValidationError): # Prevent mypy error: # Argument 1 to "SynapseConfig" has incompatible type "**dict[str, str]"; expected "bool" SynapseConfig(**synapse_with_notif_config) # type: ignore[arg-type] + + +def test_enable_rc_joins_remote_rate( + harness: Harness, + config_content: dict[str, typing.Any], +): + """ + arrange: set mock container with file. + act: update rc_joins_remote_rate config and call rc_joins_remote_rate. + assert: new configuration file is pushed and rc_joins_remote_rate is enabled. + """ + config = config_content + + harness.update_config({"rc_joins_remote_burst_count": 10, "rc_joins_remote_per_second": 0.2}) + harness.begin() + synapse.enable_rc_joins_remote_rate(config, harness.charm.build_charm_state()) + + expected_config_content = { + "listeners": [ + {"type": "http", "port": 8080, "bind_addresses": ["::"]}, + ], + "rc_joins": {"remote": {"burst_count": 10, "per_second": 0.2}}, + } + assert yaml.safe_dump(config) == yaml.safe_dump(expected_config_content) + + +def test_enable_limit_remote_rooms_complexity( + harness: Harness, + config_content: dict[str, typing.Any], +): + """ + arrange: set mock container with file. + act: update limit_remote_rooms_complexity config and call limit_remote_rooms_complexity. + assert: new configuration file is pushed and limit_remote_rooms_complexity is enabled. 
+ """ + config = config_content + + harness.update_config({"limit_remote_rooms_complexity": 0.2}) + harness.begin() + synapse.enable_limit_remote_rooms_complexity(config, harness.charm.build_charm_state()) + + expected_config_content = { + "listeners": [ + {"type": "http", "port": 8080, "bind_addresses": ["::"]}, + ], + "limit_remote_rooms": {"enabled": True, "complexity": 0.2}, + } + assert yaml.safe_dump(config) == yaml.safe_dump(expected_config_content) + + +def test_invite_checker_policy_rooms(config_content: dict[str, typing.Any]): + """ + arrange: set mock container with file. + act: update invite_checker_policy_rooms config. + assert: new configuration file is pushed and invite_checker_policy_rooms is enabled. + """ + invite_checker_policy_rooms = { + "invite_checker_policy_rooms": "foo:foo.com,foo1:foo1.com,foo2:foo2.foo1.com", + "server_name": "example.com", + "public_baseurl": "https://example.com", + } + synapse_config = SynapseConfig(**invite_checker_policy_rooms) # type: ignore[arg-type] + charm_state = CharmState( + datasource=None, + saml_config=None, + smtp_config=SMTP_CONFIGURATION, + redis_config=None, + synapse_config=synapse_config, + media_config=None, + instance_map_config=None, + registration_secrets=None, + ) + + synapse.enable_synapse_invite_checker(config_content, charm_state) + + expected_config_content = { + "listeners": [ + {"type": "http", "port": 8080, "bind_addresses": ["::"]}, + ], + "modules": [ + { + "config": { + "policy_room_ids": ["!foo:foo.com", "!foo1:foo1.com", "!foo2:foo2.foo1.com"] + }, + "module": "synapse_invite_checker.InviteChecker", + } + ], + } + + assert yaml.safe_dump(config_content) == yaml.safe_dump(expected_config_content) + + +def test_invite_checker_blocklist_allowlist_url(config_content: dict[str, typing.Any]): + """ + arrange: set mock container with file. + act: update invite_checker_blocklist_allowlist_url config. + assert: new configuration file is pushed and invite_checker_blocklist_allowlist_url is enabled. + """ + invite_checker_blocklist_allowlist_url = { + "invite_checker_blocklist_allowlist_url": "https://example.com/file", + "server_name": "example.com", + "public_baseurl": "https://example.com", + } + # pylint: disable=line-too-long + synapse_config = SynapseConfig(**invite_checker_blocklist_allowlist_url) # type: ignore[arg-type] # noqa: E501 + charm_state = CharmState( + datasource=None, + saml_config=None, + smtp_config=SMTP_CONFIGURATION, + redis_config=None, + synapse_config=synapse_config, + media_config=None, + instance_map_config=None, + registration_secrets=None, + ) + + synapse.enable_synapse_invite_checker(config_content, charm_state) + + expected_config_content = { + "listeners": [ + {"type": "http", "port": 8080, "bind_addresses": ["::"]}, + ], + "modules": [ + { + "config": {"blocklist_allowlist_url": "https://example.com/file"}, + "module": "synapse_invite_checker.InviteChecker", + } + ], + } + + assert yaml.safe_dump(config_content) == yaml.safe_dump(expected_config_content) diff --git a/tox.ini b/tox.ini index 96558bc5..1065796e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
[tox] @@ -48,7 +48,7 @@ description = Check code against coding style standards deps = black codespell - flake8<6.0.0 + flake8 flake8-builtins flake8-copyright<6.0.0 flake8-docstrings>=1.6.0 @@ -59,7 +59,7 @@ deps = pep8-naming pydocstyle>=2.10 pylint - pyproject-flake8<6.0.0 + pyproject-flake8 pytest pytest-asyncio pytest-operator diff --git a/trivy.yaml b/trivy.yaml index c895d699..8efe2695 100644 --- a/trivy.yaml +++ b/trivy.yaml @@ -1,3 +1,5 @@ timeout: 20m scan: offline-scan: true +db: + repository: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db