diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 839b895c82ca..4c7b0335e609 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -8,21 +8,21 @@ # If ignoring a pull request that was not squash merged, only the merge # commit needs to be put here. Child commits will be resolved from it. -# Run black (#3679). +# Run black (https://github.com/matrix-org/synapse/pull/3679). 8b3d9b6b199abb87246f982d5db356f1966db925 -# Black reformatting (#5482). +# Black reformatting (https://github.com/matrix-org/synapse/pull/5482). 32e7c9e7f20b57dd081023ac42d6931a8da9b3a3 -# Target Python 3.5 with black (#8664). +# Target Python 3.5 with black (https://github.com/matrix-org/synapse/pull/8664). aff1eb7c671b0a3813407321d2702ec46c71fa56 -# Update black to 20.8b1 (#9381). +# Update black to 20.8b1 (https://github.com/matrix-org/synapse/pull/9381). 0a00b7ff14890987f09112a2ae696c61001e6cf1 -# Convert tests/rest/admin/test_room.py to unix file endings (#7953). +# Convert tests/rest/admin/test_room.py to unix file endings (https://github.com/matrix-org/synapse/pull/7953). c4268e3da64f1abb5b31deaeb5769adb6510c0a7 -# Update black to 23.1.0 (#15103) +# Update black to 23.1.0 (https://github.com/matrix-org/synapse/pull/15103) 9bb2eac71962970d02842bca441f4bcdbbf93a11 diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 3704bd66e2ce..9cf3d340a419 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -6,6 +6,7 @@ on: - docs/** - book.toml - .github/workflows/docs-pr.yaml + - scripts-dev/schema_versions.py jobs: pages: @@ -13,12 +14,22 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + # Fetch all history so that the schema_versions script works. + fetch-depth: 0 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 with: mdbook-version: '0.4.17' + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: "3.x" + + - run: "pip install 'packaging>=20.0' 'GitPython>=3.1.20'" + - name: Build the documentation # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md. # However, we're using docs/README.md for other purposes and need to pick a new page diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 63c7ee9cab9e..6b9135a627db 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -51,6 +51,9 @@ jobs: - pre steps: - uses: actions/checkout@v4 + with: + # Fetch all history so that the schema_versions script works. + fetch-depth: 0 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 @@ -60,6 +63,13 @@ jobs: - name: Set version of docs run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: "3.x" + + - run: "pip install 'packaging>=20.0' 'GitPython>=3.1.20'" + - name: Build the documentation # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md. # However, we're using docs/README.md for other purposes and need to pick a new page diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml new file mode 100644 index 000000000000..f1e35fcd99d9 --- /dev/null +++ b/.github/workflows/fix_lint.yaml @@ -0,0 +1,52 @@ +# A helper workflow to automatically fixup any linting errors on a PR. Must be +# triggered manually. 
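For reference, a `workflow_dispatch`-only workflow like the new `fix_lint.yaml` above is started from the Actions tab ("Run workflow") or from the command line. A minimal sketch, assuming the GitHub CLI is installed and authenticated (the branch name below is illustrative):

```sh
# Kick off the lint-fixup workflow against a PR branch (branch name is illustrative).
gh workflow run fix_lint.yaml --ref my-feature-branch
```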
+ +name: Attempt to automatically fix linting errors + +on: + workflow_dispatch: + +jobs: + fixup: + name: Fix up + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + # We use nightly so that `fmt` correctly groups together imports, and + # clippy correctly fixes up the benchmarks. + toolchain: nightly-2022-12-01 + components: rustfmt + - uses: Swatinem/rust-cache@v2 + + - name: Setup Poetry + uses: matrix-org/setup-python-poetry@v1 + with: + install-project: "false" + + - name: Import order (isort) + continue-on-error: true + run: poetry run isort . + + - name: Code style (black) + continue-on-error: true + run: poetry run black . + + - name: Semantic checks (ruff) + continue-on-error: true + run: poetry run ruff --fix . + + - run: cargo clippy --all-features --fix -- -D warnings + continue-on-error: true + + - run: cargo fmt + continue-on-error: true + + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: "Attempt to fix linting" diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index c9ec70abe981..cb801afcbf26 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -197,11 +197,14 @@ jobs: with: path: synapse - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + - run: | set -o pipefail TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index fed3a4158653..8019f4c250af 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -130,7 +130,7 @@ jobs: python-version: "3.x" - name: Install cibuildwheel - run: python -m pip install cibuildwheel==2.9.0 + run: python -m pip install cibuildwheel==2.16.2 - name: Set up QEMU to emulate aarch64 if: matrix.arch == 'aarch64' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 13746608d4b0..a1f714da2338 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -633,14 +633,18 @@ jobs: uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + + # use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once. 
- run: | set -o pipefail - COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt + COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash env: POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }} diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 062f782e8b77..1011a15390af 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -168,11 +168,14 @@ jobs: with: path: synapse - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + # This step is specific to the 'Twisted trunk' test run: - name: Patch dependencies run: | diff --git a/CHANGES.md b/CHANGES.md index 0cabf8a6ece7..bb34267af331 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,186 @@ +# Synapse 1.97.0 (2023-11-28) + +Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for +proprietary dual licensing). You can read more about this here: + + - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/ + - https://element.io/blog/element-to-adopt-agplv3/ + +The Matrix.org Foundation copy of the project will be archived. Any changes needed +by server administrators will be communicated via our usual announcements channels, +but we are striving to make this as seamless as possible. + + +No significant changes since 1.97.0rc1. + + +# Synapse 1.97.0rc1 (2023-11-21) + +### Features + +- Add support for asynchronous uploads as defined by [MSC2246](https://github.com/matrix-org/matrix-spec-proposals/pull/2246). Contributed by @sumnerevans at @beeper. ([\#15503](https://github.com/matrix-org/synapse/issues/15503)) +- Improve the performance of some operations in multi-worker deployments. ([\#16613](https://github.com/matrix-org/synapse/issues/16613), [\#16616](https://github.com/matrix-org/synapse/issues/16616)) + +### Bugfixes + +- Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0. ([\#16609](https://github.com/matrix-org/synapse/issues/16609)) +- Fix a long-standing bug where Synapse would not unbind third-party identifiers for Application Service users when deactivated and would not emit a compliant response. ([\#16617](https://github.com/matrix-org/synapse/issues/16617)) +- Fix sending out of order `POSITION` over replication, causing additional database load. ([\#16639](https://github.com/matrix-org/synapse/issues/16639)) + +### Improved Documentation + +- Note that the option [`outbound_federation_restricted_to`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#outbound_federation_restricted_to) was added in Synapse 1.89.0, and fix a nearby formatting error. ([\#16628](https://github.com/matrix-org/synapse/issues/16628)) +- Update parameter information for the `/timestamp_to_event` admin API. ([\#16631](https://github.com/matrix-org/synapse/issues/16631)) +- Provide an example for a common encrypted media response from the admin user media API and mention possible null values. ([\#16654](https://github.com/matrix-org/synapse/issues/16654)) + +### Internal Changes + +- Remove whole table locks on push rule modifications. Contributed by Nick @ Beeper (@fizzadar). 
([\#16051](https://github.com/matrix-org/synapse/issues/16051)) +- Support reactor tick timings on more types of event loops. ([\#16532](https://github.com/matrix-org/synapse/issues/16532)) +- Improve type hints. ([\#16564](https://github.com/matrix-org/synapse/issues/16564), [\#16611](https://github.com/matrix-org/synapse/issues/16611), [\#16612](https://github.com/matrix-org/synapse/issues/16612)) +- Avoid executing no-op queries. ([\#16583](https://github.com/matrix-org/synapse/issues/16583)) +- Simplify persistence code to be per-room. ([\#16584](https://github.com/matrix-org/synapse/issues/16584)) +- Use standard SQL helpers in persistence code. ([\#16585](https://github.com/matrix-org/synapse/issues/16585)) +- Avoid updating the stream cache unnecessarily. ([\#16586](https://github.com/matrix-org/synapse/issues/16586)) +- Improve performance when using opentracing. ([\#16589](https://github.com/matrix-org/synapse/issues/16589)) +- Run push rule evaluator setup in parallel. ([\#16590](https://github.com/matrix-org/synapse/issues/16590)) +- Improve tests of the SQL generator. ([\#16596](https://github.com/matrix-org/synapse/issues/16596)) +- Use more generic database methods. ([\#16615](https://github.com/matrix-org/synapse/issues/16615)) +- Use `dbname` instead of the deprecated `database` connection parameter for psycopg2. ([\#16618](https://github.com/matrix-org/synapse/issues/16618)) +- Add an internal [Admin API endpoint](https://matrix-org.github.io/synapse/v1.97/usage/configuration/config_documentation.html#allow-replacing-master-cross-signing-key-without-user-interactive-auth) to temporarily grant the ability to update an existing cross-signing key without UIA. ([\#16634](https://github.com/matrix-org/synapse/issues/16634)) +- Improve references to GitHub issues. ([\#16637](https://github.com/matrix-org/synapse/issues/16637), [\#16638](https://github.com/matrix-org/synapse/issues/16638)) +- More efficiently handle no-op `POSITION` over replication. ([\#16640](https://github.com/matrix-org/synapse/issues/16640), [\#16655](https://github.com/matrix-org/synapse/issues/16655)) +- Speed up deleting of device messages when deleting a device. ([\#16643](https://github.com/matrix-org/synapse/issues/16643)) +- Speed up persisting large number of outliers. ([\#16649](https://github.com/matrix-org/synapse/issues/16649)) +- Reduce max concurrency of background tasks, reducing potential max DB load. ([\#16656](https://github.com/matrix-org/synapse/issues/16656), [\#16660](https://github.com/matrix-org/synapse/issues/16660)) +- Speed up purge room by adding an index to `event_push_summary`. ([\#16657](https://github.com/matrix-org/synapse/issues/16657)) + + + +### Updates to locked dependencies + +* Bump prometheus-client from 0.17.1 to 0.18.0. ([\#16626](https://github.com/matrix-org/synapse/issues/16626)) +* Bump pyicu from 2.11 to 2.12. ([\#16603](https://github.com/matrix-org/synapse/issues/16603)) +* Bump requests-toolbelt from 0.10.1 to 1.0.0. ([\#16659](https://github.com/matrix-org/synapse/issues/16659)) +* Bump ruff from 0.0.292 to 0.1.4. ([\#16600](https://github.com/matrix-org/synapse/issues/16600)) +* Bump serde from 1.0.190 to 1.0.192. ([\#16627](https://github.com/matrix-org/synapse/issues/16627)) +* Bump serde_json from 1.0.107 to 1.0.108. ([\#16604](https://github.com/matrix-org/synapse/issues/16604)) +* Bump setuptools-rust from 1.8.0 to 1.8.1. ([\#16601](https://github.com/matrix-org/synapse/issues/16601)) +* Bump towncrier from 23.6.0 to 23.11.0. 
([\#16622](https://github.com/matrix-org/synapse/issues/16622)) +* Bump treq from 22.2.0 to 23.11.0. ([\#16623](https://github.com/matrix-org/synapse/issues/16623)) +* Bump twisted from 23.8.0 to 23.10.0. ([\#16588](https://github.com/matrix-org/synapse/issues/16588)) +* Bump types-bleach from 6.1.0.0 to 6.1.0.1. ([\#16624](https://github.com/matrix-org/synapse/issues/16624)) +* Bump types-jsonschema from 4.19.0.3 to 4.19.0.4. ([\#16599](https://github.com/matrix-org/synapse/issues/16599)) +* Bump types-pyopenssl from 23.2.0.2 to 23.3.0.0. ([\#16625](https://github.com/matrix-org/synapse/issues/16625)) +* Bump types-pyyaml from 6.0.12.11 to 6.0.12.12. ([\#16602](https://github.com/matrix-org/synapse/issues/16602)) + +# Synapse 1.96.1 (2023-11-17) + +Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for +proprietary dual licensing). You can read more about this here: + +* https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/ +* https://element.io/blog/element-to-adopt-agplv3/ + +The Matrix.org Foundation copy of the project will be archived. Any changes needed +by server administrators will be communicated via our usual +[announcements channels](https://matrix.to/#/#homeowners:matrix.org), but we are +striving to make this as seamless as possible. + +This minor release was needed only because of CI-related trouble on [v1.96.0](https://github.com/matrix-org/synapse/releases/tag/v1.96.0), which was never released. + +### Internal Changes + +- Fix building of wheels in CI. ([\#16653](https://github.com/matrix-org/synapse/issues/16653)) + +# Synapse 1.96.0 (2023-11-16) + +### Bugfixes + +- Fix "'int' object is not iterable" error in `set_device_id_for_pushers` background update introduced in Synapse 1.95.0. ([\#16594](https://github.com/matrix-org/synapse/issues/16594)) + +# Synapse 1.96.0rc1 (2023-10-31) + +### Features + +- Add experimental support to allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) +- Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544)) +- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549)) +- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570)) + +### Bugfixes + +- Fixed a bug in the example Grafana dashboard that prevents it from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) +- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580)) +- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. 
([\#16485](https://github.com/matrix-org/synapse/issues/16485)) +- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504)) +- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530)) +- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540)) +- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558)) +- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559)) + +### Improved Documentation + +- Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529)) +- Add a sentence to the [opentracing docs](https://matrix-org.github.io/synapse/latest/opentracing.html) on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531)) +- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541)) +- Pin the recommended poetry version in [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) +- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569)) + +### Internal Changes + +- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492)) +- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505)) +- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510)) +- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511)) +- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512)) +- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515)) +- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520)) +- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521)) +- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551)) +- Fix running unit tests on Twisted trunk. 
([\#16528](https://github.com/matrix-org/synapse/issues/16528)) +- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555)) +- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563)) +- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567)) +- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) + +### Updates to locked dependencies + +* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575)) +* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538)) +* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572)) +* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534)) +* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576)) +* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535)) +- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) +* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577)) +* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) +* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536)) +* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573)) +* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537)) +* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516)) + +# Synapse 1.95.1 (2023-10-31) + +## Security advisory + +The following issue is fixed in 1.95.1. + +- [GHSA-mp92-3jfm-3575](https://github.com/matrix-org/synapse/security/advisories/GHSA-mp92-3jfm-3575) / [CVE-2023-43796](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-43796) — Moderate Severity + + Cached device information of remote users can be queried from Synapse. This can be used to enumerate the remote users known to a homeserver. + +See the advisory for more details. If you have any questions, email security@matrix.org. + + + +# Synapse 1.95.0 (2023-10-24) + +### Internal Changes + +- Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). 
([\#16524](https://github.com/matrix-org/synapse/issues/16524)) + + # Synapse 1.95.0rc1 (2023-10-17) ### Bugfixes diff --git a/Cargo.lock b/Cargo.lock index 5acf47cea87c..d5e77297f4a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,6 +90,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "hex" version = "0.4.3" @@ -98,9 +104,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "indoc" -version = "1.0.7" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" +checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" [[package]] name = "itoa" @@ -191,9 +197,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e681a6cfdc4adcc93b4d3cf993749a4552018ee0a9b65fc0ccfad74352c72a38" +checksum = "04e8453b658fe480c3e70c8ed4e3d3ec33eb74988bd186561b0cc66b85c3bc4b" dependencies = [ "anyhow", "cfg-if", @@ -209,9 +215,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5" +checksum = "a96fe70b176a89cff78f2fa7b3c930081e163d5379b4dcdf993e3ae29ca662e5" dependencies = [ "once_cell", "target-lexicon", @@ -219,9 +225,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53cee42e77ebe256066ba8aa77eff722b3bb91f3419177cf4cd0f304d3284d9" +checksum = "214929900fd25e6604661ed9cf349727c8920d47deff196c4e28165a6ef2a96b" dependencies = [ "libc", "pyo3-build-config", @@ -229,9 +235,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.8.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09c2b349b6538d8a73d436ca606dab6ce0aaab4dad9e6b7bdd57a4f556c3bc3" +checksum = "4c10808ee7250403bedb24bc30c32493e93875fef7ba3e4292226fe924f398bd" dependencies = [ "arc-swap", "log", @@ -240,32 +246,33 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfeb4c99597e136528c6dd7d5e3de5434d1ceaf487436a3f03b2d56b6fc9efd1" +checksum = "dac53072f717aa1bfa4db832b39de8c875b7c7af4f4a6fe93cdbf9264cf8383b" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 1.0.104", + "syn", ] [[package]] name = "pyo3-macros-backend" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "947dc12175c254889edc0c02e399476c2f652b4b9ebd123aa655c224de259536" +checksum = "7774b5a8282bd4f25f803b1f0d945120be959a36c72e08e7cd031c792fdfd424" dependencies = [ + "heck", "proc-macro2", "quote", - "syn 1.0.104", + "syn", ] [[package]] name = "pythonize" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e35b716d430ace57e2d1b4afb51c9e5b7c46d2bce72926e07f9be6a98ced03e" +checksum = "ffd1c3ef39c725d63db5f9bc455461bafd80540cb7824c61afb823501921a850" dependencies = [ "pyo3", 
"serde", @@ -332,29 +339,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -373,17 +380,6 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" -[[package]] -name = "syn" -version = "1.0.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.28" @@ -432,9 +428,9 @@ checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unindent" -version = "0.1.10" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" [[package]] name = "version_check" diff --git a/README.rst b/README.rst index d116cd51fbcf..4a904296476e 100644 --- a/README.rst +++ b/README.rst @@ -122,7 +122,7 @@ You will need to change the server you are logging into from ``matrix.org`` and instead specify a Homeserver URL of ``https://:8448`` (or just ``https://`` if you are using a reverse proxy). If you prefer to use another client, refer to our -`client breakdown `_. +`client breakdown `_. If all goes well you should at least be able to log in, create a room, and start sending messages. diff --git a/book.toml b/book.toml index 10afe45dc48e..977a8450bc3a 100644 --- a/book.toml +++ b/book.toml @@ -41,4 +41,7 @@ additional-js = [ "docs/website_files/version-picker.js", "docs/website_files/version.js", ] -theme = "docs/website_files/theme" \ No newline at end of file +theme = "docs/website_files/theme" + +[preprocessor.schema_versions] +command = "./scripts-dev/schema_versions.py" diff --git a/changelog.d/16485.bugfix b/changelog.d/16485.bugfix deleted file mode 100644 index 3cd7e1877fb9..000000000000 --- a/changelog.d/16485.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. 
diff --git a/changelog.d/16492.misc b/changelog.d/16492.misc deleted file mode 100644 index ecb3356bdd64..000000000000 --- a/changelog.d/16492.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). diff --git a/changelog.d/16510.misc b/changelog.d/16510.misc deleted file mode 100644 index 5556b5d74c74..000000000000 --- a/changelog.d/16510.misc +++ /dev/null @@ -1 +0,0 @@ -Improve replication performance when purging rooms. diff --git a/changelog.d/16511.misc b/changelog.d/16511.misc deleted file mode 100644 index 7b7d9ee5b8c6..000000000000 --- a/changelog.d/16511.misc +++ /dev/null @@ -1 +0,0 @@ -Run tests against Python 3.12. diff --git a/changelog.d/16512.misc b/changelog.d/16512.misc deleted file mode 100644 index dcc53510c41d..000000000000 --- a/changelog.d/16512.misc +++ /dev/null @@ -1 +0,0 @@ -Run trial & integration tests in continuous integration when `.ci` directory is modified. diff --git a/changelog.d/16529.doc b/changelog.d/16529.doc deleted file mode 100644 index 0f8a87f2935b..000000000000 --- a/changelog.d/16529.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of presence router. diff --git a/changelog.d/16552.misc b/changelog.d/16552.misc new file mode 100644 index 000000000000..73a0376df097 --- /dev/null +++ b/changelog.d/16552.misc @@ -0,0 +1 @@ +Reduce a little database load while processing state auth chains. diff --git a/changelog.d/16642.bugfix b/changelog.d/16642.bugfix new file mode 100644 index 000000000000..e83c8b4f9d58 --- /dev/null +++ b/changelog.d/16642.bugfix @@ -0,0 +1 @@ +Enable refreshable tokens on the admin registration endpoint. diff --git a/changelog.d/16661.doc b/changelog.d/16661.doc new file mode 100644 index 000000000000..74f8fc84b8f7 --- /dev/null +++ b/changelog.d/16661.doc @@ -0,0 +1 @@ +Add schema rollback information to documentation. diff --git a/changelog.d/16667.misc b/changelog.d/16667.misc new file mode 100644 index 000000000000..51aeca924397 --- /dev/null +++ b/changelog.d/16667.misc @@ -0,0 +1 @@ +Reduce database load of pruning old `user_ips`. diff --git a/changelog.d/16670.bugfix b/changelog.d/16670.bugfix new file mode 100644 index 000000000000..f1369abc0607 --- /dev/null +++ b/changelog.d/16670.bugfix @@ -0,0 +1 @@ +Consistently bypass rate limits when using the server notice admin API. diff --git a/changelog.d/16672.feature b/changelog.d/16672.feature new file mode 100644 index 000000000000..05ecf2220733 --- /dev/null +++ b/changelog.d/16672.feature @@ -0,0 +1 @@ +Restore tracking of requests and monthly active users when delegating authentication to an [MSC3861](https://github.com/matrix-org/synapse/pull/16672) OIDC provider. diff --git a/changelog.d/16673.misc b/changelog.d/16673.misc new file mode 100644 index 000000000000..8b274ede71f8 --- /dev/null +++ b/changelog.d/16673.misc @@ -0,0 +1 @@ +Bump pyo3 (0.19.2→0.20.0), pythonize (0.19.0→0.20.0) and pyo3-log (0.8.1→0.9.0). diff --git a/changelog.d/16677.misc b/changelog.d/16677.misc new file mode 100644 index 000000000000..20c37851c396 --- /dev/null +++ b/changelog.d/16677.misc @@ -0,0 +1 @@ +Ignore `encryption_enabled_by_default_for_room_type` setting when creating server notices room, since the notices will be send unencrypted anyway. diff --git a/changelog.d/16682.misc b/changelog.d/16682.misc new file mode 100644 index 000000000000..071715e83a80 --- /dev/null +++ b/changelog.d/16682.misc @@ -0,0 +1 @@ +Correctly read the to-device stream ID on startup using SQLite. 
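The deleted `changelog.d` files above are the towncrier news fragments that were folded into the 1.96.x notes in `CHANGES.md`, while the added ones cover not-yet-released changes. As a reminder of the convention (the PR number and wording below are illustrative), each change ships a one-line fragment named after its PR number and change type:

```sh
# Illustrative only: a `misc` news fragment for a hypothetical PR #16800.
echo "Describe the change in one sentence." > changelog.d/16800.misc
```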
diff --git a/changelog.d/16684.misc b/changelog.d/16684.misc new file mode 100644 index 000000000000..6fb55c08a57b --- /dev/null +++ b/changelog.d/16684.misc @@ -0,0 +1 @@ +Reoranganise test files. diff --git a/changelog.d/16695.doc b/changelog.d/16695.doc new file mode 100644 index 000000000000..6cb284c50114 --- /dev/null +++ b/changelog.d/16695.doc @@ -0,0 +1 @@ +Fix poetry version typo in [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). diff --git a/changelog.d/16697.misc b/changelog.d/16697.misc new file mode 100644 index 000000000000..02fd236ab43e --- /dev/null +++ b/changelog.d/16697.misc @@ -0,0 +1 @@ +Remove old full schema dumps which are no longer used. diff --git a/changelog.d/16704.misc b/changelog.d/16704.misc new file mode 100644 index 000000000000..4dafb27fd8c1 --- /dev/null +++ b/changelog.d/16704.misc @@ -0,0 +1 @@ +Add a workflow to try and automatically fixup linting in a PR. diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 90f449aa7678..188597c8dd67 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -1,14 +1,4 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], "__elements": {}, "__requires": [ { @@ -47,7 +37,7 @@ { "builtIn": 1, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "enable": false, "hide": true, @@ -93,7 +83,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -107,7 +97,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -129,7 +119,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -203,7 +193,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -235,7 +225,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -333,7 +323,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -343,7 +333,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -354,7 +344,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -364,7 +354,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, 
sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -374,7 +364,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "25%", @@ -382,7 +372,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "5%", @@ -390,7 +380,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", "legendFormat": "Average", @@ -398,7 +388,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))", "hide": false, @@ -468,7 +458,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -515,7 +505,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -575,7 +565,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -625,7 +615,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -638,7 +628,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "hide": true, @@ -776,7 +766,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -831,7 +821,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -844,7 +834,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -893,7 +883,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -910,7 +900,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -973,7 +963,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": 
"rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", @@ -987,7 +977,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1217,7 +1207,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -1267,7 +1257,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -1280,7 +1270,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "interval": "", @@ -1326,7 +1316,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1379,7 +1369,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -1432,7 +1422,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1487,7 +1477,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", @@ -1500,7 +1490,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1", @@ -1546,7 +1536,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1592,7 +1582,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1604,7 +1594,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1664,7 +1654,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1710,7 +1700,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_client_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -1720,7 +1710,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_matrixfederationclient_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -1857,7 
+1847,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -1869,7 +1859,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -1893,7 +1883,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1967,7 +1957,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -1998,7 +1988,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "editable": true, @@ -2049,7 +2039,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)", "format": "time_series", @@ -2099,7 +2089,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2140,7 +2130,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -2187,7 +2177,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2228,7 +2218,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -2278,7 +2268,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2322,7 +2312,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -2370,7 +2360,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2414,7 +2404,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum(rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)", @@ -2614,7 +2604,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "CPU and DB time spent on most expensive state resolution in a room, summed over all workers. This is a very rough proxy for \"how fast is state res\", but it doesn't accurately represent the system load (e.g. 
it completely ignores cheap state resolutions).\n", "fieldConfig": { @@ -2692,7 +2682,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": false, "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", @@ -2706,7 +2696,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": false, "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", @@ -2726,7 +2716,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -2738,7 +2728,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -2755,7 +2745,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -2808,7 +2798,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -2877,7 +2867,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -2926,7 +2916,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})", "format": "time_series", @@ -2976,7 +2966,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3029,7 +3019,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3098,7 +3088,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3151,7 +3141,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3220,7 +3210,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3272,7 +3262,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": 
"time_series", @@ -3321,7 +3311,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3374,7 +3364,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))", "format": "time_series", @@ -3422,7 +3412,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3475,7 +3465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "format": "time_series", @@ -3486,7 +3476,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -3529,7 +3519,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -3541,7 +3531,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -3557,7 +3547,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3604,7 +3594,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3650,7 +3640,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3697,7 +3687,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3743,7 +3733,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3788,7 +3778,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_background_process_in_flight_count{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "legendFormat": "{{job}}-{{index}} {{name}}", @@ -3830,7 +3820,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -3842,7 +3832,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -3858,7 +3848,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": 
"${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3905,7 +3895,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -3915,7 +3905,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "legendFormat": "failed txn rate", @@ -3958,7 +3948,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4005,7 +3995,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4015,7 +4005,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4061,7 +4051,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4108,7 +4098,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations_count_total{instance=\"$instance\"}[$bucket_size]))", @@ -4121,7 +4111,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4167,7 +4157,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4214,7 +4204,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_federation_client_sent_edus_by_type_total{instance=\"$instance\"}[$bucket_size])", @@ -4509,7 +4499,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "The number of events in the in-memory queues ", "fieldConfig": { @@ -4556,7 +4546,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", @@ -4568,7 +4558,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", @@ -4617,7 +4607,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Number of events queued up on the master process for processing by the federation sender", "fieldConfig": { @@ -4665,7 +4655,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_changed_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": 
"time_series", @@ -4676,7 +4666,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_map_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4688,7 +4678,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_destinations_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4700,7 +4690,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_keyed_edu_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4712,7 +4702,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_edus_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4724,7 +4714,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_pos_time_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4780,7 +4770,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4857,7 +4847,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -4892,7 +4882,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4981,7 +4971,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -4992,7 +4982,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5003,7 +4993,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5014,7 +5004,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5025,7 +5015,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", @@ -5034,7 +5024,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, 
sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", @@ -5043,7 +5033,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -5116,7 +5106,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -5193,7 +5183,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -5229,7 +5219,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -5279,7 +5269,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", @@ -5333,7 +5323,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -5383,7 +5373,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", @@ -5437,7 +5427,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -5477,7 +5467,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -5522,7 +5512,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -5903,7 +5893,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -6008,7 +5998,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.9995, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", @@ -6021,7 +6011,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.99, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", @@ -6033,7 +6023,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6044,7 +6034,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, 
sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6054,7 +6044,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6064,7 +6054,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "legendFormat": "25%", @@ -6072,7 +6062,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "legendFormat": "5%", @@ -6080,7 +6070,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_rate_limit_queue_wait_time_seconds_sum{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_rate_limit_queue_wait_time_seconds_count{index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "legendFormat": "Average", @@ -6267,7 +6257,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -6280,7 +6270,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -6359,7 +6349,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", @@ -6373,7 +6363,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", @@ -6394,7 +6384,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -6441,7 +6431,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})", "legendFormat": "{{kind}} {{app_id}}", @@ -6483,7 +6473,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -6495,7 +6485,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -6662,7 +6652,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": true, "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", @@ -7077,7 +7067,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": 
"${DS_PROMETHEUS}" }, "refId": "A" } @@ -7089,7 +7079,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -7101,7 +7091,7 @@ "panels": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -7179,7 +7169,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", "format": "time_series", @@ -7198,7 +7188,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", "fieldConfig": { @@ -7247,7 +7237,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7259,7 +7249,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7269,7 +7259,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7279,7 +7269,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7327,7 +7317,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7379,7 +7369,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7427,7 +7417,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7479,7 +7469,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7527,7 +7517,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7579,7 +7569,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7627,7 +7617,7 @@ "dashLength": 10, "dashes": false, 
"datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -7673,7 +7663,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7683,7 +7673,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7693,7 +7683,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7703,7 +7693,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7751,7 +7741,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -7763,7 +7753,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -7779,7 +7769,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7830,7 +7820,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])", "format": "time_series", @@ -7877,7 +7867,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7928,7 +7918,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])", "format": "time_series", @@ -8079,7 +8069,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "The time each database transaction takes to execute, on average, broken down by metrics block.", "editable": true, @@ -8131,7 +8121,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8178,7 +8168,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8228,7 +8218,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": 
"${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8275,7 +8265,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8325,7 +8315,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8374,7 +8364,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -8414,7 +8404,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "interval": "", @@ -8457,7 +8447,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -8469,7 +8459,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -8485,7 +8475,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 2, "editable": true, @@ -8538,7 +8528,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -8588,7 +8578,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8639,7 +8629,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -8688,7 +8678,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8739,7 +8729,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -8787,7 +8777,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -8839,7 +8829,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -8888,7 +8878,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -8935,7 +8925,7 
@@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8981,7 +8971,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -8993,7 +8983,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9009,7 +8999,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9055,7 +9045,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", @@ -9099,7 +9089,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9145,7 +9135,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", "interval": "", @@ -9154,7 +9144,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "", "interval": "", @@ -9199,7 +9189,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -9211,7 +9201,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9227,7 +9217,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9274,7 +9264,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", "format": "time_series", @@ -9321,7 +9311,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 3, "editable": true, @@ -9373,7 +9363,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", "format": "time_series", @@ -9420,7 +9410,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.", "fieldConfig": { @@ -9475,7 +9465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -9522,7 +9512,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9569,7 +9559,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": 
"rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -9614,7 +9604,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9661,7 +9651,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -9772,7 +9762,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -9784,7 +9774,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9801,7 +9791,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9848,7 +9838,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", "format": "time_series", @@ -9893,7 +9883,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9991,7 +9981,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10090,7 +10080,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10288,7 +10278,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10335,7 +10325,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_replication_tcp_protocol_close_reason_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -10382,7 +10372,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10429,7 +10419,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -10439,7 +10429,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -10484,7 +10474,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -10496,7 +10486,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -10512,7 +10502,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10559,7 +10549,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() 
group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -10607,7 +10597,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10654,7 +10644,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -10702,7 +10692,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10750,7 +10740,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1", "format": "time_series", @@ -10797,7 +10787,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -10809,7 +10799,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -10833,7 +10823,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { @@ -10909,7 +10899,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -10941,7 +10931,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { @@ -10989,7 +10979,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0", "format": "heatmap", @@ -11044,7 +11034,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", "fieldConfig": { @@ -11120,7 +11110,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -11152,7 +11142,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", "fieldConfig": { @@ -11199,7 +11189,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11209,7 +11199,7 
@@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11219,7 +11209,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11229,7 +11219,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11284,7 +11274,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { @@ -11360,7 +11350,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -11392,7 +11382,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { @@ -11439,7 +11429,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11449,7 +11439,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11459,7 +11449,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11469,7 +11459,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ 
-11524,7 +11514,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", "fieldConfig": { @@ -11600,7 +11590,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "heatmap", @@ -11634,7 +11624,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", "fieldConfig": { @@ -11682,7 +11672,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", @@ -11695,7 +11685,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11706,7 +11696,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11717,7 +11707,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11765,7 +11755,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", "fill": 1, @@ -11805,7 +11795,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11814,7 +11804,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11823,7 +11813,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11866,7 +11856,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -11878,7 +11868,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -11895,7 +11885,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -11949,7 +11939,7 @@ 
"targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "max(synapse_admin_mau_max{instance=\"$instance\"})", @@ -11963,7 +11953,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "max(synapse_admin_mau_current{instance=\"$instance\"})", @@ -12012,7 +12002,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12051,7 +12041,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}", "interval": "", @@ -12094,7 +12084,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12106,7 +12096,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12123,7 +12113,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12169,7 +12159,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_notifier_users_woken_by_stream_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12222,7 +12212,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12268,7 +12258,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_get_updates_total{job=~\"$job\",instance=\"$instance\"}[$bucket_size])", @@ -12319,7 +12309,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12331,7 +12321,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12348,7 +12338,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12387,7 +12377,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_appservice_api_sent_events_total{instance=\"$instance\"}[$bucket_size])", @@ -12436,7 +12426,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12475,7 +12465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_appservice_api_sent_transactions_total{instance=\"$instance\"}[$bucket_size])", @@ -12522,7 +12512,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12534,7 +12524,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12550,7 +12540,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12589,7 +12579,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": 
"rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12598,7 +12588,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12607,7 +12597,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12616,7 +12606,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12625,7 +12615,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12670,7 +12660,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12709,7 +12699,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_state_transition_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12758,7 +12748,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12797,7 +12787,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_notify_reason_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12844,7 +12834,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12856,7 +12846,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12869,7 +12859,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12946,7 +12936,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_external_cache_set{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", @@ -12966,7 +12956,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fill": 1, @@ -13006,7 +12996,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum without (hit) (rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size]))", @@ -13063,7 +13053,7 @@ "dataFormat": "tsbuckets", "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -13140,7 +13130,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])) by (le)", 
"format": "heatmap", @@ -13172,7 +13162,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -13246,7 +13236,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\", hit=\"False\"}[$bucket_size])", @@ -13264,7 +13254,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -13290,7 +13280,8 @@ "hide": 0, "includeAll": false, "multi": false, - "name": "datasource", + "name": "DS_PROMETHEUS", + "label": "Datasource", "options": [], "query": "prometheus", "queryValue": "", @@ -13361,7 +13352,7 @@ { "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, @@ -13387,7 +13378,7 @@ "allValue": "", "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, @@ -13417,7 +13408,7 @@ "allValue": ".*", "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, diff --git a/debian/changelog b/debian/changelog index 979d5facfa7f..5ec877160480 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,45 @@ +matrix-synapse-py3 (1.97.0) stable; urgency=medium + + * New Synapse release 1.97.0. + + -- Synapse Packaging team Tue, 28 Nov 2023 14:08:58 +0000 + +matrix-synapse-py3 (1.97.0~rc1) stable; urgency=medium + + * New Synapse release 1.97.0rc1. + + -- Synapse Packaging team Tue, 21 Nov 2023 12:32:03 +0000 + +matrix-synapse-py3 (1.96.1) stable; urgency=medium + + * New synapse release 1.96.1. + + -- Synapse Packaging team Fri, 17 Nov 2023 12:48:45 +0000 + +matrix-synapse-py3 (1.96.0) stable; urgency=medium + + * New synapse release 1.96.0. + + -- Synapse Packaging team Thu, 16 Nov 2023 17:54:26 +0000 + +matrix-synapse-py3 (1.96.0~rc1) stable; urgency=medium + + * New Synapse release 1.96.0rc1. + + -- Synapse Packaging team Tue, 31 Oct 2023 14:09:09 +0000 + +matrix-synapse-py3 (1.95.1) stable; urgency=medium + + * New Synapse release 1.95.1. + + -- Synapse Packaging team Tue, 31 Oct 2023 14:00:00 +0000 + +matrix-synapse-py3 (1.95.0) stable; urgency=medium + + * New Synapse release 1.95.0. + + -- Synapse Packaging team Tue, 24 Oct 2023 13:00:46 +0100 + matrix-synapse-py3 (1.95.0~rc1) stable; urgency=medium * New synapse release 1.95.0rc1. @@ -1619,7 +1661,7 @@ matrix-synapse-py3 (0.99.3.1) stable; urgency=medium matrix-synapse-py3 (0.99.3) stable; urgency=medium [ Richard van der Hoff ] - * Fix warning during preconfiguration. (Fixes: #4819) + * Fix warning during preconfiguration. (Fixes: https://github.com/matrix-org/synapse/issues/4819) [ Synapse Packaging team ] * New synapse release 0.99.3. 
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index 5560ab8b95a8..7b012ce8abe3 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -68,6 +68,11 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then fi log "Workers requested: $SYNAPSE_WORKER_TYPES" + # adjust connection pool limits on worker mode as otherwise running lots of worker synapses + # can make docker unhappy (in GHA) + export POSTGRES_CP_MIN=1 + export POSTGRES_CP_MAX=3 + echo "using reduced connection pool limits for worker mode" # Improve startup times by using a launcher based on fork() export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1 else diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index c46b955d6353..c412ba2e87c2 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -67,8 +67,8 @@ database: host: "{{ POSTGRES_HOST or "db" }}" port: "{{ POSTGRES_PORT or "5432" }}" {% endif %} - cp_min: 5 - cp_max: 10 + cp_min: {{ POSTGRES_CP_MIN or 5 }} + cp_max: {{ POSTGRES_CP_MAX or 10 }} {% else %} database: name: "sqlite3" diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 31b303202913..c50121d5f784 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -19,7 +19,7 @@ # Usage - [Federation](federate.md) - [Configuration](usage/configuration/README.md) - - [Configuration Manual](usage/configuration/config_documentation.md) + - [Configuration Manual](usage/configuration/config_documentation.md) - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md) - [Logging Sample Config File](usage/configuration/logging_sample_config.md) - [Structured Logging](structured_logging.md) @@ -48,6 +48,7 @@ - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md) - [Background update controller callbacks](modules/background_update_controller_callbacks.md) - [Account data callbacks](modules/account_data_callbacks.md) + - [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md) - [Porting a legacy module to the new interface](modules/porting_legacy_module.md) - [Workers](workers.md) - [Using `synctl` with Workers](synctl_workers.md) diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 90b06045a820..ad011e5c36e6 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -536,7 +536,8 @@ The following query parameters are available: **Response** -* `event_id` - converted from timestamp +* `event_id` - The event ID closest to the given timestamp. +* `origin_server_ts` - The timestamp of the event in milliseconds since the Unix epoch. 
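For illustration only, a successful response carrying these two fields might look like the sketch below; the event ID and timestamp shown are hypothetical values, not taken from a real server:

```json
{
  "event_id": "$Wi7hep2uuDBslaVVhV2DoE7NsrUBqAYdSMTCBbqozVw",
  "origin_server_ts": 1700000000000
}
```

The returned `event_id` can then be passed to other APIs that expect an event identifier, and `origin_server_ts` shows how close the returned event actually is to the requested timestamp.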
# Block Room API The Block Room admin API allows server admins to block and unblock rooms, diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index b91848dd272e..e8e492d0951c 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -618,6 +618,16 @@ A response body like the following is returned: "quarantined_by": null, "safe_from_quarantine": false, "upload_name": "test2.png" + }, + { + "created_ts": 300400, + "last_access_ts": 300700, + "media_id": "BzYNLRUgGHphBkdKGbzXwbjX", + "media_length": 1337, + "media_type": "application/octet-stream", + "quarantined_by": null, + "safe_from_quarantine": false, + "upload_name": null } ], "next_token": 3, @@ -679,16 +689,17 @@ The following fields are returned in the JSON response body: - `media` - An array of objects, each containing information about a media. Media objects contain the following fields: - `created_ts` - integer - Timestamp when the content was uploaded in ms. - - `last_access_ts` - integer - Timestamp when the content was last accessed in ms. + - `last_access_ts` - integer or null - Timestamp when the content was last accessed in ms. + Null if there was no access yet. - `media_id` - string - The id used to refer to the media. Details about the format are documented under [media repository](../media_repository.md). - `media_length` - integer - Length of the media in bytes. - `media_type` - string - The MIME-type of the media. - - `quarantined_by` - string - The user ID that initiated the quarantine request - for this media. + - `quarantined_by` - string or null - The user ID that initiated the quarantine request + for this media. Null if not quarantined. - `safe_from_quarantine` - bool - Status if this media is safe from quarantining. - - `upload_name` - string - The name the media was uploaded with. + - `upload_name` - string or null - The name the media was uploaded with. Null if not provided during upload. - `next_token`: integer - Indication for pagination. See above. - `total` - integer - Total number of media. @@ -773,6 +784,43 @@ Note: The token will expire if the *admin* user calls `/logout/all` from any of their devices, but the token will *not* expire if the target user does the same. + +## Allow replacing master cross-signing key without User-Interactive Auth + +This endpoint is not intended for server administrator usage; +we describe it here for completeness. + +This API temporarily permits a user to replace their master cross-signing key +without going through +[user-interactive authentication](https://spec.matrix.org/v1.8/client-server-api/#user-interactive-authentication-api) (UIA). +This is useful when Synapse has delegated its authentication to the +[Matrix Authentication Service](https://github.com/matrix-org/matrix-authentication-service/); +as Synapse cannot perform UIA in these circumstances. + +The API is + +```http request +POST /_synapse/admin/v1/users/<user_id>/_allow_cross_signing_replacement_without_uia +{} +``` + +If the user does not exist, or does exist but has no master cross-signing key, +this will return with status code `404 Not Found`. + +Otherwise, a response body like the following is returned, with status `200 OK`: + +```json +{ + "updatable_without_uia_before_ms": 1234567890 +} +``` + +The response body is a JSON object with a single field: + +- `updatable_without_uia_before_ms`: integer. The timestamp in milliseconds + before which the user is permitted to replace their cross-signing key without + going through UIA.
+ +_Added in Synapse 1.97.0._ ## User devices diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md index a08f867b6757..5ebc4009c703 100644 --- a/docs/changelogs/CHANGES-pre-1.0.md +++ b/docs/changelogs/CHANGES-pre-1.0.md @@ -97,7 +97,7 @@ Bugfixes - start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. ([\#4981](https://github.com/matrix-org/synapse/issues/4981)) - Transfer related groups on room upgrade. ([\#4990](https://github.com/matrix-org/synapse/issues/4990)) - Prevent the ability to kick users from a room they aren't in. ([\#4999](https://github.com/matrix-org/synapse/issues/4999)) -- Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud . ([\#5003](https://github.com/matrix-org/synapse/issues/5003)) +- Fix issue [\#4596](https://github.com/matrix-org/synapse/issues/4596) so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud . ([\#5003](https://github.com/matrix-org/synapse/issues/5003)) - Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. ([\#5009](https://github.com/matrix-org/synapse/issues/5009)) - Fix "cannot import name execute_batch" error with postgres. ([\#5032](https://github.com/matrix-org/synapse/issues/5032)) - Fix disappearing exceptions in manhole. ([\#5035](https://github.com/matrix-org/synapse/issues/5035)) @@ -111,7 +111,7 @@ Bugfixes Internal Changes ---------------- -- Add test to verify threepid auth check added in #4435. ([\#4474](https://github.com/matrix-org/synapse/issues/4474)) +- Add test to verify threepid auth check added in [\#4435](https://github.com/matrix-org/synapse/issues/4435). ([\#4474](https://github.com/matrix-org/synapse/issues/4474)) - Fix/improve some docstrings in the replication code. ([\#4949](https://github.com/matrix-org/synapse/issues/4949)) - Split synapse.replication.tcp.streams into smaller files. ([\#4953](https://github.com/matrix-org/synapse/issues/4953)) - Refactor replication row generation/parsing. ([\#4954](https://github.com/matrix-org/synapse/issues/4954)) @@ -186,7 +186,7 @@ Features - Add support for /keys/query and /keys/changes REST endpoints to client_reader worker. ([\#4796](https://github.com/matrix-org/synapse/issues/4796)) - Add checks to incoming events over federation for events evading auth (aka "soft fail"). ([\#4814](https://github.com/matrix-org/synapse/issues/4814)) - Add configurable rate limiting to the /login endpoint. ([\#4821](https://github.com/matrix-org/synapse/issues/4821), [\#4865](https://github.com/matrix-org/synapse/issues/4865)) -- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: #3622. ([\#4840](https://github.com/matrix-org/synapse/issues/4840)) +- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: [\#3622](https://github.com/matrix-org/synapse/issues/3622). ([\#4840](https://github.com/matrix-org/synapse/issues/4840)) - Allow passing --daemonize flags to workers in the same way as with master. ([\#4853](https://github.com/matrix-org/synapse/issues/4853)) - Batch up outgoing read-receipts to reduce federation traffic. ([\#4890](https://github.com/matrix-org/synapse/issues/4890), [\#4927](https://github.com/matrix-org/synapse/issues/4927)) - Add option to disable searching the user directory. 
([\#4895](https://github.com/matrix-org/synapse/issues/4895)) @@ -231,12 +231,12 @@ Internal Changes - Add some debug about processing read receipts. ([\#4798](https://github.com/matrix-org/synapse/issues/4798)) - Clean up some replication code. ([\#4799](https://github.com/matrix-org/synapse/issues/4799)) - Add some docstrings. ([\#4815](https://github.com/matrix-org/synapse/issues/4815)) -- Add debug logger to try and track down #4422. ([\#4816](https://github.com/matrix-org/synapse/issues/4816)) +- Add debug logger to try and track down [\#4422](https://github.com/matrix-org/synapse/issues/4422). ([\#4816](https://github.com/matrix-org/synapse/issues/4816)) - Make shutdown API send explanation message to room after users have been forced joined. ([\#4817](https://github.com/matrix-org/synapse/issues/4817)) - Update example_log_config.yaml. ([\#4820](https://github.com/matrix-org/synapse/issues/4820)) - Document the `generate` option for the docker image. ([\#4824](https://github.com/matrix-org/synapse/issues/4824)) - Fix check-newsfragment for debian-only changes. ([\#4825](https://github.com/matrix-org/synapse/issues/4825)) -- Add some debug logging for device list updates to help with #4828. ([\#4828](https://github.com/matrix-org/synapse/issues/4828)) +- Add some debug logging for device list updates to help with [\#4828](https://github.com/matrix-org/synapse/issues/4828). ([\#4828](https://github.com/matrix-org/synapse/issues/4828)) - Improve federation documentation, specifically .well-known support. Many thanks to @vaab. ([\#4832](https://github.com/matrix-org/synapse/issues/4832)) - Disable captcha registration by default in unit tests. ([\#4839](https://github.com/matrix-org/synapse/issues/4839)) - Add stuff back to the .gitignore. ([\#4843](https://github.com/matrix-org/synapse/issues/4843)) @@ -895,7 +895,7 @@ Bugfixes - Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804)) - Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810)) - Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824)) -- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835)) +- fix VOIP crashes under Python 3 (issue [\#3821](https://github.com/matrix-org/synapse/issues/3821)). ([\#3835](https://github.com/matrix-org/synapse/issues/3835)) - Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841)) - Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845)) - Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851)) @@ -1123,7 +1123,7 @@ Bugfixes - Catch failures saving metrics captured by Measure, and instead log the faulty metrics information for further analysis. ([\#3548](https://github.com/matrix-org/synapse/issues/3548)) - Unicode passwords are now normalised before hashing, preventing the instance where two different devices or browsers might send a different UTF-8 sequence for the password. 
([\#3569](https://github.com/matrix-org/synapse/issues/3569)) - Fix potential stack overflow and deadlock under heavy load ([\#3570](https://github.com/matrix-org/synapse/issues/3570)) -- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes #3585 ([\#3585](https://github.com/matrix-org/synapse/issues/3585)) +- Respond with M_NOT_FOUND when profiles are not found locally or over federation. Fixes [\#3585](https://github.com/matrix-org/synapse/issues/3585). ([\#3585](https://github.com/matrix-org/synapse/issues/3585)) - Fix failure to persist events over federation under load ([\#3601](https://github.com/matrix-org/synapse/issues/3601)) - Fix updating of cached remote profiles ([\#3605](https://github.com/matrix-org/synapse/issues/3605)) - Fix 'tuple index out of range' error ([\#3607](https://github.com/matrix-org/synapse/issues/3607)) @@ -1272,7 +1272,7 @@ Misc Changes in synapse v0.31.2 (2018-06-14) ======================================= -SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. (PR #3397) +SECURITY UPDATE: Prevent unauthorised users from setting state events in a room when there is no `m.room.power_levels` event in force in the room. ([\#3397](https://github.com/matrix-org/synapse/issues/3397)) Discussion around the Matrix Spec change proposal for this change can be followed at . @@ -1285,7 +1285,7 @@ We are not aware of it being actively exploited but please upgrade asap. Bug Fixes: -- Fix event filtering in `get_missing_events` handler (PR #3371) +- Fix event filtering in `get_missing_events` handler. ([\#3371](https://github.com/matrix-org/synapse/issues/3371)) Changes in synapse v0.31.0 (2018-06-06) ======================================= @@ -1294,7 +1294,7 @@ Most notable change from v0.30.0 is to switch to the python prometheus library t Bug Fixes: -- Fix metric documentation tables (PR #3341) +- Fix metric documentation tables. ([\#3341](https://github.com/matrix-org/synapse/issues/3341)) - Fix LaterGauge error handling (694968f) - Fix replication metrics (b7e7fd2) @@ -1303,41 +1303,41 @@ Changes in synapse v0.31.0-rc1 (2018-06-04) Features: -- Switch to the Python Prometheus library (PR #3256, #3274) -- Let users leave the server notice room after joining (PR #3287) +- Switch to the Python Prometheus library. ([\#3256](https://github.com/matrix-org/synapse/issues/3256), [\#3274](https://github.com/matrix-org/synapse/issues/3274)) +- Let users leave the server notice room after joining. ([\#3287](https://github.com/matrix-org/synapse/issues/3287)) Changes: -- daily user type phone home stats (PR #3264) -- Use `iter*` methods for `_filter_events_for_server` (PR #3267) -- Docs on consent bits (PR #3268) -- Remove users from user directory on deactivate (PR #3277) -- Avoid sending consent notice to guest users (PR #3288) -- disable CPUMetrics if no /proc/self/stat (PR #3299) -- Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307) -- Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat! -- Reduce stuck read-receipts: ignore depth when updating (PR #3318) -- Put python's logs into Trial when running unit tests (PR #3319) +- daily user type phone home stats. ([\#3264](https://github.com/matrix-org/synapse/issues/3264)) +- Use `iter*` methods for `_filter_events_for_server`. 
([\#3267](https://github.com/matrix-org/synapse/issues/3267)) +- Docs on consent bits. ([\#3268](https://github.com/matrix-org/synapse/issues/3268)) +- Remove users from user directory on deactivate. ([\#3277](https://github.com/matrix-org/synapse/issues/3277)) +- Avoid sending consent notice to guest users. ([\#3288](https://github.com/matrix-org/synapse/issues/3288)) +- disable CPUMetrics if no /proc/self/stat. ([\#3299](https://github.com/matrix-org/synapse/issues/3299)) +- Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy. ([\#3307](https://github.com/matrix-org/synapse/issues/3307)) +- Add private IPv6 addresses to example config for url preview blacklist. Thanks to @thegcat! ([\#3317](https://github.com/matrix-org/synapse/issues/3317)) +- Reduce stuck read-receipts: ignore depth when updating. ([\#3318](https://github.com/matrix-org/synapse/issues/3318)) +- Put python's logs into Trial when running unit tests. ([\#3319](https://github.com/matrix-org/synapse/issues/3319)) Changes, python 3 migration: -- Replace some more comparisons with six (PR #3243) Thanks to @NotAFile! -- replace some iteritems with six (PR #3244) Thanks to @NotAFile! -- Add `batch_iter` to utils (PR #3245) Thanks to @NotAFile! -- use repr, not str (PR #3246) Thanks to @NotAFile! -- Misc Python3 fixes (PR #3247) Thanks to @NotAFile! -- Py3 `storage/_base.py` (PR #3278) Thanks to @NotAFile! -- more six iteritems (PR #3279) Thanks to @NotAFile! -- More Misc. py3 fixes (PR #3280) Thanks to @NotAFile! -- remaining isintance fixes (PR #3281) Thanks to @NotAFile! -- py3-ize state.py (PR #3283) Thanks to @NotAFile! -- extend tox testing for py3 to avoid regressions (PR #3302) Thanks to @krombel! -- use memoryview in py3 (PR #3303) Thanks to @NotAFile! +- Replace some more comparisons with six. Thanks to @NotAFile! ([\#3243](https://github.com/matrix-org/synapse/issues/3243)) +- replace some iteritems with six. Thanks to @NotAFile! ([\#3244](https://github.com/matrix-org/synapse/issues/3244)) +- Add `batch_iter` to utils. Thanks to @NotAFile! ([\#3245](https://github.com/matrix-org/synapse/issues/3245)) +- use repr, not str. Thanks to @NotAFile! ([\#3246](https://github.com/matrix-org/synapse/issues/3246)) +- Misc Python3 fixes. Thanks to @NotAFile! ([\#3247](https://github.com/matrix-org/synapse/issues/3247)) +- Py3 `storage/_base.py`. Thanks to @NotAFile! ([\#3278](https://github.com/matrix-org/synapse/issues/3278)) +- more six iteritems. Thanks to @NotAFile! ([\#3279](https://github.com/matrix-org/synapse/issues/3279)) +- More Misc. py3 fixes. Thanks to @NotAFile! ([\#3280](https://github.com/matrix-org/synapse/issues/3280)) +- remaining isintance fixes. Thanks to @NotAFile! ([\#3281](https://github.com/matrix-org/synapse/issues/3281)) +- py3-ize state.py. Thanks to @NotAFile! ([\#3283](https://github.com/matrix-org/synapse/issues/3283)) +- extend tox testing for py3 to avoid regressions. Thanks to @krombel! ([\#3302](https://github.com/matrix-org/synapse/issues/3302)) +- use memoryview in py3. Thanks to @NotAFile! ([\#3303](https://github.com/matrix-org/synapse/issues/3303)) Bugs: -- Fix federation backfill bugs (PR #3261) -- federation: fix LaterGauge usage (PR #3328) Thanks to @intelfx! +- Fix federation backfill bugs. ([\#3261](https://github.com/matrix-org/synapse/issues/3261)) +- federation: fix LaterGauge usage. Thanks to @intelfx! 
([\#3328](https://github.com/matrix-org/synapse/issues/3328)) Changes in synapse v0.30.0 (2018-05-24) ======================================= @@ -1350,50 +1350,50 @@ This feature is specific to Synapse, but uses standard Matrix communication mech Further Server Notices/Consent Tracking Support: -- Allow overriding the `server_notices` user's avatar (PR #3273) -- Use the localpart in the consent uri (PR #3272) -- Support for putting `%(consent_uri)s` in messages (PR #3271) -- Block attempts to send server notices to remote users (PR #3270) -- Docs on consent bits (PR #3268) +- Allow overriding the `server_notices` user's avatar. ([\#3273](https://github.com/matrix-org/synapse/issues/3273)) +- Use the localpart in the consent uri. ([\#3272](https://github.com/matrix-org/synapse/issues/3272)) +- Support for putting `%(consent_uri)s` in messages. ([\#3271](https://github.com/matrix-org/synapse/issues/3271)) +- Block attempts to send server notices to remote users. ([\#3270](https://github.com/matrix-org/synapse/issues/3270)) +- Docs on consent bits. ([\#3268](https://github.com/matrix-org/synapse/issues/3268)) Changes in synapse v0.30.0-rc1 (2018-05-23) =========================================== Server Notices/Consent Tracking Support: -- ConsentResource to gather policy consent from users (PR #3213) -- Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225) -- Infrastructure for a server notices room (PR #3232) -- Send users a server notice about consent (PR #3236) -- Reject attempts to send event before privacy consent is given (PR #3257) -- Add a `has_consented` template var to consent forms (PR #3262) -- Fix dependency on jinja2 (PR #3263) +- ConsentResource to gather policy consent from users. ([\#3213](https://github.com/matrix-org/synapse/issues/3213)) +- Move RoomCreationHandler out of synapse.handlers.Handlers. ([\#3225](https://github.com/matrix-org/synapse/issues/3225)) +- Infrastructure for a server notices room. ([\#3232](https://github.com/matrix-org/synapse/issues/3232)) +- Send users a server notice about consent. ([\#3236](https://github.com/matrix-org/synapse/issues/3236)) +- Reject attempts to send event before privacy consent is given. ([\#3257](https://github.com/matrix-org/synapse/issues/3257)) +- Add a `has_consented` template var to consent forms. ([\#3262](https://github.com/matrix-org/synapse/issues/3262)) +- Fix dependency on jinja2. ([\#3263](https://github.com/matrix-org/synapse/issues/3263)) Features: -- Cohort analytics (PR #3163, #3241, #3251) -- Add lxml to docker image for web previews (PR #3239) Thanks to @ptman! -- Add in flight request metrics (PR #3252) +- Cohort analytics. ([\#3163](https://github.com/matrix-org/synapse/issues/3163), [\#3241](https://github.com/matrix-org/synapse/issues/3241), [\#3251](https://github.com/matrix-org/synapse/issues/3251)) +- Add lxml to docker image for web previews. Thanks to @ptman! ([\#3239](https://github.com/matrix-org/synapse/issues/3239)) +- Add in flight request metrics. ([\#3252](https://github.com/matrix-org/synapse/issues/3252)) Changes: -- Remove unused `update_external_syncs` (PR #3233) -- Use stream rather depth ordering for push actions (PR #3212) -- Make `purge_history` operate on tokens (PR #3221) -- Don't support limitless pagination (PR #3265) +- Remove unused `update_external_syncs`. ([\#3233](https://github.com/matrix-org/synapse/issues/3233)) +- Use stream rather depth ordering for push actions. 
([\#3212](https://github.com/matrix-org/synapse/issues/3212)) +- Make `purge_history` operate on tokens. ([\#3221](https://github.com/matrix-org/synapse/issues/3221)) +- Don't support limitless pagination. ([\#3265](https://github.com/matrix-org/synapse/issues/3265)) Bug Fixes: -- Fix logcontext resource usage tracking (PR #3258) -- Fix error in handling receipts (PR #3235) -- Stop the transaction cache caching failures (PR #3255) +- Fix logcontext resource usage tracking. ([\#3258](https://github.com/matrix-org/synapse/issues/3258)) +- Fix error in handling receipts. ([\#3235](https://github.com/matrix-org/synapse/issues/3235)) +- Stop the transaction cache caching failures. ([\#3255](https://github.com/matrix-org/synapse/issues/3255)) Changes in synapse v0.29.1 (2018-05-17) ======================================= Changes: -- Update docker documentation (PR #3222) +- Update docker documentation. ([\#3222](https://github.com/matrix-org/synapse/issues/3222)) Changes in synapse v0.29.0 (2018-05-16) ======================================= @@ -1407,7 +1407,7 @@ Notable changes, a docker file for running Synapse (Thanks to @kaiyou!) and a cl Potentially breaking change: -- Make Client-Server API return 401 for invalid token (PR #3161). +- Make Client-Server API return 401 for invalid token. ([\#3161](https://github.com/matrix-org/synapse/issues/3161)) This changes the Client-server spec to return a 401 error code instead of 403 when the access token is unrecognised. This is the behaviour required by the specification, but some clients may be relying on the old, incorrect behaviour. @@ -1415,64 +1415,64 @@ Potentially breaking change: Features: -- Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou! +- Add a Dockerfile for synapse. Thanks to @kaiyou! ([\#2846](https://github.com/matrix-org/synapse/issues/2846)) Changes - General: -- nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77! -- Part user from rooms on account deactivate (PR #3201) -- Make "unexpected logging context" into warnings (PR #3007) -- Set Server header in SynapseRequest (PR #3208) -- remove duplicates from groups tables (PR #3129) -- Improve exception handling for background processes (PR #3138) -- Add missing consumeErrors to improve exception handling (PR #3139) -- reraise exceptions more carefully (PR #3142) -- Remove redundant call to `preserve_fn` (PR #3143) -- Trap exceptions thrown within `run_in_background` (PR #3144) +- nuke-room-from-db.sh: added postgresql option and help. Thanks to @rubo77! ([\#2337](https://github.com/matrix-org/synapse/issues/2337)) +- Part user from rooms on account deactivate. ([\#3201](https://github.com/matrix-org/synapse/issues/3201)) +- Make "unexpected logging context" into warnings. ([\#3007](https://github.com/matrix-org/synapse/issues/3007)) +- Set Server header in SynapseRequest. ([\#3208](https://github.com/matrix-org/synapse/issues/3208)) +- remove duplicates from groups tables. ([\#3129](https://github.com/matrix-org/synapse/issues/3129)) +- Improve exception handling for background processes. ([\#3138](https://github.com/matrix-org/synapse/issues/3138)) +- Add missing consumeErrors to improve exception handling. ([\#3139](https://github.com/matrix-org/synapse/issues/3139)) +- reraise exceptions more carefully. ([\#3142](https://github.com/matrix-org/synapse/issues/3142)) +- Remove redundant call to `preserve_fn`. ([\#3143](https://github.com/matrix-org/synapse/issues/3143)) +- Trap exceptions thrown within `run_in_background`. 
([\#3144](https://github.com/matrix-org/synapse/issues/3144)) Changes - Refactors: -- Refactor /context to reuse pagination storage functions (PR #3193) -- Refactor recent events func to use pagination func (PR #3195) -- Refactor pagination DB API to return concrete type (PR #3196) -- Refactor `get_recent_events_for_room` return type (PR #3198) -- Refactor sync APIs to reuse pagination API (PR #3199) -- Remove unused code path from member change DB func (PR #3200) -- Refactor request handling wrappers (PR #3203) -- `transaction_id`, destination defined twice (PR #3209) Thanks to @damir-manapov! -- Refactor event storage to prepare for changes in state calculations (PR #3141) -- Set Server header in SynapseRequest (PR #3208) -- Use deferred.addTimeout instead of `time_bound_deferred` (PR #3127, #3178) -- Use `run_in_background` in preference to `preserve_fn` (PR #3140) +- Refactor /context to reuse pagination storage functions. ([\#3193](https://github.com/matrix-org/synapse/issues/3193)) +- Refactor recent events func to use pagination func. ([\#3195](https://github.com/matrix-org/synapse/issues/3195)) +- Refactor pagination DB API to return concrete type. ([\#3196](https://github.com/matrix-org/synapse/issues/3196)) +- Refactor `get_recent_events_for_room` return type. ([\#3198](https://github.com/matrix-org/synapse/issues/3198)) +- Refactor sync APIs to reuse pagination API. ([\#3199](https://github.com/matrix-org/synapse/issues/3199)) +- Remove unused code path from member change DB func. ([\#3200](https://github.com/matrix-org/synapse/issues/3200)) +- Refactor request handling wrappers. ([\#3203](https://github.com/matrix-org/synapse/issues/3203)) +- `transaction_id`, destination defined twice. Thanks to @damir-manapov! ([\#3209](https://github.com/matrix-org/synapse/issues/3209)) +- Refactor event storage to prepare for changes in state calculations. ([\#3141](https://github.com/matrix-org/synapse/issues/3141)) +- Set Server header in SynapseRequest. ([\#3208](https://github.com/matrix-org/synapse/issues/3208)) +- Use deferred.addTimeout instead of `time_bound_deferred`. ([\#3127](https://github.com/matrix-org/synapse/issues/3127), [\#3178](https://github.com/matrix-org/synapse/issues/3178)) +- Use `run_in_background` in preference to `preserve_fn`. ([\#3140](https://github.com/matrix-org/synapse/issues/3140)) Changes - Python 3 migration: -- Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile! -- run config tests on py3 (PR #3159) Thanks to @NotAFile! -- Open certificate files as bytes (PR #3084) Thanks to @NotAFile! -- Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile! -- Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile! -- Use six.moves.urlparse (PR #3108) Thanks to @NotAFile! -- Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile! -- Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile! -- Move more xrange to six (PR #3151) Thanks to @NotAFile! -- make imports local (PR #3152) Thanks to @NotAFile! -- move httplib import to six (PR #3153) Thanks to @NotAFile! -- Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile! -- more bytes strings (PR #3155) Thanks to @NotAFile! +- Construct HMAC as bytes on py3. Thanks to @NotAFile! ([\#3156](https://github.com/matrix-org/synapse/issues/3156)) +- run config tests on py3. Thanks to @NotAFile! ([\#3159](https://github.com/matrix-org/synapse/issues/3159)) +- Open certificate files as bytes. Thanks to @NotAFile! 
([\#3084](https://github.com/matrix-org/synapse/issues/3084)) +- Open config file in non-bytes mode. Thanks to @NotAFile! ([\#3085](https://github.com/matrix-org/synapse/issues/3085)) +- Make event properties raise AttributeError instead. Thanks to @NotAFile! ([\#3102](https://github.com/matrix-org/synapse/issues/3102)) +- Use six.moves.urlparse. Thanks to @NotAFile! ([\#3108](https://github.com/matrix-org/synapse/issues/3108)) +- Add py3 tests to tox with folders that work. Thanks to @NotAFile! ([\#3145](https://github.com/matrix-org/synapse/issues/3145)) +- Don't yield in list comprehensions. Thanks to @NotAFile! ([\#3150](https://github.com/matrix-org/synapse/issues/3150)) +- Move more xrange to six. Thanks to @NotAFile! ([\#3151](https://github.com/matrix-org/synapse/issues/3151)) +- make imports local. Thanks to @NotAFile! ([\#3152](https://github.com/matrix-org/synapse/issues/3152)) +- move httplib import to six. Thanks to @NotAFile! ([\#3153](https://github.com/matrix-org/synapse/issues/3153)) +- Replace stringIO imports with six. Thanks to @NotAFile! ([\#3154](https://github.com/matrix-org/synapse/issues/3154), [\#3168](https://github.com/matrix-org/synapse/issues/3168)) +- more bytes strings. Thanks to @NotAFile! ([\#3155](https://github.com/matrix-org/synapse/issues/3155)) Bug Fixes: -- synapse fails to start under Twisted >= 18.4 (PR #3157) -- Fix a class of logcontext leaks (PR #3170) -- Fix a couple of logcontext leaks in unit tests (PR #3172) -- Fix logcontext leak in media repo (PR #3174) -- Escape label values in prometheus metrics (PR #3175, #3186) -- Fix "Unhandled Error" logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot! -- Fix logcontext leaks in rate limiter (PR #3183) -- notifications: Convert `next_token` to string according to the spec (PR #3190) Thanks to @mujx! -- nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77! -- add guard for None on `purge_history` api (PR #3160) Thanks to @krombel! +- synapse fails to start under Twisted >= 18.4. ([\#3157](https://github.com/matrix-org/synapse/issues/3157)) +- Fix a class of logcontext leaks. ([\#3170](https://github.com/matrix-org/synapse/issues/3170)) +- Fix a couple of logcontext leaks in unit tests. ([\#3172](https://github.com/matrix-org/synapse/issues/3172)) +- Fix logcontext leak in media repo. ([\#3174](https://github.com/matrix-org/synapse/issues/3174)) +- Escape label values in prometheus metrics. ([\#3175](https://github.com/matrix-org/synapse/issues/3175), [\#3186](https://github.com/matrix-org/synapse/issues/3186)) +- Fix "Unhandled Error" logs with Twisted 18.4. Thanks to @Half-Shot! ([\#3182](https://github.com/matrix-org/synapse/issues/3182)) +- Fix logcontext leaks in rate limiter. ([\#3183](https://github.com/matrix-org/synapse/issues/3183)) +- notifications: Convert `next_token` to string according to the spec. Thanks to @mujx! ([\#3190](https://github.com/matrix-org/synapse/issues/3190)) +- nuke-room-from-db.sh: fix deletion from search table. Thanks to @rubo77! ([\#3194](https://github.com/matrix-org/synapse/issues/3194)) +- add guard for None on `purge_history` api. Thanks to @krombel! ([\#3160](https://github.com/matrix-org/synapse/issues/3160)) Changes in synapse v0.28.1 (2018-05-01) ======================================= @@ -1492,8 +1492,8 @@ Changes in synapse v0.28.0 (2018-04-26) Bug Fixes: -- Fix quarantine media admin API and search reindex (PR #3130) -- Fix media admin APIs (PR #3134) +- Fix quarantine media admin API and search reindex. 
([\#3130](https://github.com/matrix-org/synapse/issues/3130)) +- Fix media admin APIs. ([\#3134](https://github.com/matrix-org/synapse/issues/3134)) Changes in synapse v0.28.0-rc1 (2018-04-24) =========================================== @@ -1504,49 +1504,49 @@ Minor performance improvement to federation sending and bug fixes. Features: -- Add metrics for event processing lag (PR #3090) -- Add metrics for ResponseCache (PR #3092) +- Add metrics for event processing lag. ([\#3090](https://github.com/matrix-org/synapse/issues/3090)) +- Add metrics for ResponseCache. ([\#3092](https://github.com/matrix-org/synapse/issues/3092)) Changes: -- Synapse on PyPy (PR #2760) Thanks to @Valodim! -- move handling of `auto_join_rooms` to RegisterHandler (PR #2996) Thanks to @krombel! -- Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh! -- Document the behaviour of ResponseCache (PR #3059) -- Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile! -- update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel! -- use python3-compatible prints (PR #3074) Thanks to @NotAFile! -- Send federation events concurrently (PR #3078) -- Limit concurrent event sends for a room (PR #3079) -- Improve R30 stat definition (PR #3086) -- Send events to ASes concurrently (PR #3088) -- Refactor ResponseCache usage (PR #3093) -- Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh! -- Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile! -- Use six.itervalues in some places (PR #3106) Thanks to @NotAFile! -- Refactor `store.have_events` (PR #3117) +- Synapse on PyPy. Thanks to @Valodim! ([\#2760](https://github.com/matrix-org/synapse/issues/2760)) +- move handling of `auto_join_rooms` to RegisterHandler. Thanks to @krombel! ([\#2996](https://github.com/matrix-org/synapse/issues/2996)) +- Improve handling of SRV records for federation connections. Thanks to @silkeh! ([\#3016](https://github.com/matrix-org/synapse/issues/3016)) +- Document the behaviour of ResponseCache. ([\#3059](https://github.com/matrix-org/synapse/issues/3059)) +- Preparation for py3. Thanks to @NotAFile! ([\#3061](https://github.com/matrix-org/synapse/issues/3061), [\#3073](https://github.com/matrix-org/synapse/issues/3073), [\#3074](https://github.com/matrix-org/synapse/issues/3074), [\#3075](https://github.com/matrix-org/synapse/issues/3075), [\#3103](https://github.com/matrix-org/synapse/issues/3103), [\#3104](https://github.com/matrix-org/synapse/issues/3104), [\#3106](https://github.com/matrix-org/synapse/issues/3106), [\#3107](https://github.com/matrix-org/synapse/issues/3107), [\#3109](https://github.com/matrix-org/synapse/issues/3109), [\#3110](https://github.com/matrix-org/synapse/issues/3110)) +- update prometheus dashboard to use new metric names. Thanks to @krombel! ([\#3069](https://github.com/matrix-org/synapse/issues/3069)) +- use python3-compatible prints. Thanks to @NotAFile! ([\#3074](https://github.com/matrix-org/synapse/issues/3074)) +- Send federation events concurrently. ([\#3078](https://github.com/matrix-org/synapse/issues/3078)) +- Limit concurrent event sends for a room. ([\#3079](https://github.com/matrix-org/synapse/issues/3079)) +- Improve R30 stat definition. ([\#3086](https://github.com/matrix-org/synapse/issues/3086)) +- Send events to ASes concurrently. ([\#3088](https://github.com/matrix-org/synapse/issues/3088)) +- Refactor ResponseCache usage. 
([\#3093](https://github.com/matrix-org/synapse/issues/3093)) +- Clarify that SRV may not point to a CNAME. Thanks to @silkeh! ([\#3100](https://github.com/matrix-org/synapse/issues/3100)) +- Use str(e) instead of e.message. Thanks to @NotAFile! ([\#3103](https://github.com/matrix-org/synapse/issues/3103)) +- Use six.itervalues in some places. Thanks to @NotAFile! ([\#3106](https://github.com/matrix-org/synapse/issues/3106)) +- Refactor `store.have_events`. ([\#3117](https://github.com/matrix-org/synapse/issues/3117)) Bug Fixes: -- Return 401 for invalid `access_token` on logout (PR #2938) Thanks to @dklug! -- Return a 404 rather than a 500 on rejoining empty rooms (PR #3080) -- fix `federation_domain_whitelist` (PR #3099) -- Avoid creating events with huge numbers of `prev_events` (PR #3113) -- Reject events which have lots of `prev_events` (PR #3118) +- Return 401 for invalid `access_token` on logout. Thanks to @dklug! ([\#2938](https://github.com/matrix-org/synapse/issues/2938)) +- Return a 404 rather than a 500 on rejoining empty rooms. ([\#3080](https://github.com/matrix-org/synapse/issues/3080)) +- fix `federation_domain_whitelist`. ([\#3099](https://github.com/matrix-org/synapse/issues/3099)) +- Avoid creating events with huge numbers of `prev_events`. ([\#3113](https://github.com/matrix-org/synapse/issues/3113)) +- Reject events which have lots of `prev_events`. ([\#3118](https://github.com/matrix-org/synapse/issues/3118)) Changes in synapse v0.27.4 (2018-04-13) ======================================= Changes: -- Update canonicaljson dependency (\#3095) +- Update canonicaljson dependency. ([\#3095](https://github.com/matrix-org/synapse/issues/3095)) Changes in synapse v0.27.3 (2018-04-11) ====================================== Bug fixes: -- URL quote path segments over federation (\#3082) +- URL quote path segments over federation. ([\#3082](https://github.com/matrix-org/synapse/issues/3082)) Changes in synapse v0.27.3-rc2 (2018-04-09) =========================================== @@ -1566,43 +1566,43 @@ Counts the number of native 30 day retained users, defined as: Features: -- Add joinability for groups (PR #3045) -- Implement group join API (PR #3046) -- Add counter metrics for calculating state delta (PR #3033) -- R30 stats (PR #3041) -- Measure time it takes to calculate state group ID (PR #3043) -- Add basic performance statistics to phone home (PR #3044) -- Add response size metrics (PR #3071) -- phone home cache size configurations (PR #3063) +- Add joinability for groups. ([\#3045](https://github.com/matrix-org/synapse/issues/3045)) +- Implement group join API. ([\#3046](https://github.com/matrix-org/synapse/issues/3046)) +- Add counter metrics for calculating state delta. ([\#3033](https://github.com/matrix-org/synapse/issues/3033)) +- R30 stats. ([\#3041](https://github.com/matrix-org/synapse/issues/3041)) +- Measure time it takes to calculate state group ID. ([\#3043](https://github.com/matrix-org/synapse/issues/3043)) +- Add basic performance statistics to phone home. ([\#3044](https://github.com/matrix-org/synapse/issues/3044)) +- Add response size metrics. ([\#3071](https://github.com/matrix-org/synapse/issues/3071)) +- phone home cache size configurations. ([\#3063](https://github.com/matrix-org/synapse/issues/3063)) Changes: -- Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live! -- Replace old style error catching with `as` keyword (PR #3000) Thanks to @NotAFile! 
-- Use `.iter*` to avoid copies in StateHandler (PR #3006) -- Linearize calls to `_generate_user_id` (PR #3029) -- Remove last usage of ujson (PR #3030) -- Use simplejson throughout (PR #3048) -- Use static JSONEncoders (PR #3049) -- Remove uses of events.content (PR #3060) -- Improve database cache performance (PR #3068) +- Add a blurb explaining the main synapse worker. Thanks to @turt2live! ([\#2886](https://github.com/matrix-org/synapse/issues/2886)) +- Replace old style error catching with `as` keyword. Thanks to @NotAFile! ([\#3000](https://github.com/matrix-org/synapse/issues/3000)) +- Use `.iter*` to avoid copies in StateHandler. ([\#3006](https://github.com/matrix-org/synapse/issues/3006)) +- Linearize calls to `_generate_user_id`. ([\#3029](https://github.com/matrix-org/synapse/issues/3029)) +- Remove last usage of ujson. ([\#3030](https://github.com/matrix-org/synapse/issues/3030)) +- Use simplejson throughout. ([\#3048](https://github.com/matrix-org/synapse/issues/3048)) +- Use static JSONEncoders. ([\#3049](https://github.com/matrix-org/synapse/issues/3049)) +- Remove uses of events.content. ([\#3060](https://github.com/matrix-org/synapse/issues/3060)) +- Improve database cache performance. ([\#3068](https://github.com/matrix-org/synapse/issues/3068)) Bug fixes: -- Add `room_id` to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte! -- Fix replication after switch to simplejson (PR #3015) -- 404 correctly on missing paths via NoResource (PR #3022) -- Fix error when claiming e2e keys from offline servers (PR #3034) -- fix `tests/storage/test_user_directory.py` (PR #3042) -- use `PUT` instead of `POST` for federating `groups`/`m.join_policy` (PR #3070) Thanks to @krombel! -- postgres port script: fix `state_groups_pkey` error (PR #3072) +- Add `room_id` to the response of rooms/{roomId}/join. Thanks to @jplatte! ([\#2986](https://github.com/matrix-org/synapse/issues/2986)) +- Fix replication after switch to simplejson. ([\#3015](https://github.com/matrix-org/synapse/issues/3015)) +- 404 correctly on missing paths via NoResource. ([\#3022](https://github.com/matrix-org/synapse/issues/3022)) +- Fix error when claiming e2e keys from offline servers. ([\#3034](https://github.com/matrix-org/synapse/issues/3034)) +- fix `tests/storage/test_user_directory.py`. ([\#3042](https://github.com/matrix-org/synapse/issues/3042)) +- use `PUT` instead of `POST` for federating `groups`/`m.join_policy`. Thanks to @krombel! ([\#3070](https://github.com/matrix-org/synapse/issues/3070)) +- postgres port script: fix `state_groups_pkey` error. ([\#3072](https://github.com/matrix-org/synapse/issues/3072)) Changes in synapse v0.27.2 (2018-03-26) ======================================= Bug fixes: -- Fix bug which broke TCP replication between workers (PR #3015) +- Fix bug which broke TCP replication between workers. ([\#3015](https://github.com/matrix-org/synapse/issues/3015)) Changes in synapse v0.27.1 (2018-03-26) ======================================= @@ -1621,14 +1621,14 @@ Pulls in v0.26.1 Bug fixes: -- Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005) +- Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache. 
([\#3005](https://github.com/matrix-org/synapse/issues/3005)) Changes in synapse v0.26.1 (2018-03-15) ======================================= Bug fixes: -- Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library (PR #3008) +- Fix bug where an invalid event caused server to stop functioning correctly, due to parsing and serializing bugs in ujson library. ([\#3008](https://github.com/matrix-org/synapse/issues/3008)) Changes in synapse v0.27.0-rc1 (2018-03-14) =========================================== @@ -1639,41 +1639,41 @@ This release also begins the process of renaming a number of the metrics reporte Features: -- Add ability for ASes to override message send time (PR #2754) -- Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767) -- Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943) -- Add support for whitelisting 3PIDs that users can register. (PR #2813) -- Add `/room/{id}/event/{id}` API (PR #2766) -- Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live! -- Add `federation_domain_whitelist` option (PR #2820, #2821) +- Add ability for ASes to override message send time. ([\#2754](https://github.com/matrix-org/synapse/issues/2754)) +- Add support for custom storage providers for media repository. ([\#2867](https://github.com/matrix-org/synapse/issues/2867), [\#2777](https://github.com/matrix-org/synapse/issues/2777), [\#2783](https://github.com/matrix-org/synapse/issues/2783), [\#2789](https://github.com/matrix-org/synapse/issues/2789), [\#2791](https://github.com/matrix-org/synapse/issues/2791), [\#2804](https://github.com/matrix-org/synapse/issues/2804), [\#2812](https://github.com/matrix-org/synapse/issues/2812), [\#2814](https://github.com/matrix-org/synapse/issues/2814), [\#2857](https://github.com/matrix-org/synapse/issues/2857), [\#2868](https://github.com/matrix-org/synapse/issues/2868), [\#2767](https://github.com/matrix-org/synapse/issues/2767)) +- Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details. ([\#2858](https://github.com/matrix-org/synapse/issues/2858), [\#2867](https://github.com/matrix-org/synapse/issues/2867), [\#2882](https://github.com/matrix-org/synapse/issues/2882), [\#2946](https://github.com/matrix-org/synapse/issues/2946), [\#2962](https://github.com/matrix-org/synapse/issues/2962), [\#2943](https://github.com/matrix-org/synapse/issues/2943)) +- Add support for whitelisting 3PIDs that users can register. ([\#2813](https://github.com/matrix-org/synapse/issues/2813)) +- Add `/room/{id}/event/{id}` API. ([\#2766](https://github.com/matrix-org/synapse/issues/2766)) +- Add an admin API to get all the media in a room. Thanks to @turt2live! ([\#2818](https://github.com/matrix-org/synapse/issues/2818)) +- Add `federation_domain_whitelist` option. ([\#2820](https://github.com/matrix-org/synapse/issues/2820), [\#2821](https://github.com/matrix-org/synapse/issues/2821)) Changes: -- Continue to factor out processing from main process and into worker processes. 
See updated [docs/workers.rst](docs/workers.rst) (PR #2892 - \#2904, #2913, #2920 - \#2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - \#2984, #2987 - \#2989, #2991 - \#2993, #2995, #2784) -- Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849) -- Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh! -- No longer require a specific version of saml2 (PR #2695) Thanks to @okurz! -- Remove `verbosity`/`log_file` from generated config (PR #2755) -- Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838) -- When using synctl with workers, Don't start the main synapse automatically (PR #2774) -- Minor performance improvements (PR #2773, #2792) -- Use a connection pool for non-federation outbound connections (PR #2817) -- Make it possible to run unit tests against postgres (PR #2829) -- Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp! -- Remove ability for AS users to call /events and /sync (PR #2948) -- Use bcrypt.checkpw (PR #2949) Thanks to @krombel! +- Continue to factor out processing from main process and into worker processes. See updated [docs/workers.rst](docs/workers.rst) ([\#2892](https://github.com/matrix-org/synapse/issues/2892), [\#2893](https://github.com/matrix-org/synapse/issues/2893), [\#2894](https://github.com/matrix-org/synapse/issues/2894), [\#2896](https://github.com/matrix-org/synapse/issues/2896), [\#2897](https://github.com/matrix-org/synapse/issues/2897), [\#2898](https://github.com/matrix-org/synapse/issues/2898), [\#2899](https://github.com/matrix-org/synapse/issues/2899), [\#2900](https://github.com/matrix-org/synapse/issues/2900), [\#2901](https://github.com/matrix-org/synapse/issues/2901), [\#2902](https://github.com/matrix-org/synapse/issues/2902), [\#2903](https://github.com/matrix-org/synapse/issues/2903), [\#2904](https://github.com/matrix-org/synapse/issues/2904), [\#2913](https://github.com/matrix-org/synapse/issues/2913), [\#2920](https://github.com/matrix-org/synapse/issues/2920), [\#2921](https://github.com/matrix-org/synapse/issues/2921), [\#2922](https://github.com/matrix-org/synapse/issues/2922), [\#2923](https://github.com/matrix-org/synapse/issues/2923), [\#2924](https://github.com/matrix-org/synapse/issues/2924), [\#2925](https://github.com/matrix-org/synapse/issues/2925), [\#2926](https://github.com/matrix-org/synapse/issues/2926), [\#2947](https://github.com/matrix-org/synapse/issues/2947), [\#2847](https://github.com/matrix-org/synapse/issues/2847), [\#2854](https://github.com/matrix-org/synapse/issues/2854), [\#2872](https://github.com/matrix-org/synapse/issues/2872), [\#2873](https://github.com/matrix-org/synapse/issues/2873), [\#2874](https://github.com/matrix-org/synapse/issues/2874), [\#2928](https://github.com/matrix-org/synapse/issues/2928), [\#2929](https://github.com/matrix-org/synapse/issues/2929), [\#2934](https://github.com/matrix-org/synapse/issues/2934), [\#2856](https://github.com/matrix-org/synapse/issues/2856), [\#2976](https://github.com/matrix-org/synapse/issues/2976), [\#2977](https://github.com/matrix-org/synapse/issues/2977), [\#2978](https://github.com/matrix-org/synapse/issues/2978), [\#2979](https://github.com/matrix-org/synapse/issues/2979), [\#2980](https://github.com/matrix-org/synapse/issues/2980), 
[\#2981](https://github.com/matrix-org/synapse/issues/2981), [\#2982](https://github.com/matrix-org/synapse/issues/2982), [\#2983](https://github.com/matrix-org/synapse/issues/2983), [\#2984](https://github.com/matrix-org/synapse/issues/2984), [\#2987](https://github.com/matrix-org/synapse/issues/2987), [\#2988](https://github.com/matrix-org/synapse/issues/2988), [\#2989](https://github.com/matrix-org/synapse/issues/2989), [\#2991](https://github.com/matrix-org/synapse/issues/2991), [\#2992](https://github.com/matrix-org/synapse/issues/2992), [\#2993](https://github.com/matrix-org/synapse/issues/2993), [\#2995](https://github.com/matrix-org/synapse/issues/2995), [\#2784](https://github.com/matrix-org/synapse/issues/2784)) +- Ensure state cache is used when persisting events. ([\#2864](https://github.com/matrix-org/synapse/issues/2864), [\#2871](https://github.com/matrix-org/synapse/issues/2871), [\#2802](https://github.com/matrix-org/synapse/issues/2802), [\#2835](https://github.com/matrix-org/synapse/issues/2835), [\#2836](https://github.com/matrix-org/synapse/issues/2836), [\#2841](https://github.com/matrix-org/synapse/issues/2841), [\#2842](https://github.com/matrix-org/synapse/issues/2842), [\#2849](https://github.com/matrix-org/synapse/issues/2849)) +- Change the default config to bind on both IPv4 and IPv6 on all platforms. Thanks to @silkeh! ([\#2435](https://github.com/matrix-org/synapse/issues/2435)) +- No longer require a specific version of saml2. Thanks to @okurz! ([\#2695](https://github.com/matrix-org/synapse/issues/2695)) +- Remove `verbosity`/`log_file` from generated config. ([\#2755](https://github.com/matrix-org/synapse/issues/2755)) +- Add and improve metrics and logging. ([\#2770](https://github.com/matrix-org/synapse/issues/2770), [\#2778](https://github.com/matrix-org/synapse/issues/2778), [\#2785](https://github.com/matrix-org/synapse/issues/2785), [\#2786](https://github.com/matrix-org/synapse/issues/2786), [\#2787](https://github.com/matrix-org/synapse/issues/2787), [\#2793](https://github.com/matrix-org/synapse/issues/2793), [\#2794](https://github.com/matrix-org/synapse/issues/2794), [\#2795](https://github.com/matrix-org/synapse/issues/2795), [\#2809](https://github.com/matrix-org/synapse/issues/2809), [\#2810](https://github.com/matrix-org/synapse/issues/2810), [\#2833](https://github.com/matrix-org/synapse/issues/2833), [\#2834](https://github.com/matrix-org/synapse/issues/2834), [\#2844](https://github.com/matrix-org/synapse/issues/2844), [\#2965](https://github.com/matrix-org/synapse/issues/2965), [\#2927](https://github.com/matrix-org/synapse/issues/2927), [\#2975](https://github.com/matrix-org/synapse/issues/2975), [\#2790](https://github.com/matrix-org/synapse/issues/2790), [\#2796](https://github.com/matrix-org/synapse/issues/2796), [\#2838](https://github.com/matrix-org/synapse/issues/2838)) +- When using synctl with workers, Don't start the main synapse automatically. ([\#2774](https://github.com/matrix-org/synapse/issues/2774)) +- Minor performance improvements. ([\#2773](https://github.com/matrix-org/synapse/issues/2773), [\#2792](https://github.com/matrix-org/synapse/issues/2792)) +- Use a connection pool for non-federation outbound connections. ([\#2817](https://github.com/matrix-org/synapse/issues/2817)) +- Make it possible to run unit tests against postgres. ([\#2829](https://github.com/matrix-org/synapse/issues/2829)) +- Update pynacl dependency to 1.2.1 or higher. Thanks to @bachp! 
([\#2888](https://github.com/matrix-org/synapse/issues/2888)) +- Remove ability for AS users to call /events and /sync. ([\#2948](https://github.com/matrix-org/synapse/issues/2948)) +- Use bcrypt.checkpw. Thanks to @krombel! ([\#2949](https://github.com/matrix-org/synapse/issues/2949)) Bug fixes: -- Fix broken `ldap_config` config option (PR #2683) Thanks to @seckrv! -- Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live! -- Fix publicised groups GET API (singular) over federation (PR #2772) -- Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831) -- Fix error on `/publicRooms` when no rooms exist (PR #2827) -- Fix bug in `quarantine_media` (PR #2837) -- Fix `url_previews` when no `Content-Type` is returned from URL (PR #2845) -- Fix rare race in sync API when joining room (PR #2944) -- Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848) +- Fix broken `ldap_config` config option. Thanks to @seckrv! ([\#2683](https://github.com/matrix-org/synapse/issues/2683)) +- Fix error message when user is not allowed to unban. Thanks to @turt2live! ([\#2761](https://github.com/matrix-org/synapse/issues/2761)) +- Fix publicised groups GET API (singular) over federation. ([\#2772](https://github.com/matrix-org/synapse/issues/2772)) +- Fix user directory when using `user_directory_search_all_users` config option. ([\#2803](https://github.com/matrix-org/synapse/issues/2803), [\#2831](https://github.com/matrix-org/synapse/issues/2831)) +- Fix error on `/publicRooms` when no rooms exist. ([\#2827](https://github.com/matrix-org/synapse/issues/2827)) +- Fix bug in `quarantine_media`. ([\#2837](https://github.com/matrix-org/synapse/issues/2837)) +- Fix `url_previews` when no `Content-Type` is returned from URL. ([\#2845](https://github.com/matrix-org/synapse/issues/2845)) +- Fix rare race in sync API when joining room. ([\#2944](https://github.com/matrix-org/synapse/issues/2944)) +- Fix slow event search, switch back from GIST to GIN indexes. ([\#2769](https://github.com/matrix-org/synapse/issues/2769), [\#2848](https://github.com/matrix-org/synapse/issues/2848)) Changes in synapse v0.26.0 (2018-01-05) ======================================= @@ -1685,93 +1685,93 @@ Changes in synapse v0.26.0-rc1 (2017-12-13) Features: -- Add ability for ASes to publicise groups for their users (PR #2686) -- Add all local users to the `user_directory` and optionally search them (PR #2723) -- Add support for custom login types for validating users (PR #2729) +- Add ability for ASes to publicise groups for their users. ([\#2686](https://github.com/matrix-org/synapse/issues/2686)) +- Add all local users to the `user_directory` and optionally search them. ([\#2723](https://github.com/matrix-org/synapse/issues/2723)) +- Add support for custom login types for validating users. ([\#2729](https://github.com/matrix-org/synapse/issues/2729)) Changes: -- Update example Prometheus config to new format (PR #2648) Thanks to @krombel! -- Rename `redact_content` option to `include_content` in Push API (PR #2650) -- Declare support for r0.3.0 (PR #2677) -- Improve upserts (PR #2684, #2688, #2689, #2713) -- Improve documentation of workers (PR #2700) -- Improve tracebacks on exceptions (PR #2705) -- Allow guest access to group APIs for reading (PR #2715) -- Support for posting content in `federation_client` script (PR #2716) -- Delete devices and pushers on logouts etc (PR #2722) +- Update example Prometheus config to new format. 
Thanks to @krombel! ([\#2648](https://github.com/matrix-org/synapse/issues/2648)) +- Rename `redact_content` option to `include_content` in Push API. ([\#2650](https://github.com/matrix-org/synapse/issues/2650)) +- Declare support for r0.3.0. ([\#2677](https://github.com/matrix-org/synapse/issues/2677)) +- Improve upserts. ([\#2684](https://github.com/matrix-org/synapse/issues/2684), [\#2688](https://github.com/matrix-org/synapse/issues/2688), [\#2689](https://github.com/matrix-org/synapse/issues/2689), [\#2713](https://github.com/matrix-org/synapse/issues/2713)) +- Improve documentation of workers. ([\#2700](https://github.com/matrix-org/synapse/issues/2700)) +- Improve tracebacks on exceptions. ([\#2705](https://github.com/matrix-org/synapse/issues/2705)) +- Allow guest access to group APIs for reading. ([\#2715](https://github.com/matrix-org/synapse/issues/2715)) +- Support for posting content in `federation_client` script. ([\#2716](https://github.com/matrix-org/synapse/issues/2716)) +- Delete devices and pushers on logouts etc. ([\#2722](https://github.com/matrix-org/synapse/issues/2722)) Bug fixes: -- Fix database port script (PR #2673) -- Fix internal server error on login with `ldap_auth_provider` (PR #2678) Thanks to @jkolo! -- Fix error on sqlite 3.7 (PR #2697) -- Fix `OPTIONS` on `preview_url` (PR #2707) -- Fix error handling on dns lookup (PR #2711) -- Fix wrong avatars when inviting multiple users when creating room (PR #2717) -- Fix 500 when joining matrix-dev (PR #2719) +- Fix database port script. ([\#2673](https://github.com/matrix-org/synapse/issues/2673)) +- Fix internal server error on login with `ldap_auth_provider`. Thanks to @jkolo! ([\#2678](https://github.com/matrix-org/synapse/issues/2678)) +- Fix error on sqlite 3.7. ([\#2697](https://github.com/matrix-org/synapse/issues/2697)) +- Fix `OPTIONS` on `preview_url`. ([\#2707](https://github.com/matrix-org/synapse/issues/2707)) +- Fix error handling on dns lookup. ([\#2711](https://github.com/matrix-org/synapse/issues/2711)) +- Fix wrong avatars when inviting multiple users when creating room. ([\#2717](https://github.com/matrix-org/synapse/issues/2717)) +- Fix 500 when joining matrix-dev. ([\#2719](https://github.com/matrix-org/synapse/issues/2719)) Changes in synapse v0.25.1 (2017-11-17) ======================================= Bug fixes: -- Fix login with LDAP and other password provider modules (PR #2678). Thanks to @jkolo! +- Fix login with LDAP and other password provider modules. Thanks to @jkolo! ([\#2678](https://github.com/matrix-org/synapse/issues/2678)) Changes in synapse v0.25.0 (2017-11-15) ======================================= Bug fixes: -- Fix port script (PR #2673) +- Fix port script. ([\#2673](https://github.com/matrix-org/synapse/issues/2673)) Changes in synapse v0.25.0-rc1 (2017-11-14) =========================================== Features: -- Add `is_public` to groups table to allow for private groups (PR #2582) -- Add a route for determining who you are (PR #2668) Thanks to @turt2live! -- Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629) -- Add a hook for custom rest endpoints (PR #2627) -- Add API to update group room visibility (PR #2651) +- Add `is_public` to groups table to allow for private groups. ([\#2582](https://github.com/matrix-org/synapse/issues/2582)) +- Add a route for determining who you are. Thanks to @turt2live! 
([\#2668](https://github.com/matrix-org/synapse/issues/2668)) +- Add more features to the password providers ([\#2608](https://github.com/matrix-org/synapse/issues/2608), [\#2610](https://github.com/matrix-org/synapse/issues/2610), [\#2620](https://github.com/matrix-org/synapse/issues/2620), [\#2622](https://github.com/matrix-org/synapse/issues/2622), [\#2623](https://github.com/matrix-org/synapse/issues/2623), [\#2624](https://github.com/matrix-org/synapse/issues/2624), [\#2626](https://github.com/matrix-org/synapse/issues/2626), [\#2628](https://github.com/matrix-org/synapse/issues/2628), [\#2629](https://github.com/matrix-org/synapse/issues/2629)) +- Add a hook for custom rest endpoints. ([\#2627](https://github.com/matrix-org/synapse/issues/2627)) +- Add API to update group room visibility. ([\#2651](https://github.com/matrix-org/synapse/issues/2651)) Changes: -- Ignore `` tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt! -- Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel! -- Support /keys/upload on /r0 as well as /unstable (PR #2585) -- Front-end proxy: pass through auth header (PR #2586) -- Allow ASes to deactivate their own users (PR #2589) -- Remove refresh tokens (PR #2613) -- Automatically set default displayname on register (PR #2617) -- Log login requests (PR #2618) -- Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630) -- Avoid no-op media deletes (PR #2637) Thanks to @spantaleev! -- Fix various embarrassing typos around `user_directory` and add some doc. (PR #2643) -- Return whether a user is an admin within a group (PR #2647) -- Namespace visibility options for groups (PR #2657) -- Downcase UserIDs on registration (PR #2662) -- Cache failures when fetching URL previews (PR #2669) +- Ignore `` tags when generating URL preview descriptions. Thanks to @maximevaillancourt! ([\#2576](https://github.com/matrix-org/synapse/issues/2576)) +- Register some /unstable endpoints in /r0 as well. Thanks to @krombel! ([\#2579](https://github.com/matrix-org/synapse/issues/2579)) +- Support /keys/upload on /r0 as well as /unstable. ([\#2585](https://github.com/matrix-org/synapse/issues/2585)) +- Front-end proxy: pass through auth header. ([\#2586](https://github.com/matrix-org/synapse/issues/2586)) +- Allow ASes to deactivate their own users. ([\#2589](https://github.com/matrix-org/synapse/issues/2589)) +- Remove refresh tokens. ([\#2613](https://github.com/matrix-org/synapse/issues/2613)) +- Automatically set default displayname on register. ([\#2617](https://github.com/matrix-org/synapse/issues/2617)) +- Log login requests. ([\#2618](https://github.com/matrix-org/synapse/issues/2618)) +- Always return `is_public` in the `/groups/:group_id/rooms` API. ([\#2630](https://github.com/matrix-org/synapse/issues/2630)) +- Avoid no-op media deletes. Thanks to @spantaleev! ([\#2637](https://github.com/matrix-org/synapse/issues/2637)) +- Fix various embarrassing typos around `user_directory` and add some doc. ([\#2643](https://github.com/matrix-org/synapse/issues/2643)) +- Return whether a user is an admin within a group. ([\#2647](https://github.com/matrix-org/synapse/issues/2647)) +- Namespace visibility options for groups. ([\#2657](https://github.com/matrix-org/synapse/issues/2657)) +- Downcase UserIDs on registration. ([\#2662](https://github.com/matrix-org/synapse/issues/2662)) +- Cache failures when fetching URL previews. 
([\#2669](https://github.com/matrix-org/synapse/issues/2669)) Bug fixes: -- Fix port script (PR #2577) -- Fix error when running synapse with no logfile (PR #2581) -- Fix UI auth when deleting devices (PR #2591) -- Fix typo when checking if user is invited to group (PR #2599) -- Fix the port script to drop NUL values in all tables (PR #2611) -- Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services` (PR #2631) Thanks to @xyzz! -- Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima! -- Fix bug in state group storage (PR #2649) -- Fix 500 on invalid utf-8 in request (PR #2663) +- Fix port script. ([\#2577](https://github.com/matrix-org/synapse/issues/2577)) +- Fix error when running synapse with no logfile. ([\#2581](https://github.com/matrix-org/synapse/issues/2581)) +- Fix UI auth when deleting devices. ([\#2591](https://github.com/matrix-org/synapse/issues/2591)) +- Fix typo when checking if user is invited to group. ([\#2599](https://github.com/matrix-org/synapse/issues/2599)) +- Fix the port script to drop NUL values in all tables. ([\#2611](https://github.com/matrix-org/synapse/issues/2611)) +- Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services`. Thanks to @xyzz! ([\#2631](https://github.com/matrix-org/synapse/issues/2631)) +- Fix updating rooms avatar/display name when modified by admin. Thanks to @farialima! ([\#2636](https://github.com/matrix-org/synapse/issues/2636)) +- Fix bug in state group storage. ([\#2649](https://github.com/matrix-org/synapse/issues/2649)) +- Fix 500 on invalid utf-8 in request. ([\#2663](https://github.com/matrix-org/synapse/issues/2663)) Changes in synapse v0.24.1 (2017-10-24) ======================================= Bug fixes: -- Fix updating group profiles over federation (PR #2567) +- Fix updating group profiles over federation. ([\#2567](https://github.com/matrix-org/synapse/issues/2567)) Changes in synapse v0.24.0 (2017-10-23) ======================================= @@ -1783,31 +1783,31 @@ Changes in synapse v0.24.0-rc1 (2017-10-19) Features: -- Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, #2430, #2454, #2471, #2472, #2544) -- Add support for channel notifications (PR #2501) -- Add basic implementation of backup media store (PR #2538) -- Add config option to auto-join new users to rooms (PR #2545) +- Add Group Server ([\#2352](https://github.com/matrix-org/synapse/issues/2352), [\#2363](https://github.com/matrix-org/synapse/issues/2363), [\#2374](https://github.com/matrix-org/synapse/issues/2374), [\#2377](https://github.com/matrix-org/synapse/issues/2377), [\#2378](https://github.com/matrix-org/synapse/issues/2378), [\#2382](https://github.com/matrix-org/synapse/issues/2382), [\#2410](https://github.com/matrix-org/synapse/issues/2410), [\#2426](https://github.com/matrix-org/synapse/issues/2426), [\#2430](https://github.com/matrix-org/synapse/issues/2430), [\#2454](https://github.com/matrix-org/synapse/issues/2454), [\#2471](https://github.com/matrix-org/synapse/issues/2471), [\#2472](https://github.com/matrix-org/synapse/issues/2472), [\#2544](https://github.com/matrix-org/synapse/issues/2544)) +- Add support for channel notifications. ([\#2501](https://github.com/matrix-org/synapse/issues/2501)) +- Add basic implementation of backup media store. ([\#2538](https://github.com/matrix-org/synapse/issues/2538)) +- Add config option to auto-join new users to rooms. 
([\#2545](https://github.com/matrix-org/synapse/issues/2545)) Changes: -- Make the spam checker a module (PR #2474) -- Delete expired url cache data (PR #2478) -- Ignore incoming events for rooms that we have left (PR #2490) -- Allow spam checker to reject invites too (PR #2492) -- Add room creation checks to spam checker (PR #2495) -- Spam checking: add the invitee to `user_may_invite` (PR #2502) -- Process events from federation for different rooms in parallel (PR #2520) -- Allow error strings from spam checker (PR #2531) -- Improve error handling for missing files in config (PR #2551) +- Make the spam checker a module. ([\#2474](https://github.com/matrix-org/synapse/issues/2474)) +- Delete expired url cache data. ([\#2478](https://github.com/matrix-org/synapse/issues/2478)) +- Ignore incoming events for rooms that we have left. ([\#2490](https://github.com/matrix-org/synapse/issues/2490)) +- Allow spam checker to reject invites too. ([\#2492](https://github.com/matrix-org/synapse/issues/2492)) +- Add room creation checks to spam checker. ([\#2495](https://github.com/matrix-org/synapse/issues/2495)) +- Spam checking: add the invitee to `user_may_invite`. ([\#2502](https://github.com/matrix-org/synapse/issues/2502)) +- Process events from federation for different rooms in parallel. ([\#2520](https://github.com/matrix-org/synapse/issues/2520)) +- Allow error strings from spam checker. ([\#2531](https://github.com/matrix-org/synapse/issues/2531)) +- Improve error handling for missing files in config. ([\#2551](https://github.com/matrix-org/synapse/issues/2551)) Bug fixes: -- Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477) -- Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline! -- Fix notification keywords that start/end with non-word chars (PR #2500) -- Fix stack overflow and logcontexts from linearizer (PR #2532) -- Fix 500 error when fields missing from `power_levels` event (PR #2552) -- Fix 500 error when we get an error handling a PDU (PR #2553) +- Fix handling SERVFAILs when doing AAAA lookups for federation. ([\#2477](https://github.com/matrix-org/synapse/issues/2477)) +- Fix incompatibility with newer versions of ujson. Thanks to @jeremycline! ([\#2483](https://github.com/matrix-org/synapse/issues/2483)) +- Fix notification keywords that start/end with non-word chars. ([\#2500](https://github.com/matrix-org/synapse/issues/2500)) +- Fix stack overflow and logcontexts from linearizer. ([\#2532](https://github.com/matrix-org/synapse/issues/2532)) +- Fix 500 error when fields missing from `power_levels` event. ([\#2552](https://github.com/matrix-org/synapse/issues/2552)) +- Fix 500 error when we get an error handling a PDU. ([\#2553](https://github.com/matrix-org/synapse/issues/2553)) Changes in synapse v0.23.1 (2017-10-02) ======================================= @@ -1826,42 +1826,42 @@ Changes in synapse v0.23.0-rc2 (2017-09-26) Bug fixes: -- Fix regression in performance of syncs (PR #2470) +- Fix regression in performance of syncs. ([\#2470](https://github.com/matrix-org/synapse/issues/2470)) Changes in synapse v0.23.0-rc1 (2017-09-25) =========================================== Features: -- Add a frontend proxy worker (PR #2344) -- Add support for `event_id_only` push format (PR #2450) -- Add a PoC for filtering spammy events (PR #2456) -- Add a config option to block all room invites (PR #2457) +- Add a frontend proxy worker. 
([\#2344](https://github.com/matrix-org/synapse/issues/2344)) +- Add support for `event_id_only` push format. ([\#2450](https://github.com/matrix-org/synapse/issues/2450)) +- Add a PoC for filtering spammy events. ([\#2456](https://github.com/matrix-org/synapse/issues/2456)) +- Add a config option to block all room invites. ([\#2457](https://github.com/matrix-org/synapse/issues/2457)) Changes: -- Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias! -- Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371) -- Improve DB performance for device list handling in sync (PR #2362) -- Include a sample prometheus config (PR #2416) -- Document known to work postgres version (PR #2433) Thanks to @ptman! +- Use bcrypt module instead of py-bcrypt. Thanks to @kyrias! ([\#2288](https://github.com/matrix-org/synapse/issues/2288)) +- Improve performance of generating push notifications. ([\#2343](https://github.com/matrix-org/synapse/issues/2343), [\#2357](https://github.com/matrix-org/synapse/issues/2357), [\#2365](https://github.com/matrix-org/synapse/issues/2365), [\#2366](https://github.com/matrix-org/synapse/issues/2366), [\#2371](https://github.com/matrix-org/synapse/issues/2371)) +- Improve DB performance for device list handling in sync. ([\#2362](https://github.com/matrix-org/synapse/issues/2362)) +- Include a sample prometheus config. ([\#2416](https://github.com/matrix-org/synapse/issues/2416)) +- Document known to work postgres version. Thanks to @ptman! ([\#2433](https://github.com/matrix-org/synapse/issues/2433)) Bug fixes: -- Fix caching error in the push evaluator (PR #2332) -- Fix bug where pusherpool didn't start and broke some rooms (PR #2342) -- Fix port script for user directory tables (PR #2375) -- Fix device lists notifications when user rejoins a room (PR #2443, #2449) -- Fix sync to always send down current state events in timeline (PR #2451) -- Fix bug where guest users were incorrectly kicked (PR #2453) -- Fix bug talking to IPv6 only servers using SRV records (PR #2462) +- Fix caching error in the push evaluator. ([\#2332](https://github.com/matrix-org/synapse/issues/2332)) +- Fix bug where pusherpool didn't start and broke some rooms. ([\#2342](https://github.com/matrix-org/synapse/issues/2342)) +- Fix port script for user directory tables. ([\#2375](https://github.com/matrix-org/synapse/issues/2375)) +- Fix device lists notifications when user rejoins a room. ([\#2443](https://github.com/matrix-org/synapse/issues/2443), [\#2449](https://github.com/matrix-org/synapse/issues/2449)) +- Fix sync to always send down current state events in timeline. ([\#2451](https://github.com/matrix-org/synapse/issues/2451)) +- Fix bug where guest users were incorrectly kicked. ([\#2453](https://github.com/matrix-org/synapse/issues/2453)) +- Fix bug talking to IPv6 only servers using SRV records. ([\#2462](https://github.com/matrix-org/synapse/issues/2462)) Changes in synapse v0.22.1 (2017-07-06) ======================================= Bug fixes: -- Fix bug where pusher pool didn't start and caused issues when interacting with some rooms (PR #2342) +- Fix bug where pusher pool didn't start and caused issues when interacting with some rooms. 
([\#2342](https://github.com/matrix-org/synapse/issues/2342)) Changes in synapse v0.22.0 (2017-07-06) ======================================= @@ -1873,49 +1873,49 @@ Changes in synapse v0.22.0-rc2 (2017-07-04) Changes: -- Improve performance of storing user IPs (PR #2307, #2308) -- Slightly improve performance of verifying access tokens (PR #2320) -- Slightly improve performance of event persistence (PR #2321) -- Increase default cache factor size from 0.1 to 0.5 (PR #2330) +- Improve performance of storing user IPs. ([\#2307](https://github.com/matrix-org/synapse/issues/2307), [\#2308](https://github.com/matrix-org/synapse/issues/2308)) +- Slightly improve performance of verifying access tokens. ([\#2320](https://github.com/matrix-org/synapse/issues/2320)) +- Slightly improve performance of event persistence. ([\#2321](https://github.com/matrix-org/synapse/issues/2321)) +- Increase default cache factor size from 0.1 to 0.5. ([\#2330](https://github.com/matrix-org/synapse/issues/2330)) Bug fixes: -- Fix bug with storing registration sessions that caused frequent CPU churn (PR #2319) +- Fix bug with storing registration sessions that caused frequent CPU churn. ([\#2319](https://github.com/matrix-org/synapse/issues/2319)) Changes in synapse v0.22.0-rc1 (2017-06-26) =========================================== Features: -- Add a user directory API (PR #2252, and many more) -- Add shutdown room API to remove room from local server (PR #2291) -- Add API to quarantine media (PR #2292) -- Add new config option to not send event contents to push servers (PR #2301) Thanks to @cjdelisle! +- Add a user directory API ([\#2252](https://github.com/matrix-org/synapse/issues/2252), and many more) +- Add shutdown room API to remove room from local server. ([\#2291](https://github.com/matrix-org/synapse/issues/2291)) +- Add API to quarantine media. ([\#2292](https://github.com/matrix-org/synapse/issues/2292)) +- Add new config option to not send event contents to push servers. Thanks to @cjdelisle! ([\#2301](https://github.com/matrix-org/synapse/issues/2301)) Changes: -- Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256, #2274) -- Deduplicate sync filters (PR #2219) Thanks to @krombel! -- Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist! -- Add count of one time keys to sync stream (PR #2237) -- Only store `event_auth` for state events (PR #2247) -- Store URL cache preview downloads separately (PR #2299) +- Various performance fixes. ([\#2177](https://github.com/matrix-org/synapse/issues/2177), [\#2233](https://github.com/matrix-org/synapse/issues/2233), [\#2230](https://github.com/matrix-org/synapse/issues/2230), [\#2238](https://github.com/matrix-org/synapse/issues/2238), [\#2248](https://github.com/matrix-org/synapse/issues/2248), [\#2256](https://github.com/matrix-org/synapse/issues/2256), [\#2274](https://github.com/matrix-org/synapse/issues/2274)) +- Deduplicate sync filters. Thanks to @krombel! ([\#2219](https://github.com/matrix-org/synapse/issues/2219)) +- Correct a typo in UPGRADE.rst. Thanks to @aaronraimist! ([\#2231](https://github.com/matrix-org/synapse/issues/2231)) +- Add count of one time keys to sync stream. ([\#2237](https://github.com/matrix-org/synapse/issues/2237)) +- Only store `event_auth` for state events. ([\#2247](https://github.com/matrix-org/synapse/issues/2247)) +- Store URL cache preview downloads separately. 
([\#2299](https://github.com/matrix-org/synapse/issues/2299)) Bug fixes: -- Fix users not getting notifications when AS listened to that `user_id` (PR #2216) Thanks to @slipeer! -- Fix users without push set up not getting notifications after joining rooms (PR #2236) -- Fix preview url API to trim long descriptions (PR #2243) -- Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263) -- Fix removing of pushers when using workers (PR #2267) -- Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel! +- Fix users not getting notifications when AS listened to that `user_id`. Thanks to @slipeer! ([\#2216](https://github.com/matrix-org/synapse/issues/2216)) +- Fix users without push set up not getting notifications after joining rooms. ([\#2236](https://github.com/matrix-org/synapse/issues/2236)) +- Fix preview url API to trim long descriptions. ([\#2243](https://github.com/matrix-org/synapse/issues/2243)) +- Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart. ([\#2263](https://github.com/matrix-org/synapse/issues/2263)) +- Fix removing of pushers when using workers. ([\#2267](https://github.com/matrix-org/synapse/issues/2267)) +- Fix CORS headers to allow Authorization header. Thanks to @krombel! ([\#2285](https://github.com/matrix-org/synapse/issues/2285)) Changes in synapse v0.21.1 (2017-06-15) ======================================= Bug fixes: -- Fix bug in anonymous usage statistic reporting (PR #2281) +- Fix bug in anonymous usage statistic reporting. ([\#2281](https://github.com/matrix-org/synapse/issues/2281)) Changes in synapse v0.21.0 (2017-05-18) ======================================= @@ -1927,116 +1927,116 @@ Changes in synapse v0.21.0-rc3 (2017-05-17) Features: -- Add per user rate-limiting overrides (PR #2208) -- Add config option to limit maximum number of events requested by `/sync` and `/messages` (PR #2221) Thanks to @psaavedra! +- Add per user rate-limiting overrides. ([\#2208](https://github.com/matrix-org/synapse/issues/2208)) +- Add config option to limit maximum number of events requested by `/sync` and `/messages`. Thanks to @psaavedra! ([\#2221](https://github.com/matrix-org/synapse/issues/2221)) Changes: -- Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229) -- Update username availability checker API (PR #2209, #2213) -- When purging, Don't de-delta state groups we're about to delete (PR #2214) -- Documentation to check synapse version (PR #2215) Thanks to @hamber-dick! -- Add an index to `event_search` to speed up purge history API (PR #2218) +- Various small performance fixes. ([\#2201](https://github.com/matrix-org/synapse/issues/2201), [\#2202](https://github.com/matrix-org/synapse/issues/2202), [\#2224](https://github.com/matrix-org/synapse/issues/2224), [\#2226](https://github.com/matrix-org/synapse/issues/2226), [\#2227](https://github.com/matrix-org/synapse/issues/2227), [\#2228](https://github.com/matrix-org/synapse/issues/2228), [\#2229](https://github.com/matrix-org/synapse/issues/2229)) +- Update username availability checker API. ([\#2209](https://github.com/matrix-org/synapse/issues/2209), [\#2213](https://github.com/matrix-org/synapse/issues/2213)) +- When purging, Don't de-delta state groups we're about to delete. ([\#2214](https://github.com/matrix-org/synapse/issues/2214)) +- Documentation to check synapse version. Thanks to @hamber-dick! 
([\#2215](https://github.com/matrix-org/synapse/issues/2215)) +- Add an index to `event_search` to speed up purge history API. ([\#2218](https://github.com/matrix-org/synapse/issues/2218)) Bug fixes: -- Fix API to allow clients to upload one-time-keys with new sigs (PR #2206) +- Fix API to allow clients to upload one-time-keys with new sigs. ([\#2206](https://github.com/matrix-org/synapse/issues/2206)) Changes in synapse v0.21.0-rc2 (2017-05-08) =========================================== Changes: -- Always mark remotes as up if we receive a signed request from them (PR #2190) +- Always mark remotes as up if we receive a signed request from them. ([\#2190](https://github.com/matrix-org/synapse/issues/2190)) Bug fixes: -- Fix bug where users got pushed for rooms they had muted (PR #2200) +- Fix bug where users got pushed for rooms they had muted. ([\#2200](https://github.com/matrix-org/synapse/issues/2200)) Changes in synapse v0.21.0-rc1 (2017-05-08) =========================================== Features: -- Add username availability checker API (PR #2183) -- Add read marker API (PR #2120) +- Add username availability checker API. ([\#2183](https://github.com/matrix-org/synapse/issues/2183)) +- Add read marker API. ([\#2120](https://github.com/matrix-org/synapse/issues/2120)) Changes: -- Enable guest access for the 3pl/3pid APIs (PR #1986) -- Add setting to support TURN for guests (PR #2011) -- Various performance improvements (PR #2075, #2076, #2080, #2083, #2108, #2158, #2176, #2185) -- Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat! -- Replace HTTP replication with TCP replication (PR #2082, #2097, #2098, #2099, #2103, #2014, #2016, #2115, #2116, #2117) -- Support authenticated SMTP (PR #2102) Thanks @DanielDent! -- Add a counter metric for successfully-sent transactions (PR #2121) -- Propagate errors sensibly from proxied IS requests (PR #2147) -- Add more granular event send metrics (PR #2178) +- Enable guest access for the 3pl/3pid APIs. ([\#1986](https://github.com/matrix-org/synapse/issues/1986)) +- Add setting to support TURN for guests. ([\#2011](https://github.com/matrix-org/synapse/issues/2011)) +- Various performance improvements. ([\#2075](https://github.com/matrix-org/synapse/issues/2075), [\#2076](https://github.com/matrix-org/synapse/issues/2076), [\#2080](https://github.com/matrix-org/synapse/issues/2080), [\#2083](https://github.com/matrix-org/synapse/issues/2083), [\#2108](https://github.com/matrix-org/synapse/issues/2108), [\#2158](https://github.com/matrix-org/synapse/issues/2158), [\#2176](https://github.com/matrix-org/synapse/issues/2176), [\#2185](https://github.com/matrix-org/synapse/issues/2185)) +- Make synctl a bit more user friendly. ([\#2078](https://github.com/matrix-org/synapse/issues/2078), [\#2127](https://github.com/matrix-org/synapse/issues/2127)) Thanks @APwhitehat! +- Replace HTTP replication with TCP replication. 
([\#2082](https://github.com/matrix-org/synapse/issues/2082), [\#2097](https://github.com/matrix-org/synapse/issues/2097), [\#2098](https://github.com/matrix-org/synapse/issues/2098), [\#2099](https://github.com/matrix-org/synapse/issues/2099), [\#2103](https://github.com/matrix-org/synapse/issues/2103), [\#2014](https://github.com/matrix-org/synapse/issues/2014), [\#2016](https://github.com/matrix-org/synapse/issues/2016), [\#2115](https://github.com/matrix-org/synapse/issues/2115), [\#2116](https://github.com/matrix-org/synapse/issues/2116), [\#2117](https://github.com/matrix-org/synapse/issues/2117)) +- Support authenticated SMTP. Thanks @DanielDent! ([\#2102](https://github.com/matrix-org/synapse/issues/2102)) +- Add a counter metric for successfully-sent transactions. ([\#2121](https://github.com/matrix-org/synapse/issues/2121)) +- Propagate errors sensibly from proxied IS requests. ([\#2147](https://github.com/matrix-org/synapse/issues/2147)) +- Add more granular event send metrics. ([\#2178](https://github.com/matrix-org/synapse/issues/2178)) Bug fixes: -- Fix nuke-room script to work with current schema (PR #1927) Thanks @zuckschwerdt! -- Fix db port script to not assume postgres tables are in the public schema (PR #2024) Thanks @jerrykan! -- Fix getting latest device IP for user with no devices (PR #2118) -- Fix rejection of invites to unreachable servers (PR #2145) -- Fix code for reporting old verify keys in synapse (PR #2156) -- Fix invite state to always include all events (PR #2163) -- Fix bug where synapse would always fetch state for any missing event (PR #2170) -- Fix a leak with timed out HTTP connections (PR #2180) -- Fix bug where we didn't time out HTTP requests to ASes (PR #2192) +- Fix nuke-room script to work with current schema. Thanks @zuckschwerdt! ([\#1927](https://github.com/matrix-org/synapse/issues/1927)) +- Fix db port script to not assume postgres tables are in the public schema. Thanks @jerrykan! ([\#2024](https://github.com/matrix-org/synapse/issues/2024)) +- Fix getting latest device IP for user with no devices. ([\#2118](https://github.com/matrix-org/synapse/issues/2118)) +- Fix rejection of invites to unreachable servers. ([\#2145](https://github.com/matrix-org/synapse/issues/2145)) +- Fix code for reporting old verify keys in synapse. ([\#2156](https://github.com/matrix-org/synapse/issues/2156)) +- Fix invite state to always include all events. ([\#2163](https://github.com/matrix-org/synapse/issues/2163)) +- Fix bug where synapse would always fetch state for any missing event. ([\#2170](https://github.com/matrix-org/synapse/issues/2170)) +- Fix a leak with timed out HTTP connections. ([\#2180](https://github.com/matrix-org/synapse/issues/2180)) +- Fix bug where we didn't time out HTTP requests to ASes. ([\#2192](https://github.com/matrix-org/synapse/issues/2192)) Docs: -- Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau! -- Fix typo in synctl help (PR #2107) Thanks @HarHarLinks! -- `web_client_location` documentation fix (PR #2131) Thanks @matthewjwolff! -- Update README.rst with FreeBSD changes (PR #2132) Thanks @feld! -- Clarify setting up metrics (PR #2149) Thanks @encks! +- Clarify doc for SQLite to PostgreSQL port. Thanks @benhylau! ([\#1961](https://github.com/matrix-org/synapse/issues/1961)) +- Fix typo in synctl help. Thanks @HarHarLinks! ([\#2107](https://github.com/matrix-org/synapse/issues/2107)) +- `web_client_location` documentation fix. Thanks @matthewjwolff! 
([\#2131](https://github.com/matrix-org/synapse/issues/2131)) +- Update README.rst with FreeBSD changes. Thanks @feld! ([\#2132](https://github.com/matrix-org/synapse/issues/2132)) +- Clarify setting up metrics. Thanks @encks! ([\#2149](https://github.com/matrix-org/synapse/issues/2149)) Changes in synapse v0.20.0 (2017-04-11) ======================================= Bug fixes: -- Fix joining rooms over federation where not all servers in the room saw the new server had joined (PR #2094) +- Fix joining rooms over federation where not all servers in the room saw the new server had joined. ([\#2094](https://github.com/matrix-org/synapse/issues/2094)) Changes in synapse v0.20.0-rc1 (2017-03-30) =========================================== Features: -- Add `delete_devices` API (PR #1993) -- Add phone number registration/login support (PR #1994, #2055) +- Add `delete_devices` API. ([\#1993](https://github.com/matrix-org/synapse/issues/1993)) +- Add phone number registration/login support. ([\#1994](https://github.com/matrix-org/synapse/issues/1994), [\#2055](https://github.com/matrix-org/synapse/issues/2055)) Changes: -- Use JSONSchema for validation of filters. Thanks @pik! (PR #1783) -- Reread log config on SIGHUP (PR #1982) -- Speed up public room list (PR #1989) -- Add helpful texts to logger config options (PR #1990) -- Minor `/sync` performance improvements. (PR #2002, #2013, #2022) -- Add some debug to help diagnose weird federation issue (PR #2035) -- Correctly limit retries for all federation requests (PR #2050, #2061) -- Don't lock table when persisting new one time keys (PR #2053) -- Reduce some CPU work on DB threads (PR #2054) -- Cache hosts in room (PR #2060) -- Batch sending of device list pokes (PR #2063) -- Speed up persist event path in certain edge cases (PR #2070) +- Use JSONSchema for validation of filters. Thanks @pik! ([\#1783](https://github.com/matrix-org/synapse/issues/1783)) +- Reread log config on SIGHUP. ([\#1982](https://github.com/matrix-org/synapse/issues/1982)) +- Speed up public room list. ([\#1989](https://github.com/matrix-org/synapse/issues/1989)) +- Add helpful texts to logger config options. ([\#1990](https://github.com/matrix-org/synapse/issues/1990)) +- Minor `/sync` performance improvements. ([\#2002](https://github.com/matrix-org/synapse/issues/2002), [\#2013](https://github.com/matrix-org/synapse/issues/2013), [\#2022](https://github.com/matrix-org/synapse/issues/2022)) +- Add some debug to help diagnose weird federation issue. ([\#2035](https://github.com/matrix-org/synapse/issues/2035)) +- Correctly limit retries for all federation requests. ([\#2050](https://github.com/matrix-org/synapse/issues/2050), [\#2061](https://github.com/matrix-org/synapse/issues/2061)) +- Don't lock table when persisting new one time keys. ([\#2053](https://github.com/matrix-org/synapse/issues/2053)) +- Reduce some CPU work on DB threads. ([\#2054](https://github.com/matrix-org/synapse/issues/2054)) +- Cache hosts in room. ([\#2060](https://github.com/matrix-org/synapse/issues/2060)) +- Batch sending of device list pokes. ([\#2063](https://github.com/matrix-org/synapse/issues/2063)) +- Speed up persist event path in certain edge cases. 
([\#2070](https://github.com/matrix-org/synapse/issues/2070)) Bug fixes: -- Fix bug where `current_state_events` renamed to `current_state_ids` (PR #1849) -- Fix routing loop when fetching remote media (PR #1992) -- Fix `current_state_events` table to not lie (PR #1996) -- Fix CAS login to handle PartialDownloadError (PR #1997) -- Fix assertion to stop transaction queue getting wedged (PR #2010) -- Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! (PR #2014) -- Fix bug when federation received a PDU while a room join is in progress (PR #2016) -- Fix resetting state on rejected events (PR #2025) -- Fix installation issues in readme. Thanks @ricco386 (PR #2037) -- Fix caching of remote servers' signature keys (PR #2042) -- Fix some leaking log context (PR #2048, #2049, #2057, #2058) -- Fix rejection of invites not reaching sync (PR #2056) +- Fix bug where `current_state_events` renamed to `current_state_ids`. ([\#1849](https://github.com/matrix-org/synapse/issues/1849)) +- Fix routing loop when fetching remote media. ([\#1992](https://github.com/matrix-org/synapse/issues/1992)) +- Fix `current_state_events` table to not lie. ([\#1996](https://github.com/matrix-org/synapse/issues/1996)) +- Fix CAS login to handle PartialDownloadError. ([\#1997](https://github.com/matrix-org/synapse/issues/1997)) +- Fix assertion to stop transaction queue getting wedged. ([\#2010](https://github.com/matrix-org/synapse/issues/2010)) +- Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! ([\#2014](https://github.com/matrix-org/synapse/issues/2014)) +- Fix bug when federation received a PDU while a room join is in progress. ([\#2016](https://github.com/matrix-org/synapse/issues/2016)) +- Fix resetting state on rejected events. ([\#2025](https://github.com/matrix-org/synapse/issues/2025)) +- Fix installation issues in readme. Thanks @ricco386. ([\#2037](https://github.com/matrix-org/synapse/issues/2037)) +- Fix caching of remote servers' signature keys. ([\#2042](https://github.com/matrix-org/synapse/issues/2042)) +- Fix some leaking log context. ([\#2048](https://github.com/matrix-org/synapse/issues/2048), [\#2049](https://github.com/matrix-org/synapse/issues/2049), [\#2057](https://github.com/matrix-org/synapse/issues/2057), [\#2058](https://github.com/matrix-org/synapse/issues/2058)) +- Fix rejection of invites not reaching sync. ([\#2056](https://github.com/matrix-org/synapse/issues/2056)) Changes in synapse v0.19.3 (2017-03-20) ======================================= @@ -2055,36 +2055,36 @@ Changes in synapse v0.19.3-rc1 (2017-03-08) Features: -- Add some administration functionalities. Thanks to morteza-araby! (PR #1784) +- Add some administration functionalities. Thanks to morteza-araby! ([\#1784](https://github.com/matrix-org/synapse/issues/1784)) Changes: -- Reduce database table sizes (PR #1873, #1916, #1923, #1963) -- Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907) -- Don't fetch current state when sending an event in common case (PR #1955) +- Reduce database table sizes. ([\#1873](https://github.com/matrix-org/synapse/issues/1873), [\#1916](https://github.com/matrix-org/synapse/issues/1916), [\#1923](https://github.com/matrix-org/synapse/issues/1923), [\#1963](https://github.com/matrix-org/synapse/issues/1963)) +- Update contrib/ to not use syutil. Thanks to andrewshadura! 
([\#1907](https://github.com/matrix-org/synapse/issues/1907)) +- Don't fetch current state when sending an event in common case. ([\#1955](https://github.com/matrix-org/synapse/issues/1955)) Bug fixes: -- Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904) -- Fix caching to not cache error responses (PR #1913) -- Fix APIs to make kick & ban reasons work (PR #1917) -- Fix bugs in the /keys/changes api (PR #1921) -- Fix bug where users couldn't forget rooms they were banned from (PR #1922) -- Fix issue with long language values in pushers API (PR #1925) -- Fix a race in transaction queue (PR #1930) -- Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945) -- Fix device list update to not constantly resync (PR #1964) -- Fix potential for huge memory usage when getting device that have changed (PR #1969) +- Fix synapse_port_db failure. Thanks to Pneumaticat! ([\#1904](https://github.com/matrix-org/synapse/issues/1904)) +- Fix caching to not cache error responses. ([\#1913](https://github.com/matrix-org/synapse/issues/1913)) +- Fix APIs to make kick & ban reasons work. ([\#1917](https://github.com/matrix-org/synapse/issues/1917)) +- Fix bugs in the /keys/changes api. ([\#1921](https://github.com/matrix-org/synapse/issues/1921)) +- Fix bug where users couldn't forget rooms they were banned from. ([\#1922](https://github.com/matrix-org/synapse/issues/1922)) +- Fix issue with long language values in pushers API. ([\#1925](https://github.com/matrix-org/synapse/issues/1925)) +- Fix a race in transaction queue. ([\#1930](https://github.com/matrix-org/synapse/issues/1930)) +- Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! ([\#1945](https://github.com/matrix-org/synapse/issues/1945)) +- Fix device list update to not constantly resync. ([\#1964](https://github.com/matrix-org/synapse/issues/1964)) +- Fix potential for huge memory usage when getting device that have changed. ([\#1969](https://github.com/matrix-org/synapse/issues/1969)) Changes in synapse v0.19.2 (2017-02-20) ======================================= -- Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! (PR #1929) +- Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for pointing it out! ([\#1929](https://github.com/matrix-org/synapse/issues/1929)) Changes in synapse v0.19.1 (2017-02-09) ======================================= -- Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks (PR #1892) +- Fix bug where state was incorrectly reset in a room when synapse received an event over federation that did not pass auth checks. ([\#1892](https://github.com/matrix-org/synapse/issues/1892)) Changes in synapse v0.19.0 (2017-02-04) ======================================= @@ -2094,59 +2094,59 @@ No changes since RC 4. Changes in synapse v0.19.0-rc4 (2017-02-02) =========================================== -- Bump cache sizes for common membership queries (PR #1879) +- Bump cache sizes for common membership queries. ([\#1879](https://github.com/matrix-org/synapse/issues/1879)) Changes in synapse v0.19.0-rc3 (2017-02-02) =========================================== -- Fix email push in pusher worker (PR #1875) -- Make `presence.get_new_events` a bit faster (PR #1876) -- Make /keys/changes a bit more performant (PR #1877) +- Fix email push in pusher worker. 
([\#1875](https://github.com/matrix-org/synapse/issues/1875)) +- Make `presence.get_new_events` a bit faster. ([\#1876](https://github.com/matrix-org/synapse/issues/1876)) +- Make /keys/changes a bit more performant. ([\#1877](https://github.com/matrix-org/synapse/issues/1877)) Changes in synapse v0.19.0-rc2 (2017-02-02) =========================================== -- Include newly joined users in /keys/changes API (PR #1872) +- Include newly joined users in /keys/changes API. ([\#1872](https://github.com/matrix-org/synapse/issues/1872)) Changes in synapse v0.19.0-rc1 (2017-02-02) =========================================== Features: -- Add support for specifying multiple bind addresses (PR #1709, #1712, #1795, #1835). Thanks to @kyrias! -- Add /account/3pid/delete endpoint (PR #1714) -- Add config option to configure the Riot URL used in notification emails (PR #1811). Thanks to @aperezdc! -- Add username and password config options for turn server (PR #1832). Thanks to @xsteadfastx! -- Implement device lists updates over federation (PR #1857, #1861, #1864) -- Implement /keys/changes (PR #1869, #1872) +- Add support for specifying multiple bind addresses. Thanks to @kyrias! ([\#1709](https://github.com/matrix-org/synapse/issues/1709), [\#1712](https://github.com/matrix-org/synapse/issues/1712), [\#1795](https://github.com/matrix-org/synapse/issues/1795), [\#1835](https://github.com/matrix-org/synapse/issues/1835)) +- Add /account/3pid/delete endpoint. ([\#1714](https://github.com/matrix-org/synapse/issues/1714)) +- Add config option to configure the Riot URL used in notification emails. Thanks to @aperezdc! ([\#1811](https://github.com/matrix-org/synapse/issues/1811)) +- Add username and password config options for turn server. Thanks to @xsteadfastx! ([\#1832](https://github.com/matrix-org/synapse/issues/1832)) +- Implement device lists updates over federation. ([\#1857](https://github.com/matrix-org/synapse/issues/1857), [\#1861](https://github.com/matrix-org/synapse/issues/1861), [\#1864](https://github.com/matrix-org/synapse/issues/1864)) +- Implement /keys/changes. ([\#1869](https://github.com/matrix-org/synapse/issues/1869), [\#1872](https://github.com/matrix-org/synapse/issues/1872)) Changes: -- Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph! -- Log which files we saved attachments to in the `media_repository` (PR #1791) -- Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787) -- Limit number of entries to prefill from cache on startup (PR #1792) -- Remove `full_twisted_stacktraces` option (PR #1802) -- Measure size of some caches by sum of the size of cached values (PR #1815) -- Measure metrics of `string_cache` (PR #1821) -- Reduce logging verbosity (PR #1822, #1823, #1824) -- Don't clobber a displayname or `avatar_url` if provided by an m.room.member event (PR #1852) -- Better handle 401/404 response for federation /send/ (PR #1866, #1871) +- Improve IPv6 support. Thanks to @kyrias and @glyph! ([\#1696](https://github.com/matrix-org/synapse/issues/1696)) +- Log which files we saved attachments to in the `media_repository`. ([\#1791](https://github.com/matrix-org/synapse/issues/1791)) +- Linearize updates to membership via PUT /state/ to better handle multiple joins. ([\#1787](https://github.com/matrix-org/synapse/issues/1787)) +- Limit number of entries to prefill from cache on startup. ([\#1792](https://github.com/matrix-org/synapse/issues/1792)) +- Remove `full_twisted_stacktraces` option. 
([\#1802](https://github.com/matrix-org/synapse/issues/1802)) +- Measure size of some caches by sum of the size of cached values. ([\#1815](https://github.com/matrix-org/synapse/issues/1815)) +- Measure metrics of `string_cache`. ([\#1821](https://github.com/matrix-org/synapse/issues/1821)) +- Reduce logging verbosity. ([\#1822](https://github.com/matrix-org/synapse/issues/1822), [\#1823](https://github.com/matrix-org/synapse/issues/1823), [\#1824](https://github.com/matrix-org/synapse/issues/1824)) +- Don't clobber a displayname or `avatar_url` if provided by an m.room.member event. ([\#1852](https://github.com/matrix-org/synapse/issues/1852)) +- Better handle 401/404 response for federation /send/. ([\#1866](https://github.com/matrix-org/synapse/issues/1866), [\#1871](https://github.com/matrix-org/synapse/issues/1871)) Fixes: -- Fix ability to change password to a non-ascii one (PR #1711) -- Fix push getting stuck due to looking at the wrong view of state (PR #1820) -- Fix email address comparison to be case insensitive (PR #1827) -- Fix occasional inconsistencies of room membership (PR #1836, #1840) +- Fix ability to change password to a non-ascii one. ([\#1711](https://github.com/matrix-org/synapse/issues/1711)) +- Fix push getting stuck due to looking at the wrong view of state. ([\#1820](https://github.com/matrix-org/synapse/issues/1820)) +- Fix email address comparison to be case insensitive. ([\#1827](https://github.com/matrix-org/synapse/issues/1827)) +- Fix occasional inconsistencies of room membership. ([\#1836](https://github.com/matrix-org/synapse/issues/1836), [\#1840](https://github.com/matrix-org/synapse/issues/1840)) Performance: -- Don't block messages sending on bumping presence (PR #1789) -- Change `device_inbox` stream index to include user (PR #1793) -- Optimise state resolution (PR #1818) -- Use DB cache of joined users for presence (PR #1862) -- Add an index to make membership queries faster (PR #1867) +- Don't block messages sending on bumping presence. ([\#1789](https://github.com/matrix-org/synapse/issues/1789)) +- Change `device_inbox` stream index to include user. ([\#1793](https://github.com/matrix-org/synapse/issues/1793)) +- Optimise state resolution. ([\#1818](https://github.com/matrix-org/synapse/issues/1818)) +- Use DB cache of joined users for presence. ([\#1862](https://github.com/matrix-org/synapse/issues/1862)) +- Add an index to make membership queries faster. ([\#1867](https://github.com/matrix-org/synapse/issues/1867)) Changes in synapse v0.18.7 (2017-01-09) ======================================= @@ -2165,79 +2165,79 @@ Changes in synapse v0.18.7-rc1 (2017-01-06) Bug fixes: -- Fix error in \#PR 1764 to actually fix the nightmare \#1753 bug. +- Fix error in [\#1764](https://github.com/matrix-org/synapse/issues/1764) to actually fix the nightmare [\#1753](https://github.com/matrix-org/synapse/issues/1753) bug. - Improve deadlock logging further -- Discard inbound federation traffic from invalid domains, to immunise against \#1753 +- Discard inbound federation traffic from invalid domains, to immunise against [\#1753](https://github.com/matrix-org/synapse/issues/1753). Changes in synapse v0.18.6 (2017-01-06) ======================================= Bug fixes: -- Fix bug when checking if a guest user is allowed to join a room (PR #1772) Thanks to Patrik Oldsberg for diagnosing and the fix! +- Fix bug when checking if a guest user is allowed to join a room. Thanks to Patrik Oldsberg for diagnosing and the fix! 
([\#1772](https://github.com/matrix-org/synapse/issues/1772)) Changes in synapse v0.18.6-rc3 (2017-01-05) =========================================== Bug fixes: -- Fix bug where we failed to send ban events to the banned server (PR #1758) -- Fix bug where we sent event that didn't originate on this server to other servers (PR #1764) -- Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744) +- Fix bug where we failed to send ban events to the banned server. ([\#1758](https://github.com/matrix-org/synapse/issues/1758)) +- Fix bug where we sent event that didn't originate on this server to other servers. ([\#1764](https://github.com/matrix-org/synapse/issues/1764)) +- Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests. ([\#1765](https://github.com/matrix-org/synapse/issues/1765), [\#1744](https://github.com/matrix-org/synapse/issues/1744)) Changes: -- Improve logging for debugging deadlocks (PR #1766, PR #1767) +- Improve logging for debugging deadlocks. ([\#1766](https://github.com/matrix-org/synapse/issues/1766), [\#1767](https://github.com/matrix-org/synapse/issues/1767)) Changes in synapse v0.18.6-rc2 (2016-12-30) =========================================== Bug fixes: -- Fix memory leak in twisted by initialising logging correctly (PR #1731) -- Fix bug where fetching missing events took an unacceptable amount of time in large rooms (PR #1734) +- Fix memory leak in twisted by initialising logging correctly. ([\#1731](https://github.com/matrix-org/synapse/issues/1731)) +- Fix bug where fetching missing events took an unacceptable amount of time in large rooms. ([\#1734](https://github.com/matrix-org/synapse/issues/1734)) Changes in synapse v0.18.6-rc1 (2016-12-29) =========================================== Bug fixes: -- Make sure that outbound connections are closed (PR #1725) +- Make sure that outbound connections are closed. ([\#1725](https://github.com/matrix-org/synapse/issues/1725)) Changes in synapse v0.18.5 (2016-12-16) ======================================= Bug fixes: -- Fix federation /backfill returning events it shouldn't (PR #1700) -- Fix crash in url preview (PR #1701) +- Fix federation /backfill returning events it shouldn't. ([\#1700](https://github.com/matrix-org/synapse/issues/1700)) +- Fix crash in url preview. ([\#1701](https://github.com/matrix-org/synapse/issues/1701)) Changes in synapse v0.18.5-rc3 (2016-12-13) =========================================== Features: -- Add support for E2E for guests (PR #1653) -- Add new API appservice specific public room list (PR #1676) -- Add new room membership APIs (PR #1680) +- Add support for E2E for guests. ([\#1653](https://github.com/matrix-org/synapse/issues/1653)) +- Add new API appservice specific public room list. ([\#1676](https://github.com/matrix-org/synapse/issues/1676)) +- Add new room membership APIs. ([\#1680](https://github.com/matrix-org/synapse/issues/1680)) Changes: -- Enable guest access for private rooms by default (PR #653) -- Limit the number of events that can be created on a given room concurrently (PR #1620) -- Log the args that we have on UI auth completion (PR #1649) -- Stop generating `refresh_tokens` (PR #1654) -- Stop putting a time caveat on access tokens (PR #1656) -- Remove unspecced GET endpoints for e2e keys (PR #1694) +- Enable guest access for private rooms by default. 
([\#653](https://github.com/matrix-org/synapse/issues/653))
+- Limit the number of events that can be created on a given room concurrently. ([\#1620](https://github.com/matrix-org/synapse/issues/1620))
+- Log the args that we have on UI auth completion. ([\#1649](https://github.com/matrix-org/synapse/issues/1649))
+- Stop generating `refresh_tokens`. ([\#1654](https://github.com/matrix-org/synapse/issues/1654))
+- Stop putting a time caveat on access tokens. ([\#1656](https://github.com/matrix-org/synapse/issues/1656))
+- Remove unspecced GET endpoints for e2e keys. ([\#1694](https://github.com/matrix-org/synapse/issues/1694))
Bug fixes:
-- Fix handling of 500 and 429's over federation (PR #1650)
-- Fix Content-Type header parsing (PR #1660)
-- Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664)
-- Fix some cases where we drop read receipts (PR #1678)
-- Fix bug where calls to `/sync` didn't correctly timeout (PR #1683)
-- Fix bug where E2E key query would fail if a single remote host failed (PR #1686)
+- Fix handling of 500 and 429's over federation. ([\#1650](https://github.com/matrix-org/synapse/issues/1650))
+- Fix Content-Type header parsing. ([\#1660](https://github.com/matrix-org/synapse/issues/1660))
+- Fix error when previewing sites that include unicode, thanks to kyrias. ([\#1664](https://github.com/matrix-org/synapse/issues/1664))
+- Fix some cases where we drop read receipts. ([\#1678](https://github.com/matrix-org/synapse/issues/1678))
+- Fix bug where calls to `/sync` didn't correctly timeout. ([\#1683](https://github.com/matrix-org/synapse/issues/1683))
+- Fix bug where E2E key query would fail if a single remote host failed. ([\#1686](https://github.com/matrix-org/synapse/issues/1686))
Changes in synapse v0.18.5-rc2 (2016-11-24)
===========================================
@@ -2251,37 +2251,37 @@ Changes in synapse v0.18.5-rc1 (2016-11-24)
Features:
-- Implement `event_fields` in filters (PR #1638)
+- Implement `event_fields` in filters. ([\#1638](https://github.com/matrix-org/synapse/issues/1638))
Changes:
-- Use external ldap auth package (PR #1628)
-- Split out federation transaction sending to a worker (PR #1635)
-- Fail with a coherent error message if /sync?filter= is invalid (PR #1636)
-- More efficient notif count queries (PR #1644)
+- Use external ldap auth package. ([\#1628](https://github.com/matrix-org/synapse/issues/1628))
+- Split out federation transaction sending to a worker. ([\#1635](https://github.com/matrix-org/synapse/issues/1635))
+- Fail with a coherent error message if /sync?filter= is invalid. ([\#1636](https://github.com/matrix-org/synapse/issues/1636))
+- More efficient notif count queries. ([\#1644](https://github.com/matrix-org/synapse/issues/1644))
Changes in synapse v0.18.4 (2016-11-22)
=======================================
Bug fixes:
-- Add workaround for buggy clients that the fail to register (PR #1632)
+- Add workaround for buggy clients that fail to register. ([\#1632](https://github.com/matrix-org/synapse/issues/1632))
Changes in synapse v0.18.4-rc1 (2016-11-14)
===========================================
Changes:
-- Various database efficiency improvements (PR #1188, #1192)
-- Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR #1198)
-- Allow specifying duration in minutes in config, thanks to Daniel Dent (PR #1625)
+- Various database efficiency improvements.
([\#1188](https://github.com/matrix-org/synapse/issues/1188), [\#1192](https://github.com/matrix-org/synapse/issues/1192)) +- Update default config to blacklist more internal IPs, thanks to Euan Kemp. ([\#1198](https://github.com/matrix-org/synapse/issues/1198)) +- Allow specifying duration in minutes in config, thanks to Daniel Dent. ([\#1625](https://github.com/matrix-org/synapse/issues/1625)) Bug fixes: -- Fix media repo to set CORs headers on responses (PR #1190) -- Fix registration to not error on non-ascii passwords (PR #1191) -- Fix create event code to limit the number of `prev_events` (PR #1615) -- Fix bug in transaction ID deduplication (PR #1624) +- Fix media repo to set CORs headers on responses. ([\#1190](https://github.com/matrix-org/synapse/issues/1190)) +- Fix registration to not error on non-ascii passwords. ([\#1191](https://github.com/matrix-org/synapse/issues/1191)) +- Fix create event code to limit the number of `prev_events`. ([\#1615](https://github.com/matrix-org/synapse/issues/1615)) +- Fix bug in transaction ID deduplication. ([\#1624](https://github.com/matrix-org/synapse/issues/1624)) Changes in synapse v0.18.3 (2016-11-08) ======================================= @@ -2302,32 +2302,32 @@ Changes in synapse v0.18.2-rc5 (2016-10-28) Bug fixes: -- Fix prometheus process metrics in worker processes (PR #1184) +- Fix prometheus process metrics in worker processes. ([\#1184](https://github.com/matrix-org/synapse/issues/1184)) Changes in synapse v0.18.2-rc4 (2016-10-27) =========================================== Bug fixes: -- Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade (PR #1183) +- Fix `user_threepids` schema delta, which in some instances prevented startup after upgrade. ([\#1183](https://github.com/matrix-org/synapse/issues/1183)) Changes in synapse v0.18.2-rc3 (2016-10-27) =========================================== Changes: -- Allow clients to supply access tokens as headers (PR #1098) -- Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164) -- Make password reset email field case insensitive (PR #1170) -- Reduce redundant database work in email pusher (PR #1174) -- Allow configurable rate limiting per AS (PR #1175) -- Check whether to ratelimit sooner to avoid work (PR #1176) -- Standardise prometheus metrics (PR #1177) +- Allow clients to supply access tokens as headers. ([\#1098](https://github.com/matrix-org/synapse/issues/1098)) +- Clarify error codes for GET /filter/, thanks to Alexander Maznev. ([\#1164](https://github.com/matrix-org/synapse/issues/1164)) +- Make password reset email field case insensitive. ([\#1170](https://github.com/matrix-org/synapse/issues/1170)) +- Reduce redundant database work in email pusher. ([\#1174](https://github.com/matrix-org/synapse/issues/1174)) +- Allow configurable rate limiting per AS. ([\#1175](https://github.com/matrix-org/synapse/issues/1175)) +- Check whether to ratelimit sooner to avoid work. ([\#1176](https://github.com/matrix-org/synapse/issues/1176)) +- Standardise prometheus metrics. ([\#1177](https://github.com/matrix-org/synapse/issues/1177)) Bug fixes: -- Fix incredibly slow back pagination query (PR #1178) -- Fix infinite typing bug (PR #1179) +- Fix incredibly slow back pagination query. ([\#1178](https://github.com/matrix-org/synapse/issues/1178)) +- Fix infinite typing bug. 
([\#1179](https://github.com/matrix-org/synapse/issues/1179))
Changes in synapse v0.18.2-rc2 (2016-10-25)
===========================================
@@ -2339,20 +2339,20 @@ Changes in synapse v0.18.2-rc1 (2016-10-17)
Changes:
-- Remove redundant `event_auth` index (PR #1113)
-- Reduce DB hits for replication (PR #1141)
-- Implement pluggable password auth (PR #1155)
-- Remove rate limiting from app service senders and fix `get_or_create_user` requester, thanks to Patrik Oldsberg (PR #1157)
-- window.postmessage for Interactive Auth fallback (PR #1159)
-- Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162)
-- Add config option for adding additional TLS fingerprints (PR #1167)
-- User-interactive auth on delete device (PR #1168)
+- Remove redundant `event_auth` index. ([\#1113](https://github.com/matrix-org/synapse/issues/1113))
+- Reduce DB hits for replication. ([\#1141](https://github.com/matrix-org/synapse/issues/1141))
+- Implement pluggable password auth. ([\#1155](https://github.com/matrix-org/synapse/issues/1155))
+- Remove rate limiting from app service senders and fix `get_or_create_user` requester, thanks to Patrik Oldsberg. ([\#1157](https://github.com/matrix-org/synapse/issues/1157))
+- window.postmessage for Interactive Auth fallback. ([\#1159](https://github.com/matrix-org/synapse/issues/1159))
+- Use sys.executable instead of hardcoded python, thanks to Pedro Larroy. ([\#1162](https://github.com/matrix-org/synapse/issues/1162))
+- Add config option for adding additional TLS fingerprints. ([\#1167](https://github.com/matrix-org/synapse/issues/1167))
+- User-interactive auth on delete device. ([\#1168](https://github.com/matrix-org/synapse/issues/1168))
Bug fixes:
-- Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg (PR #1150)
-- Fix interactive auth to return 401 from for incorrect password (PR #1160, #1166)
-- Fix email push notifs being dropped (PR #1169)
+- Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg. ([\#1150](https://github.com/matrix-org/synapse/issues/1150))
+- Fix interactive auth to return 401 for incorrect password. ([\#1160](https://github.com/matrix-org/synapse/issues/1160), [\#1166](https://github.com/matrix-org/synapse/issues/1166))
+- Fix email push notifs being dropped. ([\#1169](https://github.com/matrix-org/synapse/issues/1169))
Changes in synapse v0.18.1 (2016-10-05)
=======================================
@@ -2364,19 +2364,19 @@ Changes in synapse v0.18.1-rc1 (2016-09-30)
Features:
-- Add `total_room_count_estimate` to `/publicRooms` (PR #1133)
+- Add `total_room_count_estimate` to `/publicRooms`. ([\#1133](https://github.com/matrix-org/synapse/issues/1133))
Changes:
-- Time out typing over federation (PR #1140)
-- Restructure LDAP authentication (PR #1153)
+- Time out typing over federation. ([\#1140](https://github.com/matrix-org/synapse/issues/1140))
+- Restructure LDAP authentication. ([\#1153](https://github.com/matrix-org/synapse/issues/1153))
Bug fixes:
-- Fix 3pid invites when server is already in the room (PR #1136)
-- Fix upgrading with SQLite taking lots of CPU for a few days after upgrade (PR #1144)
-- Fix upgrading from very old database versions (PR #1145)
-- Fix port script to work with recently added tables (PR #1146)
+- Fix 3pid invites when server is already in the room. ([\#1136](https://github.com/matrix-org/synapse/issues/1136))
+- Fix upgrading with SQLite taking lots of CPU for a few days after upgrade.
([\#1144](https://github.com/matrix-org/synapse/issues/1144)) +- Fix upgrading from very old database versions. ([\#1145](https://github.com/matrix-org/synapse/issues/1145)) +- Fix port script to work with recently added tables. ([\#1146](https://github.com/matrix-org/synapse/issues/1146)) Changes in synapse v0.18.0 (2016-09-19) ======================================= @@ -2385,39 +2385,39 @@ The release includes major changes to the state storage database schemas, which Changes: -- Make public room search case insensitive (PR #1127) +- Make public room search case insensitive. ([\#1127](https://github.com/matrix-org/synapse/issues/1127)) Bug fixes: -- Fix and clean up publicRooms pagination (PR #1129) +- Fix and clean up publicRooms pagination. ([\#1129](https://github.com/matrix-org/synapse/issues/1129)) Changes in synapse v0.18.0-rc1 (2016-09-16) =========================================== Features: -- Add `only=highlight` on `/notifications` (PR #1081) -- Add server param to /publicRooms (PR #1082) -- Allow clients to ask for the whole of a single state event (PR #1094) -- Add `is_direct` param to /createRoom (PR #1108) -- Add pagination support to publicRooms (PR #1121) -- Add very basic filter API to /publicRooms (PR #1126) -- Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111) +- Add `only=highlight` on `/notifications`. ([\#1081](https://github.com/matrix-org/synapse/issues/1081)) +- Add server param to /publicRooms. ([\#1082](https://github.com/matrix-org/synapse/issues/1082)) +- Allow clients to ask for the whole of a single state event. ([\#1094](https://github.com/matrix-org/synapse/issues/1094)) +- Add `is_direct` param to /createRoom. ([\#1108](https://github.com/matrix-org/synapse/issues/1108)) +- Add pagination support to publicRooms. ([\#1121](https://github.com/matrix-org/synapse/issues/1121)) +- Add very basic filter API to /publicRooms. ([\#1126](https://github.com/matrix-org/synapse/issues/1126)) +- Add basic direct to device messaging support for E2E. ([\#1074](https://github.com/matrix-org/synapse/issues/1074), [\#1084](https://github.com/matrix-org/synapse/issues/1084), [\#1104](https://github.com/matrix-org/synapse/issues/1104), [\#1111](https://github.com/matrix-org/synapse/issues/1111)) Changes: -- Move to storing `state_groups_state` as deltas, greatly reducing DB size (PR #1065) -- Reduce amount of state pulled out of the DB during common requests (PR #1069) -- Allow PDF to be rendered from media repo (PR #1071) -- Reindex `state_groups_state` after pruning (PR #1085) -- Clobber EDUs in send queue (PR #1095) -- Conform better to the CAS protocol specification (PR #1100) -- Limit how often we ask for keys from dead servers (PR #1114) +- Move to storing `state_groups_state` as deltas, greatly reducing DB size. ([\#1065](https://github.com/matrix-org/synapse/issues/1065)) +- Reduce amount of state pulled out of the DB during common requests. ([\#1069](https://github.com/matrix-org/synapse/issues/1069)) +- Allow PDF to be rendered from media repo. ([\#1071](https://github.com/matrix-org/synapse/issues/1071)) +- Reindex `state_groups_state` after pruning. ([\#1085](https://github.com/matrix-org/synapse/issues/1085)) +- Clobber EDUs in send queue. ([\#1095](https://github.com/matrix-org/synapse/issues/1095)) +- Conform better to the CAS protocol specification. ([\#1100](https://github.com/matrix-org/synapse/issues/1100)) +- Limit how often we ask for keys from dead servers. 
([\#1114](https://github.com/matrix-org/synapse/issues/1114)) Bug fixes: -- Fix /notifications API when used with `from` param (PR #1080) -- Fix backfill when cannot find an event. (PR #1107) +- Fix /notifications API when used with `from` param. ([\#1080](https://github.com/matrix-org/synapse/issues/1080)) +- Fix backfill when cannot find an event. ([\#1107](https://github.com/matrix-org/synapse/issues/1107)) Changes in synapse v0.17.3 (2016-09-09) ======================================= @@ -2436,20 +2436,20 @@ Changes in synapse v0.17.2-rc1 (2016-09-05) Features: -- Start adding store-and-forward direct-to-device messaging (PR #1046, #1050, #1062, #1066) +- Start adding store-and-forward direct-to-device messaging. ([\#1046](https://github.com/matrix-org/synapse/issues/1046), [\#1050](https://github.com/matrix-org/synapse/issues/1050), [\#1062](https://github.com/matrix-org/synapse/issues/1062), [\#1066](https://github.com/matrix-org/synapse/issues/1066)) Changes: -- Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068) -- Don't notify for online to online presence transitions. (PR #1054) -- Occasionally persist unpersisted presence updates (PR #1055) -- Allow application services to have an optional `url` (PR #1056) -- Clean up old sent transactions from DB (PR #1059) +- Avoid pulling the full state of a room out so often. ([\#1047](https://github.com/matrix-org/synapse/issues/1047), [\#1049](https://github.com/matrix-org/synapse/issues/1049), [\#1063](https://github.com/matrix-org/synapse/issues/1063), [\#1068](https://github.com/matrix-org/synapse/issues/1068)) +- Don't notify for online to online presence transitions. ([\#1054](https://github.com/matrix-org/synapse/issues/1054)) +- Occasionally persist unpersisted presence updates. ([\#1055](https://github.com/matrix-org/synapse/issues/1055)) +- Allow application services to have an optional `url`. ([\#1056](https://github.com/matrix-org/synapse/issues/1056)) +- Clean up old sent transactions from DB. ([\#1059](https://github.com/matrix-org/synapse/issues/1059)) Bug fixes: -- Fix None check in backfill (PR #1043) -- Fix membership changes to be idempotent (PR #1067) +- Fix None check in backfill. ([\#1043](https://github.com/matrix-org/synapse/issues/1043)) +- Fix membership changes to be idempotent. ([\#1067](https://github.com/matrix-org/synapse/issues/1067)) - Fix bug in `get_pdu` where it would sometimes return events with incorrect signature Changes in synapse v0.17.1 (2016-08-24) @@ -2457,86 +2457,86 @@ Changes in synapse v0.17.1 (2016-08-24) Changes: -- Delete old `received_transactions` rows (PR #1038) -- Pass through user-supplied content in `/join/$room_id` (PR #1039) +- Delete old `received_transactions` rows. ([\#1038](https://github.com/matrix-org/synapse/issues/1038)) +- Pass through user-supplied content in `/join/$room_id`. ([\#1039](https://github.com/matrix-org/synapse/issues/1039)) Bug fixes: -- Fix bug with backfill (PR #1040) +- Fix bug with backfill. ([\#1040](https://github.com/matrix-org/synapse/issues/1040)) Changes in synapse v0.17.1-rc1 (2016-08-22) =========================================== Features: -- Add notification API (PR #1028) +- Add notification API. 
([\#1028](https://github.com/matrix-org/synapse/issues/1028)) Changes: -- Don't print stack traces when failing to get remote keys (PR #996) -- Various federation /event/ perf improvements (PR #998) -- Only process one local membership event per room at a time (PR #1005) -- Move default display name push rule (PR #1011, #1023) -- Fix up preview URL API. Add tests. (PR #1015) -- Set `Content-Security-Policy` on media repo (PR #1021) -- Make `notify_interested_services` faster (PR #1022) -- Add usage stats to prometheus monitoring (PR #1037) +- Don't print stack traces when failing to get remote keys. ([\#996](https://github.com/matrix-org/synapse/issues/996)) +- Various federation /event/ perf improvements. ([\#998](https://github.com/matrix-org/synapse/issues/998)) +- Only process one local membership event per room at a time. ([\#1005](https://github.com/matrix-org/synapse/issues/1005)) +- Move default display name push rule. ([\#1011](https://github.com/matrix-org/synapse/issues/1011), [\#1023](https://github.com/matrix-org/synapse/issues/1023)) +- Fix up preview URL API. Add tests. ([\#1015](https://github.com/matrix-org/synapse/issues/1015)) +- Set `Content-Security-Policy` on media repo. ([\#1021](https://github.com/matrix-org/synapse/issues/1021)) +- Make `notify_interested_services` faster. ([\#1022](https://github.com/matrix-org/synapse/issues/1022)) +- Add usage stats to prometheus monitoring. ([\#1037](https://github.com/matrix-org/synapse/issues/1037)) Bug fixes: -- Fix token login (PR #993) -- Fix CAS login (PR #994, #995) -- Fix /sync to not clobber `status_msg` (PR #997) -- Fix redacted state events to include `prev_content` (PR #1003) -- Fix some bugs in the auth/ldap handler (PR #1007) -- Fix backfill request to limit URI length, so that remotes Don't reject the requests due to path length limits (PR #1012) -- Fix AS push code to not send duplicate events (PR #1025) +- Fix token login. ([\#993](https://github.com/matrix-org/synapse/issues/993)) +- Fix CAS login. ([\#994](https://github.com/matrix-org/synapse/issues/994), [\#995](https://github.com/matrix-org/synapse/issues/995)) +- Fix /sync to not clobber `status_msg`. ([\#997](https://github.com/matrix-org/synapse/issues/997)) +- Fix redacted state events to include `prev_content`. ([\#1003](https://github.com/matrix-org/synapse/issues/1003)) +- Fix some bugs in the auth/ldap handler. ([\#1007](https://github.com/matrix-org/synapse/issues/1007)) +- Fix backfill request to limit URI length, so that remotes don't reject the requests due to path length limits. ([\#1012](https://github.com/matrix-org/synapse/issues/1012)) +- Fix AS push code to not send duplicate events. ([\#1025](https://github.com/matrix-org/synapse/issues/1025)) Changes in synapse v0.17.0 (2016-08-08) ======================================= This release contains significant security bug fixes regarding authenticating events received over federation. PLEASE UPGRADE. -This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details. +This release changes the LDAP configuration format in a backwards incompatible way, see [\#843](https://github.com/matrix-org/synapse/issues/843) for details. Changes: -- Add federation /version API (PR #990) -- Make psutil dependency optional (PR #992) +- Add federation /version API. ([\#990](https://github.com/matrix-org/synapse/issues/990)) +- Make psutil dependency optional. 
([\#992](https://github.com/matrix-org/synapse/issues/992)) Bug fixes: -- Fix URL preview API to exclude HTML comments in description (PR #988) -- Fix error handling of remote joins (PR #991) +- Fix URL preview API to exclude HTML comments in description. ([\#988](https://github.com/matrix-org/synapse/issues/988)) +- Fix error handling of remote joins. ([\#991](https://github.com/matrix-org/synapse/issues/991)) Changes in synapse v0.17.0-rc4 (2016-08-05) =========================================== Changes: -- Change the way we summarize URLs when previewing (PR #973) -- Add new `/state_ids/` federation API (PR #979) -- Speed up processing of `/state/` response (PR #986) +- Change the way we summarize URLs when previewing. ([\#973](https://github.com/matrix-org/synapse/issues/973)) +- Add new `/state_ids/` federation API. ([\#979](https://github.com/matrix-org/synapse/issues/979)) +- Speed up processing of `/state/` response. ([\#986](https://github.com/matrix-org/synapse/issues/986)) Bug fixes: -- Fix event persistence when event has already been partially persisted (PR #975, #983, #985) -- Fix port script to also copy across backfilled events (PR #982) +- Fix event persistence when event has already been partially persisted. ([\#975](https://github.com/matrix-org/synapse/issues/975), [\#983](https://github.com/matrix-org/synapse/issues/983), [\#985](https://github.com/matrix-org/synapse/issues/985)) +- Fix port script to also copy across backfilled events. ([\#982](https://github.com/matrix-org/synapse/issues/982)) Changes in synapse v0.17.0-rc3 (2016-08-02) =========================================== Changes: -- Forbid non-ASes from registering users whose names begin with `_` (PR #958) -- Add some basic admin API docs (PR #963) +- Forbid non-ASes from registering users whose names begin with `_`. ([\#958](https://github.com/matrix-org/synapse/issues/958)) +- Add some basic admin API docs. ([\#963](https://github.com/matrix-org/synapse/issues/963)) Bug fixes: -- Send the correct host header when fetching keys (PR #941) -- Fix joining a room that has missing auth events (PR #964) -- Fix various push bugs (PR #966, #970) -- Fix adding emails on registration (PR #968) +- Send the correct host header when fetching keys. ([\#941](https://github.com/matrix-org/synapse/issues/941)) +- Fix joining a room that has missing auth events. ([\#964](https://github.com/matrix-org/synapse/issues/964)) +- Fix various push bugs. ([\#966](https://github.com/matrix-org/synapse/issues/966), [\#970](https://github.com/matrix-org/synapse/issues/970)) +- Fix adding emails on registration. ([\#968](https://github.com/matrix-org/synapse/issues/968)) Changes in synapse v0.17.0-rc2 (2016-08-02) =========================================== @@ -2546,51 +2546,51 @@ Changes in synapse v0.17.0-rc2 (2016-08-02) Changes in synapse v0.17.0-rc1 (2016-07-28) =========================================== -This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details. +This release changes the LDAP configuration format in a backwards incompatible way, see [\#843](https://github.com/matrix-org/synapse/issues/843) for details. 
Features: -- Add `purge_media_cache` admin API (PR #902) -- Add deactivate account admin API (PR #903) -- Add optional pepper to password hashing (PR #907, #910 by KentShikama) -- Add an admin option to shared secret registration (breaks backwards compat) (PR #909) -- Add purge local room history API (PR #911, #923, #924) -- Add requestToken endpoints (PR #915) -- Add an /account/deactivate endpoint (PR #921) -- Add filter param to /messages. Add `contains_url` to filter. (PR #922) -- Add `device_id` support to /login (PR #929) -- Add `device_id` support to /v2/register flow. (PR #937, #942) -- Add GET /devices endpoint (PR #939, #944) -- Add GET /device/{deviceId} (PR #943) -- Add update and delete APIs for devices (PR #949) +- Add `purge_media_cache` admin API. ([\#902](https://github.com/matrix-org/synapse/issues/902)) +- Add deactivate account admin API. ([\#903](https://github.com/matrix-org/synapse/issues/903)) +- Add optional pepper to password hashing. Contributed by KentShikama. ([\#907](https://github.com/matrix-org/synapse/issues/907), [\#910](https://github.com/matrix-org/synapse/issues/910)) +- Add an admin option to shared secret registration (breaks backwards compat). ([\#909](https://github.com/matrix-org/synapse/issues/909)) +- Add purge local room history API. ([\#911](https://github.com/matrix-org/synapse/issues/911), [\#923](https://github.com/matrix-org/synapse/issues/923), [\#924](https://github.com/matrix-org/synapse/issues/924)) +- Add requestToken endpoints. ([\#915](https://github.com/matrix-org/synapse/issues/915)) +- Add an /account/deactivate endpoint. ([\#921](https://github.com/matrix-org/synapse/issues/921)) +- Add filter param to /messages. Add `contains_url` to filter. ([\#922](https://github.com/matrix-org/synapse/issues/922)) +- Add `device_id` support to /login. ([\#929](https://github.com/matrix-org/synapse/issues/929)) +- Add `device_id` support to /v2/register flow. ([\#937](https://github.com/matrix-org/synapse/issues/937), [\#942](https://github.com/matrix-org/synapse/issues/942)) +- Add GET /devices endpoint. ([\#939](https://github.com/matrix-org/synapse/issues/939), [\#944](https://github.com/matrix-org/synapse/issues/944)) +- Add GET /device/{deviceId}. ([\#943](https://github.com/matrix-org/synapse/issues/943)) +- Add update and delete APIs for devices. ([\#949](https://github.com/matrix-org/synapse/issues/949)) Changes: -- Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt) -- Linearize some federation endpoints based on `(origin, room_id)` (PR #879) -- Remove the legacy v0 content upload API. (PR #888) -- Use similar naming we use in email notifs for push (PR #894) -- Optionally include password hash in createUser endpoint (PR #905 by KentShikama) -- Use a query that postgresql optimises better for `get_events_around` (PR #906) -- Fall back to '`username` if `user` is not given for appservice registration. (PR #927 by Half-Shot) -- Add metrics for psutil derived memory usage (PR #936) -- Record `device_id` in `client_ips` (PR #938) -- Send the correct host header when fetching keys (PR #941) -- Log the hostname the reCAPTCHA was completed on (PR #946) -- Make the device id on e2e key upload optional (PR #956) -- Add r0.2.0 to the "supported versions" list (PR #960) -- Don't include name of room for invites in push (PR #961) +- Rewrite LDAP Authentication against ldap3. Contributed by mweinelt. ([\#843](https://github.com/matrix-org/synapse/issues/843)) +- Linearize some federation endpoints based on `(origin, room_id)`. 
([\#879](https://github.com/matrix-org/synapse/issues/879)) +- Remove the legacy v0 content upload API. ([\#888](https://github.com/matrix-org/synapse/issues/888)) +- Use similar naming we use in email notifs for push. ([\#894](https://github.com/matrix-org/synapse/issues/894)) +- Optionally include password hash in createUser endpoint. Contributed by KentShikama. ([\#905](https://github.com/matrix-org/synapse/issues/905)) +- Use a query that postgresql optimises better for `get_events_around`. ([\#906](https://github.com/matrix-org/synapse/issues/906)) +- Fall back to '`username` if `user` is not given for appservice registration. Contributed by Half-Shot. ([\#927](https://github.com/matrix-org/synapse/issues/927)) +- Add metrics for psutil derived memory usage. ([\#936](https://github.com/matrix-org/synapse/issues/936)) +- Record `device_id` in `client_ips`. ([\#938](https://github.com/matrix-org/synapse/issues/938)) +- Send the correct host header when fetching keys. ([\#941](https://github.com/matrix-org/synapse/issues/941)) +- Log the hostname the reCAPTCHA was completed on. ([\#946](https://github.com/matrix-org/synapse/issues/946)) +- Make the device id on e2e key upload optional. ([\#956](https://github.com/matrix-org/synapse/issues/956)) +- Add r0.2.0 to the "supported versions" list. ([\#960](https://github.com/matrix-org/synapse/issues/960)) +- Don't include name of room for invites in push. ([\#961](https://github.com/matrix-org/synapse/issues/961)) Bug fixes: -- Fix substitution failure in mail template (PR #887) -- Put most recent 20 messages in email notif (PR #892) -- Ensure that the guest user is in the database when upgrading accounts (PR #914) -- Fix various edge cases in auth handling (PR #919) -- Fix 500 ISE when sending alias event without a `state_key` (PR #925) -- Fix bug where we stored rejections in the `state_group`, persist all rejections (PR #948) -- Fix lack of check of if the user is banned when handling 3pid invites (PR #952) -- Fix a couple of bugs in the transaction and keyring code (PR #954, #955) +- Fix substitution failure in mail template. ([\#887](https://github.com/matrix-org/synapse/issues/887)) +- Put most recent 20 messages in email notif. ([\#892](https://github.com/matrix-org/synapse/issues/892)) +- Ensure that the guest user is in the database when upgrading accounts. ([\#914](https://github.com/matrix-org/synapse/issues/914)) +- Fix various edge cases in auth handling. ([\#919](https://github.com/matrix-org/synapse/issues/919)) +- Fix 500 ISE when sending alias event without a `state_key`. ([\#925](https://github.com/matrix-org/synapse/issues/925)) +- Fix bug where we stored rejections in the `state_group`, persist all rejections. ([\#948](https://github.com/matrix-org/synapse/issues/948)) +- Fix lack of check of if the user is banned when handling 3pid invites. ([\#952](https://github.com/matrix-org/synapse/issues/952)) +- Fix a couple of bugs in the transaction and keyring code. ([\#954](https://github.com/matrix-org/synapse/issues/954), [\#955](https://github.com/matrix-org/synapse/issues/955)) Changes in synapse v0.16.1-r1 (2016-07-08) ========================================== @@ -2604,13 +2604,13 @@ Changes in synapse v0.16.1 (2016-06-20) Bug fixes: -- Fix assorted bugs in `/preview_url` (PR #872) -- Fix TypeError when setting unicode passwords (PR #873) +- Fix assorted bugs in `/preview_url`. ([\#872](https://github.com/matrix-org/synapse/issues/872)) +- Fix TypeError when setting unicode passwords. 
([\#873](https://github.com/matrix-org/synapse/issues/873)) Performance improvements: -- Turn `use_frozen_events` off by default (PR #877) -- Disable responding with canonical json for federation (PR #878) +- Turn `use_frozen_events` off by default. ([\#877](https://github.com/matrix-org/synapse/issues/877)) +- Disable responding with canonical json for federation. ([\#878](https://github.com/matrix-org/synapse/issues/878)) Changes in synapse v0.16.1-rc1 (2016-06-15) =========================================== @@ -2619,20 +2619,20 @@ Features: None Changes: -- Log requester for `/publicRoom` endpoints when possible (PR #856) -- 502 on `/thumbnail` when can't connect to remote server (PR #862) -- Linearize fetching of gaps on incoming events (PR #871) +- Log requester for `/publicRoom` endpoints when possible. ([\#856](https://github.com/matrix-org/synapse/issues/856)) +- 502 on `/thumbnail` when can't connect to remote server. ([\#862](https://github.com/matrix-org/synapse/issues/862)) +- Linearize fetching of gaps on incoming events. ([\#871](https://github.com/matrix-org/synapse/issues/871)) Bugs fixes: -- Fix bug where rooms where marked as published by default (PR #857) -- Fix bug where joining room with an event with invalid sender (PR #868) -- Fix bug where backfilled events were sent down sync streams (PR #869) -- Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable (PR #870) +- Fix bug where rooms where marked as published by default. ([\#857](https://github.com/matrix-org/synapse/issues/857)) +- Fix bug where joining room with an event with invalid sender. ([\#868](https://github.com/matrix-org/synapse/issues/868)) +- Fix bug where backfilled events were sent down sync streams. ([\#869](https://github.com/matrix-org/synapse/issues/869)) +- Fix bug where outgoing connections could wedge indefinitely, causing push notifications to be unreliable. ([\#870](https://github.com/matrix-org/synapse/issues/870)) Performance improvements: -- Improve `/publicRooms` performance(PR #859) +- Improve `/publicRooms` performance. ([\#859](https://github.com/matrix-org/synapse/issues/859)) Changes in synapse v0.16.0 (2016-06-09) ======================================= @@ -2641,31 +2641,31 @@ NB: As of v0.14 all AS config files must have an ID field. Bug fixes: -- Don't make rooms published by default (PR #857) +- Don't make rooms published by default. ([\#857](https://github.com/matrix-org/synapse/issues/857)) Changes in synapse v0.16.0-rc2 (2016-06-08) =========================================== Features: -- Add configuration option for tuning GC via `gc.set_threshold` (PR #849) +- Add configuration option for tuning GC via `gc.set_threshold`. ([\#849](https://github.com/matrix-org/synapse/issues/849)) Changes: -- Record metrics about GC (PR #771, #847, #852) -- Add metric counter for number of persisted events (PR #841) +- Record metrics about GC. ([\#771](https://github.com/matrix-org/synapse/issues/771), [\#847](https://github.com/matrix-org/synapse/issues/847), [\#852](https://github.com/matrix-org/synapse/issues/852)) +- Add metric counter for number of persisted events. ([\#841](https://github.com/matrix-org/synapse/issues/841)) Bug fixes: -- Fix `From` header in email notifications (PR #843) -- Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) +- Fix `From` header in email notifications. 
([\#843](https://github.com/matrix-org/synapse/issues/843)) +- Fix presence where timeouts were not being fired for the first 8h after restarts. ([\#842](https://github.com/matrix-org/synapse/issues/842)) - Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906) Performance improvements: -- Remove event fetching from DB threads (PR #835) -- Change the way we cache events (PR #836) -- Add events to cache when we persist them (PR #840) +- Remove event fetching from DB threads. ([\#835](https://github.com/matrix-org/synapse/issues/835)) +- Change the way we cache events. ([\#836](https://github.com/matrix-org/synapse/issues/836)) +- Add events to cache when we persist them. ([\#840](https://github.com/matrix-org/synapse/issues/840)) Changes in synapse v0.16.0-rc1 (2016-06-03) =========================================== @@ -2674,74 +2674,74 @@ Version 0.15 was not released. See v0.15.0-rc1 below for additional changes. Features: -- Add email notifications for missed messages (PR #759, #786, #799, #810, #815, #821) -- Add a `url_preview_ip_range_whitelist` config param (PR #760) -- Add /report endpoint (PR #762) -- Add basic ignore user API (PR #763) -- Add an openidish mechanism for proving that you own a given `user_id` (PR #765) -- Allow clients to specify a `server_name` to avoid "No known servers" (PR #794) -- Add `secondary_directory_servers` option to fetch room list from other servers (PR #808, #813) +- Add email notifications for missed messages. ([\#759](https://github.com/matrix-org/synapse/issues/759), [\#786](https://github.com/matrix-org/synapse/issues/786), [\#799](https://github.com/matrix-org/synapse/issues/799), [\#810](https://github.com/matrix-org/synapse/issues/810), [\#815](https://github.com/matrix-org/synapse/issues/815), [\#821](https://github.com/matrix-org/synapse/issues/821)) +- Add a `url_preview_ip_range_whitelist` config param. ([\#760](https://github.com/matrix-org/synapse/issues/760)) +- Add /report endpoint. ([\#762](https://github.com/matrix-org/synapse/issues/762)) +- Add basic ignore user API. ([\#763](https://github.com/matrix-org/synapse/issues/763)) +- Add an openidish mechanism for proving that you own a given `user_id`. ([\#765](https://github.com/matrix-org/synapse/issues/765)) +- Allow clients to specify a `server_name` to avoid "No known servers". ([\#794](https://github.com/matrix-org/synapse/issues/794)) +- Add `secondary_directory_servers` option to fetch room list from other servers. ([\#808](https://github.com/matrix-org/synapse/issues/808), [\#813](https://github.com/matrix-org/synapse/issues/813)) Changes: -- Report per request metrics for all of the things using `request_handler` (PR #756) -- Correctly handle `NULL` password hashes from the database (PR #775) -- Allow receipts for events we haven't seen in the db (PR #784) -- Make synctl read a cache factor from config file (PR #785) -- Increment badge count per missed convo, not per msg (PR #793) -- Special case `m.room.third_party_invite` event auth to match invites (PR #814) +- Report per request metrics for all of the things using `request_handler`. ([\#756](https://github.com/matrix-org/synapse/issues/756)) +- Correctly handle `NULL` password hashes from the database. ([\#775](https://github.com/matrix-org/synapse/issues/775)) +- Allow receipts for events we haven't seen in the db. ([\#784](https://github.com/matrix-org/synapse/issues/784)) +- Make synctl read a cache factor from config file. 
([\#785](https://github.com/matrix-org/synapse/issues/785)) +- Increment badge count per missed convo, not per msg. ([\#793](https://github.com/matrix-org/synapse/issues/793)) +- Special case `m.room.third_party_invite` event auth to match invites. ([\#814](https://github.com/matrix-org/synapse/issues/814)) Bug fixes: -- Fix typo in `event_auth` servlet path (PR #757) -- Fix password reset (PR #758) +- Fix typo in `event_auth` servlet path. ([\#757](https://github.com/matrix-org/synapse/issues/757)) +- Fix password reset. ([\#758](https://github.com/matrix-org/synapse/issues/758)) Performance improvements: -- Reduce database inserts when sending transactions (PR #767) -- Queue events by room for persistence (PR #768) -- Add cache to `get_user_by_id` (PR #772) -- Add and use `get_domain_from_id` (PR #773) -- Use tree cache for `get_linearized_receipts_for_room` (PR #779) -- Remove unused indices (PR #782) -- Add caches to `bulk_get_push_rules*` (PR #804) -- Cache `get_event_reference_hashes` (PR #806) -- Add `get_users_with_read_receipts_in_room` cache (PR #809) -- Use state to calculate `get_users_in_room` (PR #811) -- Load push rules in storage layer so that they get cached (PR #825) -- Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828) -- Poke notifier on next reactor tick (PR #829) -- Change CacheMetrics to be quicker (PR #830) +- Reduce database inserts when sending transactions. ([\#767](https://github.com/matrix-org/synapse/issues/767)) +- Queue events by room for persistence. ([\#768](https://github.com/matrix-org/synapse/issues/768)) +- Add cache to `get_user_by_id`. ([\#772](https://github.com/matrix-org/synapse/issues/772)) +- Add and use `get_domain_from_id`. ([\#773](https://github.com/matrix-org/synapse/issues/773)) +- Use tree cache for `get_linearized_receipts_for_room`. ([\#779](https://github.com/matrix-org/synapse/issues/779)) +- Remove unused indices. ([\#782](https://github.com/matrix-org/synapse/issues/782)) +- Add caches to `bulk_get_push_rules*`. ([\#804](https://github.com/matrix-org/synapse/issues/804)) +- Cache `get_event_reference_hashes`. ([\#806](https://github.com/matrix-org/synapse/issues/806)) +- Add `get_users_with_read_receipts_in_room` cache. ([\#809](https://github.com/matrix-org/synapse/issues/809)) +- Use state to calculate `get_users_in_room`. ([\#811](https://github.com/matrix-org/synapse/issues/811)) +- Load push rules in storage layer so that they get cached. ([\#825](https://github.com/matrix-org/synapse/issues/825)) +- Make `get_joined_hosts_for_room` use `get_users_in_room`. ([\#828](https://github.com/matrix-org/synapse/issues/828)) +- Poke notifier on next reactor tick. ([\#829](https://github.com/matrix-org/synapse/issues/829)) +- Change CacheMetrics to be quicker. ([\#830](https://github.com/matrix-org/synapse/issues/830)) Changes in synapse v0.15.0-rc1 (2016-04-26) =========================================== Features: -- Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck (PR #671,\#687) -- Add URL previewing support (PR #688) -- Add login support for LDAP, thanks to Christoph Witzany (PR #701) -- Add GET endpoint for pushers (PR #716) +- Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck. ([\#671](https://github.com/matrix-org/synapse/issues/671), [\#687](https://github.com/matrix-org/synapse/issues/687)) +- Add URL previewing support. ([\#688](https://github.com/matrix-org/synapse/issues/688)) +- Add login support for LDAP, thanks to Christoph Witzany. 
([\#701](https://github.com/matrix-org/synapse/issues/701)) +- Add GET endpoint for pushers. ([\#716](https://github.com/matrix-org/synapse/issues/716)) Changes: -- Never notify for member events (PR #667) -- Deduplicate identical `/sync` requests (PR #668) -- Require user to have left room to forget room (PR #673) -- Use DNS cache if within TTL (PR #677) -- Let users see their own leave events (PR #699) -- Deduplicate membership changes (PR #700) -- Increase performance of pusher code (PR #705) -- Respond with error status 504 if failed to talk to remote server (PR #731) -- Increase search performance on postgres (PR #745) +- Never notify for member events. ([\#667](https://github.com/matrix-org/synapse/issues/667)) +- Deduplicate identical `/sync` requests. ([\#668](https://github.com/matrix-org/synapse/issues/668)) +- Require user to have left room to forget room. ([\#673](https://github.com/matrix-org/synapse/issues/673)) +- Use DNS cache if within TTL. ([\#677](https://github.com/matrix-org/synapse/issues/677)) +- Let users see their own leave events. ([\#699](https://github.com/matrix-org/synapse/issues/699)) +- Deduplicate membership changes. ([\#700](https://github.com/matrix-org/synapse/issues/700)) +- Increase performance of pusher code. ([\#705](https://github.com/matrix-org/synapse/issues/705)) +- Respond with error status 504 if failed to talk to remote server. ([\#731](https://github.com/matrix-org/synapse/issues/731)) +- Increase search performance on postgres. ([\#745](https://github.com/matrix-org/synapse/issues/745)) Bug fixes: -- Fix bug where disabling all notifications still resulted in push (PR #678) -- Fix bug where users couldn't reject remote invites if remote refused (PR #691) -- Fix bug where synapse attempted to backfill from itself (PR #693) -- Fix bug where profile information was not correctly added when joining remote rooms (PR #703) -- Fix bug where register API required incorrect key name for AS registration (PR #727) +- Fix bug where disabling all notifications still resulted in push. ([\#678](https://github.com/matrix-org/synapse/issues/678)) +- Fix bug where users couldn't reject remote invites if remote refused. ([\#691](https://github.com/matrix-org/synapse/issues/691)) +- Fix bug where synapse attempted to backfill from itself. ([\#693](https://github.com/matrix-org/synapse/issues/693)) +- Fix bug where profile information was not correctly added when joining remote rooms. ([\#703](https://github.com/matrix-org/synapse/issues/703)) +- Fix bug where register API required incorrect key name for AS registration. ([\#727](https://github.com/matrix-org/synapse/issues/727)) Changes in synapse v0.14.0 (2016-03-30) ======================================= @@ -2753,58 +2753,58 @@ Changes in synapse v0.14.0-rc2 (2016-03-23) Features: -- Add published room list API (PR #657) +- Add published room list API. ([\#657](https://github.com/matrix-org/synapse/issues/657)) Changes: -- Change various caches to consume less memory (PR #656, #658, #660, #662, #663, #665) -- Allow rooms to be published without requiring an alias (PR #664) -- Intern common strings in caches to reduce memory footprint (\#666) +- Change various caches to consume less memory. 
([\#656](https://github.com/matrix-org/synapse/issues/656), [\#658](https://github.com/matrix-org/synapse/issues/658), [\#660](https://github.com/matrix-org/synapse/issues/660), [\#662](https://github.com/matrix-org/synapse/issues/662), [\#663](https://github.com/matrix-org/synapse/issues/663), [\#665](https://github.com/matrix-org/synapse/issues/665)) +- Allow rooms to be published without requiring an alias. ([\#664](https://github.com/matrix-org/synapse/issues/664)) +- Intern common strings in caches to reduce memory footprint. ([\#666](https://github.com/matrix-org/synapse/issues/666)) Bug fixes: -- Fix reject invites over federation (PR #646) -- Fix bug where registration was not idempotent (PR #649) -- Update aliases event after deleting aliases (PR #652) -- Fix unread notification count, which was sometimes wrong (PR #661) +- Fix reject invites over federation. ([\#646](https://github.com/matrix-org/synapse/issues/646)) +- Fix bug where registration was not idempotent. ([\#649](https://github.com/matrix-org/synapse/issues/649)) +- Update aliases event after deleting aliases. ([\#652](https://github.com/matrix-org/synapse/issues/652)) +- Fix unread notification count, which was sometimes wrong. ([\#661](https://github.com/matrix-org/synapse/issues/661)) Changes in synapse v0.14.0-rc1 (2016-03-14) =========================================== Features: -- Add `event_id` to response to state event PUT (PR #581) -- Allow guest users access to messages in rooms they have joined (PR #587) -- Add config for what state is included in a room invite (PR #598) -- Send the inviter's member event in room invite state (PR #607) -- Add error codes for malformed/bad JSON in /login (PR #608) -- Add support for changing the actions for default rules (PR #609) -- Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1 (PR #612) -- Add ability for alias creators to delete aliases (PR #614) -- Add profile information to invites (PR #624) +- Add `event_id` to response to state event PUT. ([\#581](https://github.com/matrix-org/synapse/issues/581)) +- Allow guest users access to messages in rooms they have joined. ([\#587](https://github.com/matrix-org/synapse/issues/587)) +- Add config for what state is included in a room invite. ([\#598](https://github.com/matrix-org/synapse/issues/598)) +- Send the inviter's member event in room invite state. ([\#607](https://github.com/matrix-org/synapse/issues/607)) +- Add error codes for malformed/bad JSON in /login. ([\#608](https://github.com/matrix-org/synapse/issues/608)) +- Add support for changing the actions for default rules. ([\#609](https://github.com/matrix-org/synapse/issues/609)) +- Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1. ([\#612](https://github.com/matrix-org/synapse/issues/612)) +- Add ability for alias creators to delete aliases. ([\#614](https://github.com/matrix-org/synapse/issues/614)) +- Add profile information to invites. 
([\#624](https://github.com/matrix-org/synapse/issues/624)) Changes: -- Enforce `user_id` exclusivity for AS registrations (PR #572) -- Make adding push rules idempotent (PR #587) -- Improve presence performance (PR #582, #586) -- Change presence semantics for `last_active_ago` (PR #582, #586) -- Don't allow `m.room.create` to be changed (PR #596) -- Add 800x600 to default list of valid thumbnail sizes (PR #616) -- Always include kicks and bans in full /sync (PR #625) -- Send history visibility on boundary changes (PR #626) -- Register endpoint now returns a `refresh_token` (PR #637) +- Enforce `user_id` exclusivity for AS registrations. ([\#572](https://github.com/matrix-org/synapse/issues/572)) +- Make adding push rules idempotent. ([\#587](https://github.com/matrix-org/synapse/issues/587)) +- Improve presence performance. ([\#582](https://github.com/matrix-org/synapse/issues/582), [\#586](https://github.com/matrix-org/synapse/issues/586)) +- Change presence semantics for `last_active_ago`. ([\#582](https://github.com/matrix-org/synapse/issues/582), [\#586](https://github.com/matrix-org/synapse/issues/586)) +- Don't allow `m.room.create` to be changed. ([\#596](https://github.com/matrix-org/synapse/issues/596)) +- Add 800x600 to default list of valid thumbnail sizes. ([\#616](https://github.com/matrix-org/synapse/issues/616)) +- Always include kicks and bans in full /sync. ([\#625](https://github.com/matrix-org/synapse/issues/625)) +- Send history visibility on boundary changes. ([\#626](https://github.com/matrix-org/synapse/issues/626)) +- Register endpoint now returns a `refresh_token`. ([\#637](https://github.com/matrix-org/synapse/issues/637)) Bug fixes: -- Fix bug where we returned incorrect state in /sync (PR #573) -- Always return a JSON object from push rule API (PR #606) -- Fix bug where registering without a user id sometimes failed (PR #610) -- Report size of ExpiringCache in cache size metrics (PR #611) -- Fix rejection of invites to empty rooms (PR #615) -- Fix usage of `bcrypt` to not use `checkpw` (PR #619) -- Pin `pysaml2` dependency (PR #634) -- Fix bug in `/sync` where timeline order was incorrect for backfilled events (PR #635) +- Fix bug where we returned incorrect state in /sync. ([\#573](https://github.com/matrix-org/synapse/issues/573)) +- Always return a JSON object from push rule API. ([\#606](https://github.com/matrix-org/synapse/issues/606)) +- Fix bug where registering without a user id sometimes failed. ([\#610](https://github.com/matrix-org/synapse/issues/610)) +- Report size of ExpiringCache in cache size metrics. ([\#611](https://github.com/matrix-org/synapse/issues/611)) +- Fix rejection of invites to empty rooms. ([\#615](https://github.com/matrix-org/synapse/issues/615)) +- Fix usage of `bcrypt` to not use `checkpw`. ([\#619](https://github.com/matrix-org/synapse/issues/619)) +- Pin `pysaml2` dependency. ([\#634](https://github.com/matrix-org/synapse/issues/634)) +- Fix bug in `/sync` where timeline order was incorrect for backfilled events. 
([\#635](https://github.com/matrix-org/synapse/issues/635)) Changes in synapse v0.13.3 (2016-02-11) ======================================= @@ -2814,7 +2814,7 @@ Changes in synapse v0.13.3 (2016-02-11) Changes in synapse v0.13.2 (2016-02-11) ======================================= -- Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request (PR #570) +- Fix bug where `/events` would fail to skip some events if there had been more events than the limit specified since the last request. ([\#570](https://github.com/matrix-org/synapse/issues/570)) Changes in synapse v0.13.1 (2016-02-10) ======================================= @@ -2828,176 +2828,176 @@ This version includes an upgrade of the schema, specifically adding an index to Changes: -- Improve general performance (PR #540, #543. \#544, #54, #549, #567) -- Change guest user ids to be incrementing integers (PR #550) -- Improve performance of public room list API (PR #552) -- Change profile API to omit keys rather than return null (PR #557) -- Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/` (PR #595) +- Improve general performance. ([\#540](https://github.com/matrix-org/synapse/issues/540), [\#543](https://github.com/matrix-org/synapse/issues/543). [\#544](https://github.com/matrix-org/synapse/issues/544), [\#54](https://github.com/matrix-org/synapse/issues/54), [\#549](https://github.com/matrix-org/synapse/issues/549), [\#567](https://github.com/matrix-org/synapse/issues/567)) +- Change guest user ids to be incrementing integers. ([\#550](https://github.com/matrix-org/synapse/issues/550)) +- Improve performance of public room list API. ([\#552](https://github.com/matrix-org/synapse/issues/552)) +- Change profile API to omit keys rather than return null. ([\#557](https://github.com/matrix-org/synapse/issues/557)) +- Add `/media/r0` endpoint prefix, which is equivalent to `/media/v1/`. ([\#595](https://github.com/matrix-org/synapse/issues/595)) Bug fixes: -- Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device (PR #547) -- Fix bug where unread count could be wrong (PR #568) +- Fix bug with upgrading guest accounts where it would fail if you opened the registration email on a different device. ([\#547](https://github.com/matrix-org/synapse/issues/547)) +- Fix bug where unread count could be wrong. ([\#568](https://github.com/matrix-org/synapse/issues/568)) Changes in synapse v0.12.1-rc1 (2016-01-29) =========================================== Features: -- Add unread notification counts in `/sync` (PR #456) -- Add support for inviting 3pids in `/createRoom` (PR #460) -- Add ability for guest accounts to upgrade (PR #462) -- Add `/versions` API (PR #468) -- Add `event` to `/context` API (PR #492) -- Add specific error code for invalid user names in `/register` (PR #499) -- Add support for push badge counts (PR #507) -- Add support for non-guest users to peek in rooms using `/events` (PR #510) +- Add unread notification counts in `/sync`. ([\#456](https://github.com/matrix-org/synapse/issues/456)) +- Add support for inviting 3pids in `/createRoom`. ([\#460](https://github.com/matrix-org/synapse/issues/460)) +- Add ability for guest accounts to upgrade. ([\#462](https://github.com/matrix-org/synapse/issues/462)) +- Add `/versions` API. ([\#468](https://github.com/matrix-org/synapse/issues/468)) +- Add `event` to `/context` API. 
([\#492](https://github.com/matrix-org/synapse/issues/492)) +- Add specific error code for invalid user names in `/register`. ([\#499](https://github.com/matrix-org/synapse/issues/499)) +- Add support for push badge counts. ([\#507](https://github.com/matrix-org/synapse/issues/507)) +- Add support for non-guest users to peek in rooms using `/events`. ([\#510](https://github.com/matrix-org/synapse/issues/510)) Changes: -- Change `/sync` so that guest users only get rooms they've joined (PR #469) -- Change to require unbanning before other membership changes (PR #501) -- Change default push rules to notify for all messages (PR #486) -- Change default push rules to not notify on membership changes (PR #514) -- Change default push rules in one to one rooms to only notify for events that are messages (PR #529) -- Change `/sync` to reject requests with a `from` query param (PR #512) -- Change server manhole to use SSH rather than telnet (PR #473) -- Change server to require AS users to be registered before use (PR #487) -- Change server not to start when ASes are invalidly configured (PR #494) -- Change server to require ID and `as_token` to be unique for AS's (PR #496) -- Change maximum pagination limit to 1000 (PR #497) +- Change `/sync` so that guest users only get rooms they've joined. ([\#469](https://github.com/matrix-org/synapse/issues/469)) +- Change to require unbanning before other membership changes. ([\#501](https://github.com/matrix-org/synapse/issues/501)) +- Change default push rules to notify for all messages. ([\#486](https://github.com/matrix-org/synapse/issues/486)) +- Change default push rules to not notify on membership changes. ([\#514](https://github.com/matrix-org/synapse/issues/514)) +- Change default push rules in one to one rooms to only notify for events that are messages. ([\#529](https://github.com/matrix-org/synapse/issues/529)) +- Change `/sync` to reject requests with a `from` query param. ([\#512](https://github.com/matrix-org/synapse/issues/512)) +- Change server manhole to use SSH rather than telnet. ([\#473](https://github.com/matrix-org/synapse/issues/473)) +- Change server to require AS users to be registered before use. ([\#487](https://github.com/matrix-org/synapse/issues/487)) +- Change server not to start when ASes are invalidly configured. ([\#494](https://github.com/matrix-org/synapse/issues/494)) +- Change server to require ID and `as_token` to be unique for AS's. ([\#496](https://github.com/matrix-org/synapse/issues/496)) +- Change maximum pagination limit to 1000. ([\#497](https://github.com/matrix-org/synapse/issues/497)) Bug fixes: -- Fix bug where `/sync` didn't return when something under the leave key changed (PR #461) -- Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464) -- Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475) -- Fix bug where we occasionally still logged access tokens (PR #477) -- Fix bug where `/events` would always return immediately for guest users (PR #480) -- Fix bug where `/sync` unexpectedly returned old left rooms (PR #481) -- Fix enabling and disabling push rules (PR #498) -- Fix bug where `/register` returned 500 when given unicode username (PR #513) +- Fix bug where `/sync` didn't return when something under the leave key changed. ([\#461](https://github.com/matrix-org/synapse/issues/461)) +- Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop`. 
([\#464](https://github.com/matrix-org/synapse/issues/464)) +- Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail. ([\#475](https://github.com/matrix-org/synapse/issues/475)) +- Fix bug where we occasionally still logged access tokens. ([\#477](https://github.com/matrix-org/synapse/issues/477)) +- Fix bug where `/events` would always return immediately for guest users. ([\#480](https://github.com/matrix-org/synapse/issues/480)) +- Fix bug where `/sync` unexpectedly returned old left rooms. ([\#481](https://github.com/matrix-org/synapse/issues/481)) +- Fix enabling and disabling push rules. ([\#498](https://github.com/matrix-org/synapse/issues/498)) +- Fix bug where `/register` returned 500 when given unicode username. ([\#513](https://github.com/matrix-org/synapse/issues/513)) Changes in synapse v0.12.0 (2016-01-04) ======================================= -- Expose `/login` under `r0` (PR #459) +- Expose `/login` under `r0`. ([\#459](https://github.com/matrix-org/synapse/issues/459)) Changes in synapse v0.12.0-rc3 (2015-12-23) =========================================== -- Allow guest accounts access to `/sync` (PR #455) -- Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454) -- Include urls for room avatars in the response to `/publicRooms` (PR #453) -- Don't set a identicon as the avatar for a user when they register (PR #450) -- Add a `display_name` to third-party invites (PR #449) -- Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446) -- Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before the a response was computed to the first request then the same response is used for both requests (PR #457) -- Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally (PR #452) -- Fix 500 when pagination search results (PR #447) -- Fix a bug where synapse was leaking raw email address in third-party invites (PR #448) +- Allow guest accounts access to `/sync`. ([\#455](https://github.com/matrix-org/synapse/issues/455)) +- Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. ([\#454](https://github.com/matrix-org/synapse/issues/454)) +- Include urls for room avatars in the response to `/publicRooms`. ([\#453](https://github.com/matrix-org/synapse/issues/453)) +- Don't set an identicon as the avatar for a user when they register. ([\#450](https://github.com/matrix-org/synapse/issues/450)) +- Add a `display_name` to third-party invites. ([\#449](https://github.com/matrix-org/synapse/issues/449)) +- Send more information to the identity server for third-party invites so that it can send richer messages to the invitee. ([\#446](https://github.com/matrix-org/synapse/issues/446)) +- Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before a response to the first request has been computed, the same response is used for both requests. ([\#457](https://github.com/matrix-org/synapse/issues/457)) +- Fix a bug where synapse would always request the signing keys of remote servers even when the key was cached locally. ([\#452](https://github.com/matrix-org/synapse/issues/452)) +- Fix 500 when paginating search results. 
([\#447](https://github.com/matrix-org/synapse/issues/447)) +- Fix a bug where synapse was leaking raw email address in third-party invites. ([\#448](https://github.com/matrix-org/synapse/issues/448)) Changes in synapse v0.12.0-rc2 (2015-12-14) =========================================== -- Add caches for whether rooms have been forgotten by a user (PR #434) -- Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI (PR #436) -- Parallelise the processing of `/sync` requests (PR #437) -- Fix race updating presence in `/events` (PR #444) -- Fix bug back-populating search results (PR #441) -- Fix bug calculating state in `/sync` requests (PR #442) +- Add caches for whether rooms have been forgotten by a user. ([\#434](https://github.com/matrix-org/synapse/issues/434)) +- Remove instructions to use `--process-dependency-link` since all of the dependencies of synapse are on PyPI. ([\#436](https://github.com/matrix-org/synapse/issues/436)) +- Parallelise the processing of `/sync` requests. ([\#437](https://github.com/matrix-org/synapse/issues/437)) +- Fix race updating presence in `/events`. ([\#444](https://github.com/matrix-org/synapse/issues/444)) +- Fix bug back-populating search results. ([\#441](https://github.com/matrix-org/synapse/issues/441)) +- Fix bug calculating state in `/sync` requests. ([\#442](https://github.com/matrix-org/synapse/issues/442)) Changes in synapse v0.12.0-rc1 (2015-12-10) =========================================== -- Host the client APIs released as r0 by on paths prefixed by `/_matrix/client/r0`. (PR #430, PR #415, PR #400) +- Host the client APIs released as r0 by on paths prefixed by `/_matrix/client/r0`. ([\#430](https://github.com/matrix-org/synapse/issues/430), [\#415](https://github.com/matrix-org/synapse/issues/415), [\#400](https://github.com/matrix-org/synapse/issues/400)) - Updates the client APIs to match r0 of the matrix specification. - - All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. (PR #402) - - Search results are now given as a JSON array rather than a JSON object (PR #405) - - Miscellaneous changes to search (PR #403, PR #406, PR #412) - - Filter JSON objects may now be passed as query parameters to `/sync` (PR #431) - - Fix implementation of `/admin/whois` (PR #418) - - Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423) - - Don't push for `m.room.message` by default (PR #411) - - Add API for setting per account user data (PR #392) - - Allow users to forget rooms (PR #385) + - All APIs return events in the new event format, old APIs also include the fields needed to parse the event using the old format for compatibility. ([\#402](https://github.com/matrix-org/synapse/issues/402)) + - Search results are now given as a JSON array rather than a JSON object. ([\#405](https://github.com/matrix-org/synapse/issues/405)) + - Miscellaneous changes to search. ([\#403](https://github.com/matrix-org/synapse/issues/403), [\#406](https://github.com/matrix-org/synapse/issues/406), [\#412](https://github.com/matrix-org/synapse/issues/412)) + - Filter JSON objects may now be passed as query parameters to `/sync`. ([\#431](https://github.com/matrix-org/synapse/issues/431)) + - Fix implementation of `/admin/whois`. 
([\#418](https://github.com/matrix-org/synapse/issues/418)) + - Only include the rooms that user has left in `/sync` if the client requests them in the filter. ([\#423](https://github.com/matrix-org/synapse/issues/423)) + - Don't push for `m.room.message` by default. ([\#411](https://github.com/matrix-org/synapse/issues/411)) + - Add API for setting per account user data. ([\#392](https://github.com/matrix-org/synapse/issues/392)) + - Allow users to forget rooms. ([\#385](https://github.com/matrix-org/synapse/issues/385)) - Performance improvements and monitoring: - - Add per-request counters for CPU time spent on the main python thread. (PR #421, PR #420) - - Add per-request counters for time spent in the database (PR #429) - - Make state updates in the C+S API idempotent (PR #416) - - Only fire `user_joined_room` if the user has actually joined. (PR #410) - - Reuse a single http client, rather than creating new ones (PR #413) -- Fixed a bug upgrading from older versions of synapse on postgresql (PR #417) + - Add per-request counters for CPU time spent on the main python thread. ([\#421](https://github.com/matrix-org/synapse/issues/421), [\#420](https://github.com/matrix-org/synapse/issues/420)) + - Add per-request counters for time spent in the database. ([\#429](https://github.com/matrix-org/synapse/issues/429)) + - Make state updates in the C+S API idempotent. ([\#416](https://github.com/matrix-org/synapse/issues/416)) + - Only fire `user_joined_room` if the user has actually joined. ([\#410](https://github.com/matrix-org/synapse/issues/410)) + - Reuse a single http client, rather than creating new ones. ([\#413](https://github.com/matrix-org/synapse/issues/413)) +- Fixed a bug upgrading from older versions of synapse on postgresql. ([\#417](https://github.com/matrix-org/synapse/issues/417)) Changes in synapse v0.11.1 (2015-11-20) ======================================= -- Add extra options to search API (PR #394) -- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393) -- Don't advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391) -- Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386) -- Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389) +- Add extra options to search API. ([\#394](https://github.com/matrix-org/synapse/issues/394)) +- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them. ([\#393](https://github.com/matrix-org/synapse/issues/393)) +- Don't advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows. ([\#391](https://github.com/matrix-org/synapse/issues/391)) +- Change /v2 sync API to rename `private_user_data` to `account_data`. ([\#386](https://github.com/matrix-org/synapse/issues/386)) +- Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object. 
([\#389](https://github.com/matrix-org/synapse/issues/389)) Changes in synapse v0.11.0-r2 (2015-11-19) ========================================== -- Fix bug in database port script (PR #387) +- Fix bug in database port script. ([\#387](https://github.com/matrix-org/synapse/issues/387)) Changes in synapse v0.11.0-r1 (2015-11-18) ========================================== -- Retry and fail federation requests more aggressively for requests that block client side requests (PR #384) +- Retry and fail federation requests more aggressively for requests that block client side requests. ([\#384](https://github.com/matrix-org/synapse/issues/384)) Changes in synapse v0.11.0 (2015-11-17) ======================================= -- Change CAS login API (PR #349) +- Change CAS login API. ([\#349](https://github.com/matrix-org/synapse/issues/349)) Changes in synapse v0.11.0-rc2 (2015-11-13) =========================================== -- Various changes to /sync API response format (PR #373) -- Fix regression when setting display name in newly joined room over federation (PR #368) -- Fix problem where /search was slow when using SQLite (PR #366) +- Various changes to /sync API response format. ([\#373](https://github.com/matrix-org/synapse/issues/373)) +- Fix regression when setting display name in newly joined room over federation. ([\#368](https://github.com/matrix-org/synapse/issues/368)) +- Fix problem where /search was slow when using SQLite. ([\#366](https://github.com/matrix-org/synapse/issues/366)) Changes in synapse v0.11.0-rc1 (2015-11-11) =========================================== -- Add Search API (PR #307, #324, #327, #336, #350, #359) -- Add `archived` state to v2 /sync API (PR #316) -- Add ability to reject invites (PR #317) -- Add config option to disable password login (PR #322) -- Add the login fallback API (PR #330) -- Add room context API (PR #334) -- Add room tagging support (PR #335) -- Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341) -- Change retry schedule for application services (PR #320) -- Change retry schedule for remote servers (PR #340) -- Fix bug where we hosted static content in the incorrect place (PR #329) -- Fix bug where we didn't increment retry interval for remote servers (PR #343) +- Add Search API. ([\#307](https://github.com/matrix-org/synapse/issues/307), [\#324](https://github.com/matrix-org/synapse/issues/324), [\#327](https://github.com/matrix-org/synapse/issues/327), [\#336](https://github.com/matrix-org/synapse/issues/336), [\#350](https://github.com/matrix-org/synapse/issues/350), [\#359](https://github.com/matrix-org/synapse/issues/359)) +- Add `archived` state to v2 /sync API. ([\#316](https://github.com/matrix-org/synapse/issues/316)) +- Add ability to reject invites. ([\#317](https://github.com/matrix-org/synapse/issues/317)) +- Add config option to disable password login. ([\#322](https://github.com/matrix-org/synapse/issues/322)) +- Add the login fallback API. ([\#330](https://github.com/matrix-org/synapse/issues/330)) +- Add room context API. ([\#334](https://github.com/matrix-org/synapse/issues/334)) +- Add room tagging support. ([\#335](https://github.com/matrix-org/synapse/issues/335)) +- Update v2 /sync API to match spec. 
([\#305](https://github.com/matrix-org/synapse/issues/305), [\#316](https://github.com/matrix-org/synapse/issues/316), [\#321](https://github.com/matrix-org/synapse/issues/321), [\#332](https://github.com/matrix-org/synapse/issues/332), [\#337](https://github.com/matrix-org/synapse/issues/337), [\#341](https://github.com/matrix-org/synapse/issues/341)) +- Change retry schedule for application services. ([\#320](https://github.com/matrix-org/synapse/issues/320)) +- Change retry schedule for remote servers. ([\#340](https://github.com/matrix-org/synapse/issues/340)) +- Fix bug where we hosted static content in the incorrect place. ([\#329](https://github.com/matrix-org/synapse/issues/329)) +- Fix bug where we didn't increment retry interval for remote servers. ([\#343](https://github.com/matrix-org/synapse/issues/343)) Changes in synapse v0.10.1-rc1 (2015-10-15) =========================================== -- Add support for CAS, thanks to Steven Hammerton (PR #295, #296) -- Add support for using macaroons for `access_token` (PR #256, #229) -- Add support for `m.room.canonical_alias` (PR #287) -- Add support for viewing the history of rooms that they have left. (PR #276, #294) -- Add support for refresh tokens (PR #240) -- Add flag on creation which disables federation of the room (PR #279) -- Add some room state to invites. (PR #275) -- Atomically persist events when joining a room over federation (PR #283) -- Change default history visibility for private rooms (PR #271) -- Allow users to redact their own sent events (PR #262) -- Use tox for tests (PR #247) -- Split up syutil into separate libraries (PR #243) +- Add support for CAS, thanks to Steven Hammerton. ([\#295](https://github.com/matrix-org/synapse/issues/295), [\#296](https://github.com/matrix-org/synapse/issues/296)) +- Add support for using macaroons for `access_token`. ([\#256](https://github.com/matrix-org/synapse/issues/256), [\#229](https://github.com/matrix-org/synapse/issues/229)) +- Add support for `m.room.canonical_alias`. ([\#287](https://github.com/matrix-org/synapse/issues/287)) +- Add support for viewing the history of rooms that they have left. ([\#276](https://github.com/matrix-org/synapse/issues/276), [\#294](https://github.com/matrix-org/synapse/issues/294)) +- Add support for refresh tokens. ([\#240](https://github.com/matrix-org/synapse/issues/240)) +- Add flag on creation which disables federation of the room. ([\#279](https://github.com/matrix-org/synapse/issues/279)) +- Add some room state to invites. ([\#275](https://github.com/matrix-org/synapse/issues/275)) +- Atomically persist events when joining a room over federation. ([\#283](https://github.com/matrix-org/synapse/issues/283)) +- Change default history visibility for private rooms. ([\#271](https://github.com/matrix-org/synapse/issues/271)) +- Allow users to redact their own sent events. ([\#262](https://github.com/matrix-org/synapse/issues/262)) +- Use tox for tests. ([\#247](https://github.com/matrix-org/synapse/issues/247)) +- Split up syutil into separate libraries. ([\#243](https://github.com/matrix-org/synapse/issues/243)) Changes in synapse v0.10.0-r2 (2015-09-16) ========================================== - Fix bug where we always fetched remote server signing keys instead of using ones in our cache. - Fix adding threepids to an existing account. -- Fix bug with invinting over federation where remote server was already in the room. (PR #281, SYN-392) +- Fix bug with invinting over federation where remote server was already in the room. 
([\#281](https://github.com/matrix-org/synapse/issues/281), SYN-392) Changes in synapse v0.10.0-r1 (2015-09-08) ========================================== @@ -3023,20 +3023,20 @@ Changes in synapse v0.10.0-rc5 (2015-08-27) Changes in synapse v0.10.0-rc4 (2015-08-27) =========================================== -- Allow UTF-8 filenames for upload. (PR #259) +- Allow UTF-8 filenames for upload. ([\#259](https://github.com/matrix-org/synapse/issues/259)) Changes in synapse v0.10.0-rc3 (2015-08-25) =========================================== -- Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250) -- Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. (PR #249) -- Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245) +- Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. ([\#250](https://github.com/matrix-org/synapse/issues/250)) +- Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. ([\#249](https://github.com/matrix-org/synapse/issues/249)) +- Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. ([\#245](https://github.com/matrix-org/synapse/issues/245)) - Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example: $ python -m synapse.config read server_name -c homeserver.yaml localhost - (PR #246) + . ([\#246](https://github.com/matrix-org/synapse/issues/246)) Changes in synapse v0.10.0-rc2 (2015-08-24) =========================================== @@ -3051,37 +3051,37 @@ Also see v0.9.4-rc1 changelog, which has been amalgamated into this release. General: -- Upgrade to Twisted 15 (PR #173) -- Add support for serving and fetching encryption keys over federation. (PR #208) -- Add support for logging in with email address (PR #234) -- Add support for new `m.room.canonical_alias` event. (PR #233) +- Upgrade to Twisted 15. ([\#173](https://github.com/matrix-org/synapse/issues/173)) +- Add support for serving and fetching encryption keys over federation. ([\#208](https://github.com/matrix-org/synapse/issues/208)) +- Add support for logging in with email address. ([\#234](https://github.com/matrix-org/synapse/issues/234)) +- Add support for new `m.room.canonical_alias` event. ([\#233](https://github.com/matrix-org/synapse/issues/233)) - Change synapse to treat user IDs case insensitively during registration and login. (If two users already exist with case insensitive matching user ids, synapse will continue to require them to specify their user ids exactly.) -- Error if a user tries to register with an email already in use. (PR #211) -- Add extra and improve existing caches (PR #212, #219, #226, #228) -- Batch various storage request (PR #226, #228) -- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230) -- Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232) -- Add support for AS's to use `v2_alpha` registration API (PR #210) +- Error if a user tries to register with an email already in use. 
([\#211](https://github.com/matrix-org/synapse/issues/211)) +- Add extra and improve existing caches. ([\#212](https://github.com/matrix-org/synapse/issues/212), [\#219](https://github.com/matrix-org/synapse/issues/219), [\#226](https://github.com/matrix-org/synapse/issues/226), [\#228](https://github.com/matrix-org/synapse/issues/228)) +- Batch various storage request. ([\#226](https://github.com/matrix-org/synapse/issues/226), [\#228](https://github.com/matrix-org/synapse/issues/228)) +- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service. ([\#230](https://github.com/matrix-org/synapse/issues/230)) +- Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. ([\#232](https://github.com/matrix-org/synapse/issues/232)) +- Add support for AS's to use `v2_alpha` registration API. ([\#210](https://github.com/matrix-org/synapse/issues/210)) Configuration: -- Add `--generate-keys` that will generate any missing cert and key files in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. (PR #220) -- `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. (PR #220) -- Add `--print-pidfile` flag that controls the printing of the pid to stdout of the demonised process. (PR #213) +- Add `--generate-keys` that will generate any missing cert and key files in the configuration files. This is equivalent to running `--generate-config` on an existing configuration file. ([\#220](https://github.com/matrix-org/synapse/issues/220)) +- `--generate-config` now no longer requires a `--server-name` parameter when used on existing configuration files. ([\#220](https://github.com/matrix-org/synapse/issues/220)) +- Add `--print-pidfile` flag that controls the printing of the pid to stdout of the demonised process. ([\#213](https://github.com/matrix-org/synapse/issues/213)) Media Repository: -- Fix bug where we picked a lower resolution image than requested. (PR #205) -- Add support for specifying if a the media repository should dynamically thumbnail images or not. (PR #206) +- Fix bug where we picked a lower resolution image than requested. ([\#205](https://github.com/matrix-org/synapse/issues/205)) +- Add support for specifying if a the media repository should dynamically thumbnail images or not. ([\#206](https://github.com/matrix-org/synapse/issues/206)) Metrics: -- Add statistics from the reactor to the metrics API. (PR #224, #225) +- Add statistics from the reactor to the metrics API. ([\#224](https://github.com/matrix-org/synapse/issues/224), [\#225](https://github.com/matrix-org/synapse/issues/225)) Demo Homeservers: -- Fix starting the demo homeservers without rate-limiting enabled. (PR #182) -- Fix enabling registration on demo homeservers (PR #223) +- Fix starting the demo homeservers without rate-limiting enabled. ([\#182](https://github.com/matrix-org/synapse/issues/182)) +- Fix enabling registration on demo homeservers. ([\#223](https://github.com/matrix-org/synapse/issues/223)) Changes in synapse v0.9.4-rc1 (2015-07-21) ========================================== @@ -3089,13 +3089,13 @@ Changes in synapse v0.9.4-rc1 (2015-07-21) General: - Add basic implementation of receipts. (SPEC-99) -- Add support for configuration presets in room creation API. (PR #203) +- Add support for configuration presets in room creation API. 
([\#203](https://github.com/matrix-org/synapse/issues/203)) - Add auth event that limits the visibility of history for new users. (SPEC-134) -- Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!) -- Add client side key management APIs for end to end encryption. (PR #198) +- Add SAML2 login/registration support. Thanks Muthu Subramanian! ([\#201](https://github.com/matrix-org/synapse/issues/201)) +- Add client side key management APIs for end to end encryption. ([\#198](https://github.com/matrix-org/synapse/issues/198)) - Change power level semantics so that you cannot kick, ban or change power levels of users that have equal or greater power level than you. (SYN-192) -- Improve performance by bulk inserting events where possible. (PR #193) -- Improve performance by bulk verifying signatures where possible. (PR #194) +- Improve performance by bulk inserting events where possible. ([\#193](https://github.com/matrix-org/synapse/issues/193)) +- Improve performance by bulk verifying signatures where possible. ([\#194](https://github.com/matrix-org/synapse/issues/194)) Configuration: diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 4ae2fcfee3a5..57cac7ed16ef 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -66,7 +66,7 @@ Of their installation methods, we recommend ```shell pip install --user pipx -pipx install poetry +pipx install poetry==1.5.1 # Problems with Poetry 1.6, see https://github.com/matrix-org/synapse/issues/16147 ``` but see poetry's [installation instructions](https://python-poetry.org/docs/#installation) diff --git a/docs/development/synapse_architecture/streams.md b/docs/development/synapse_architecture/streams.md index bee0b8a8c0a2..67d92acfa1fc 100644 --- a/docs/development/synapse_architecture/streams.md +++ b/docs/development/synapse_architecture/streams.md @@ -51,17 +51,24 @@ will be inserted with that ID. For any given stream reader (including writers themselves), we may define a per-writer current stream ID: -> The current stream ID _for a writer W_ is the largest stream ID such that +> A current stream ID _for a writer W_ is the largest stream ID such that > all transactions added by W with equal or smaller ID have completed. Similarly, there is a "linear" notion of current stream ID: -> The "linear" current stream ID is the largest stream ID such that +> A "linear" current stream ID is the largest stream ID such that > all facts (added by any writer) with equal or smaller ID have completed. Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs. Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates. +The above definition does not give a unique current stream ID, in fact there can +be a range of current stream IDs. Synapse uses both the minimum and maximum IDs +for different purposes. Most often the maximum is used, as its generally +beneficial for workers to advance their IDs as soon as possible. However, the +minimum is used in situations where e.g. another worker is going to wait until +the stream advances past a position. + **NB.** For both senses of "current", that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID. 
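The range of current stream IDs described in the new paragraph above can be made concrete with a small sketch. This is a toy illustration only, not Synapse's actual ID generator; the writer names and positions are invented for the example.

```python
# Toy illustration (assumed names/values): each writer reports the largest
# stream ID for which all of its facts with equal or smaller IDs have completed.
per_writer_position = {
    "persister-1": 117,  # persister-1 has completed everything up to 117
    "persister-2": 112,  # persister-2 has completed everything up to 112
}

# "Advance as soon as possible": take the maximum of the per-writer positions.
# This is the common case described above.
max_current = max(per_writer_position.values())  # 117

# "Wait until the stream has passed a position": take the minimum, i.e. only
# count IDs that every writer has already moved past.
min_current = min(per_writer_position.values())  # 112

print(min_current, max_current)
```

Any position in between is also a valid "current" stream ID; which end of the range a reader should use depends on what it needs the position for, as the paragraph above explains.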
For single-writer streams, the per-writer current ID and the linear current ID are the same. @@ -114,7 +121,7 @@ Writers need to track: - track their current position (i.e. its own per-writer stream ID). - their facts currently awaiting completion. -At startup, +At startup, - the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and - there are no facts awaiting completion.
diff --git a/docs/modules/add_extra_fields_to_client_events_unsigned.md b/docs/modules/add_extra_fields_to_client_events_unsigned.md new file mode 100644 index 000000000000..c4fd19bde0d2 --- /dev/null +++ b/docs/modules/add_extra_fields_to_client_events_unsigned.md @@ -0,0 +1,32 @@ +# Add extra fields to client events unsigned section callbacks + +_First introduced in Synapse v1.96.0_ + +This callback allows modules to add extra fields to the unsigned section of +events when they get sent down to clients. + +These get called *every* time an event is to be sent to clients, so care should +be taken with respect to performance. + +### API + +To register the callback, use +`register_add_extra_fields_to_unsigned_client_event_callbacks` on the +`ModuleApi`. + +The callback should be of the form + +```python
+async def add_field_to_unsigned(
+    event: EventBase,
+) -> JsonDict:
+```
+ +where the extra fields to add to the event's unsigned section are returned. +(Modules must not attempt to modify the `event` directly). + +This cannot be used to alter the "core" fields in the unsigned section emitted +by Synapse itself. + +If multiple such callbacks try to add the same field to an event's unsigned +section, the last-registered callback wins.
diff --git a/docs/opentracing.md b/docs/opentracing.md index abb94b565f21..bf4887416033 100644 --- a/docs/opentracing.md +++ b/docs/opentracing.md @@ -51,6 +51,11 @@ docker run -d --name jaeger \ jaegertracing/all-in-one:1 ``` +By default, Synapse will publish traces to Jaeger on localhost. +If Jaeger is hosted elsewhere, point Synapse to the correct host by setting +`opentracing.jaeger_config.local_agent.reporting_host` [in the Synapse configuration](usage/configuration/config_documentation.md#opentracing-1) +or by setting the `JAEGER_AGENT_HOST` environment variable to the desired address. + Latest documentation is probably at https://www.jaegertracing.io/docs/latest/getting-started.
diff --git a/docs/postgres.md b/docs/postgres.md index 02d4b9b162f6..ad7c6a0738d3 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -66,7 +66,7 @@ database: args: user: password: - database: + dbname: host: cp_min: 5 cp_max: 10
diff --git a/docs/upgrade.md b/docs/upgrade.md index ba2f7703bc75..329c9c778713 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,15 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +Generally Synapse database schemas are compatible across multiple versions, but once +a version of Synapse is deployed you may not be able to roll back automatically. +The following table gives the version ranges and the earliest version they can +be rolled back to. E.g. Synapse versions v1.58.0 through v1.61.1 can be rolled +back safely to v1.57.0, but starting with v1.62.0 it is only safe to roll back to +v1.61.0.
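Returning to the `docs/modules/add_extra_fields_to_client_events_unsigned.md` page added earlier in this changeset: a minimal module using the new callback might look as follows. This is a hedged sketch, not taken from the diff; the module skeleton follows the usual Synapse module convention, and the keyword argument name passed to `register_add_extra_fields_to_unsigned_client_event_callbacks` as well as the `io.example.event_type` field name are assumptions made for illustration.

```python
# Sketch of a module registering the add-extra-fields-to-unsigned callback.
# NOTE: the keyword argument name below and the "io.example.event_type" field
# are illustrative assumptions, not taken from the documentation above.
from synapse.events import EventBase
from synapse.module_api import ModuleApi
from synapse.types import JsonDict


class ExampleUnsignedFieldsModule:
    def __init__(self, config: dict, api: ModuleApi):
        # Register the callback when the module is loaded.
        api.register_add_extra_fields_to_unsigned_client_event_callbacks(
            add_field_to_unsigned_callback=self.add_field_to_unsigned,
        )

    async def add_field_to_unsigned(self, event: EventBase) -> JsonDict:
        # Return the extra fields to include in the event's unsigned section;
        # the event itself must not be modified.
        return {"io.example.event_type": event.type}
```

With such a module configured, every event sent down to clients would carry an extra `io.example.event_type` entry in its `unsigned` section, alongside (but never replacing) the "core" unsigned fields Synapse adds itself.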
+ + + # Upgrading to v1.93.0 ## Minimum supported Rust version diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 92e00c138086..7c4e742cd5d5 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -230,6 +230,13 @@ Example configuration: presence: enabled: false ``` + +`enabled` can also be set to a special value of "untracked" which ignores updates +received via clients and federation, while still accepting updates from the +[module API](../../modules/index.md). + +*The "untracked" option was added in Synapse 1.96.0.* + --- ### `require_auth_for_profile_requests` @@ -1440,7 +1447,7 @@ database: args: user: synapse_user password: secretpassword - database: synapse + dbname: synapse host: localhost port: 5432 cp_min: 5 @@ -1519,7 +1526,7 @@ databases: args: user: synapse_user password: secretpassword - database: synapse_main + dbname: synapse_main host: localhost port: 5432 cp_min: 5 @@ -1532,7 +1539,7 @@ databases: args: user: synapse_user password: secretpassword - database: synapse_state + dbname: synapse_state host: localhost port: 5432 cp_min: 5 @@ -1746,6 +1753,19 @@ rc_third_party_invite: burst_count: 10 ``` --- +### `rc_media_create` + +This option ratelimits creation of MXC URIs via the `/_matrix/media/v1/create` +endpoint based on the account that's creating the media. Defaults to +`per_second: 10`, `burst_count: 50`. + +Example configuration: +```yaml +rc_media_create: + per_second: 10 + burst_count: 50 +``` +--- ### `rc_federation` Defines limits on federation requests. @@ -1807,6 +1827,27 @@ Example configuration: media_store_path: "DATADIR/media_store" ``` --- +### `max_pending_media_uploads` + +How many *pending media uploads* can a given user have? A pending media upload +is a created MXC URI that (a) is not expired (the `unused_expires_at` timestamp +has not passed) and (b) the media has not yet been uploaded for. Defaults to 5. + +Example configuration: +```yaml +max_pending_media_uploads: 5 +``` +--- +### `unused_expiration_time` + +How long to wait in milliseconds before expiring created media IDs. Defaults to +"24h" + +Example configuration: +```yaml +unused_expiration_time: "1h" +``` +--- ### `media_storage_providers` Media storage providers allow media to be stored in different @@ -3797,62 +3838,160 @@ enable_room_list_search: false --- ### `alias_creation_rules` -The `alias_creation_rules` option controls who is allowed to create aliases -on this server. +The `alias_creation_rules` option allows server admins to prevent unwanted +alias creation on this server. + +This setting is an optional list of 0 or more rules. By default, no list is +provided, meaning that all alias creations are permitted. -The format of this option is a list of rules that contain globs that -match against user_id, room_id and the new alias (fully qualified with -server name). The action in the first rule that matches is taken, -which can currently either be "allow" or "deny". +Otherwise, requests to create aliases are matched against each rule in order. +The first rule that matches decides if the request is allowed or denied. If no +rule matches, the request is denied. In particular, this means that configuring +an empty list of rules will deny every alias creation request. -Missing user_id/room_id/alias fields default to "*". +Each rule is a YAML object containing four fields, each of which is an optional string: -If no rules match the request is denied. 
An empty list means no one -can create aliases. +* `user_id`: a glob pattern that matches against the creator of the alias. +* `alias`: a glob pattern that matches against the alias being created. +* `room_id`: a glob pattern that matches against the room ID the alias is being pointed at. +* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. -Options for the rules include: -* `user_id`: Matches against the creator of the alias. Defaults to "*". -* `alias`: Matches against the alias being created. Defaults to "*". -* `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*" -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +Each of the glob patterns is optional, defaulting to `*` ("match anything"). +Note that the patterns match against fully qualified IDs, e.g. against +`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead +of `alice`, `room` and `abcdefghijk`. Example configuration: + +```yaml +# No rule list specified. All alias creations are allowed. +# This is the default behaviour. +alias_creation_rules: +``` + +```yaml +# A list of one rule which allows everything. +# This has the same effect as the previous example. +alias_creation_rules: + - "action": "allow" +``` + +```yaml +# An empty list of rules. All alias creations are denied. +alias_creation_rules: [] +``` + ```yaml +# A list of one rule which denies everything. +# This has the same effect as the previous example. alias_creation_rules: - - user_id: "bad_user" - alias: "spammy_alias" - room_id: "*" + - "action": "deny" +``` + +```yaml +# Prevent a specific user from creating aliases. +# Allow other users to create any alias. +alias_creation_rules: + - user_id: "@bad_user:example.com" action: deny + + - action: allow ``` + +```yaml +# Prevent aliases being created which point to a specific room. +alias_creation_rules: + - room_id: "!forbiddenRoom:example.com" + action: deny + + - action: allow +``` + --- ### `room_list_publication_rules` -The `room_list_publication_rules` option controls who can publish and -which rooms can be published in the public room list. +The `room_list_publication_rules` option allows server admins to prevent +unwanted entries from being published in the public room list. The format of this option is the same as that for -`alias_creation_rules`. +[`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or more +rules. By default, no list is provided, meaning that all rooms may be +published to the room list. + +Otherwise, requests to publish a room are matched against each rule in order. +The first rule that matches decides if the request is allowed or denied. If no +rule matches, the request is denied. In particular, this means that configuring +an empty list of rules will deny every room publication request. + +Each rule is a YAML object containing four fields, each of which is an optional string: -If the room has one or more aliases associated with it, only one of -the aliases needs to match the alias rule. If there are no aliases -then only rules with `alias: *` match. +* `user_id`: a glob pattern that matches against the user publishing the room. +* `alias`: a glob pattern that matches against one of the published room's aliases. + - If the room has no aliases, the alias match fails unless `alias` is unspecified or `*`. + - If the room has exactly one alias, the alias match succeeds if the `alias` pattern matches that alias.
+ - If the room has two or more aliases, the alias match succeeds if the pattern matches at least one of the aliases. +* `room_id`: a glob pattern that matches against the room ID of the room being published. +* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. -If no rules match the request is denied. An empty list means no one -can publish rooms. +Each of the glob patterns is optional, defaulting to `*` ("match anything"). +Note that the patterns match against fully qualified IDs, e.g. against +`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead +of `alice`, `room` and `abcdefghijk`. -Options for the rules include: -* `user_id`: Matches against the creator of the alias. Defaults to "*". -* `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*". -* `room_id`: Matches against the room ID being published. Defaults to "*". -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. Example configuration: + +```yaml +# No rule list specified. Anyone may publish any room to the public list. +# This is the default behaviour. +room_list_publication_rules: +``` + +```yaml +# A list of one rule which allows everything. +# This has the same effect as the previous example. +room_list_publication_rules: + - "action": "allow" +``` + +```yaml +# An empty list of rules. No-one may publish to the room list. +room_list_publication_rules: [] +``` + ```yaml +# A list of one rule which denies everything. +# This has the same effect as the previous example. room_list_publication_rules: - - user_id: "*" - alias: "*" - room_id: "*" - action: allow + - "action": "deny" +``` + +```yaml +# Prevent a specific user from publishing rooms. +# Allow other users to publish anything. +room_list_publication_rules: + - user_id: "@bad_user:example.com" + action: deny + + - action: allow +``` + +```yaml +# Prevent publication of a specific room. +room_list_publication_rules: + - room_id: "!forbiddenRoom:example.com" + action: deny + + - action: allow +``` + +```yaml +# Prevent publication of rooms with at least one alias containing the word "potato". +room_list_publication_rules: + - alias: "#*potato*:example.com" + action: deny + + - action: allow ``` --- @@ -4114,6 +4253,9 @@ outbound_federation_restricted_to: Also see the [worker documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) for more info. + +_Added in Synapse 1.89.0._ + --- ### `run_background_tasks_on` diff --git a/mypy.ini b/mypy.ini index fdfe9432fcc7..1a2b9ea410b0 100644 --- a/mypy.ini +++ b/mypy.ini @@ -37,8 +37,8 @@ files = build_rust.py [mypy-synapse.metrics._reactor_metrics] -# This module imports select.epoll. That exists on Linux, but doesn't on macOS. -# See https://github.com/matrix-org/synapse/pull/11771. +# This module pokes at the internals of OS-specific classes; to appease mypy +# on different systems we add additional ignores. warn_unused_ignores = False [mypy-synapse.util.caches.treecache] diff --git a/poetry.lock b/poetry.lock index a8912802772f..dd99f39ab723 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]] name = "alabaster" @@ -162,33 +162,29 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.9.1" +version = "23.10.1" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-23.9.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"}, - {file = "black-23.9.1-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100"}, - {file = "black-23.9.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71"}, - {file = "black-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7"}, - {file = "black-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186"}, - {file = "black-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f"}, - {file = "black-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204"}, - {file = "black-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377"}, - {file = "black-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393"}, - {file = "black-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9"}, - {file = "black-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f"}, - {file = "black-23.9.1-py3-none-any.whl", hash = "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9"}, - {file = "black-23.9.1.tar.gz", hash = "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d"}, + {file = "black-23.10.1-cp310-cp310-macosx_10_16_arm64.whl", hash = 
"sha256:ec3f8e6234c4e46ff9e16d9ae96f4ef69fa328bb4ad08198c8cee45bb1f08c69"}, + {file = "black-23.10.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:1b917a2aa020ca600483a7b340c165970b26e9029067f019e3755b56e8dd5916"}, + {file = "black-23.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c74de4c77b849e6359c6f01987e94873c707098322b91490d24296f66d067dc"}, + {file = "black-23.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b4d10b0f016616a0d93d24a448100adf1699712fb7a4efd0e2c32bbb219b173"}, + {file = "black-23.10.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b15b75fc53a2fbcac8a87d3e20f69874d161beef13954747e053bca7a1ce53a0"}, + {file = "black-23.10.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:e293e4c2f4a992b980032bbd62df07c1bcff82d6964d6c9496f2cd726e246ace"}, + {file = "black-23.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d56124b7a61d092cb52cce34182a5280e160e6aff3137172a68c2c2c4b76bcb"}, + {file = "black-23.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f157a8945a7b2d424da3335f7ace89c14a3b0625e6593d21139c2d8214d55ce"}, + {file = "black-23.10.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:cfcce6f0a384d0da692119f2d72d79ed07c7159879d0bb1bb32d2e443382bf3a"}, + {file = "black-23.10.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:33d40f5b06be80c1bbce17b173cda17994fbad096ce60eb22054da021bf933d1"}, + {file = "black-23.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:840015166dbdfbc47992871325799fd2dc0dcf9395e401ada6d88fe11498abad"}, + {file = "black-23.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:037e9b4664cafda5f025a1728c50a9e9aedb99a759c89f760bd83730e76ba884"}, + {file = "black-23.10.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:7cb5936e686e782fddb1c73f8aa6f459e1ad38a6a7b0e54b403f1f05a1507ee9"}, + {file = "black-23.10.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:7670242e90dc129c539e9ca17665e39a146a761e681805c54fbd86015c7c84f7"}, + {file = "black-23.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed45ac9a613fb52dad3b61c8dea2ec9510bf3108d4db88422bacc7d1ba1243d"}, + {file = "black-23.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6d23d7822140e3fef190734216cefb262521789367fbdc0b3f22af6744058982"}, + {file = "black-23.10.1-py3-none-any.whl", hash = "sha256:d431e6739f727bb2e0495df64a6c7a5310758e87505f5f8cde9ff6c0f2d7e4fe"}, + {file = "black-23.10.1.tar.gz", hash = "sha256:1f8ce316753428ff68749c65a5f7844631aa18c8679dfd3ca9dc1a289979c258"}, ] [package.dependencies] @@ -420,19 +416,6 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -[[package]] -name = "click-default-group" -version = "1.2.2" -description = "Extends click.Group to invoke a command without explicit subcommand name" -optional = false -python-versions = "*" -files = [ - {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"}, -] - -[package.dependencies] -click = "*" - [[package]] name = "colorama" version = "0.4.6" @@ -471,34 +454,34 @@ files = [ [[package]] name = "cryptography" -version = "41.0.4" +version = "41.0.6" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, - {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, - {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, - {file = "cryptography-41.0.4.tar.gz", hash = 
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, + {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c"}, + {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae"}, + {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d"}, + {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c"}, + {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596"}, + {file = "cryptography-41.0.6-cp37-abi3-win32.whl", hash = "sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660"}, + {file = "cryptography-41.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da"}, + {file = "cryptography-41.0.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09"}, + {file = "cryptography-41.0.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43"}, + {file = "cryptography-41.0.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4"}, + {file = "cryptography-41.0.6.tar.gz", hash = 
"sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3"}, ] [package.dependencies] @@ -600,20 +583,20 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.37" +version = "3.1.40" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.37-py3-none-any.whl", hash = "sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33"}, - {file = "GitPython-3.1.37.tar.gz", hash = "sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"}, + {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, + {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] -test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] [[package]] name = "hiredis" @@ -998,13 +981,13 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.19.1" +version = "4.20.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, - {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, + {file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"}, + {file = "jsonschema-4.20.0.tar.gz", hash = "sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa"}, ] [package.dependencies] @@ -1341,13 +1324,13 @@ test = ["aiounittest", "tox", "twisted"] [[package]] name = "matrix-synapse-ldap3" -version = "0.2.2" +version = "0.3.0" description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" files = [ - {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"}, - {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, + {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, + {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, ] [package.dependencies] @@ -1628,13 +1611,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.22" +version = "8.13.23" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.22-py2.py3-none-any.whl", hash = "sha256:85ceeba9e67984ba98182c77e8e4c70093d38c0c6a0cb2bd392e0694ddaeb1f6"}, - {file = "phonenumbers-8.13.22.tar.gz", hash = "sha256:001664c90f59b8954766c2db85adafc8dbc96177efeb49607ca4e64a7acaf569"}, + {file = "phonenumbers-8.13.23-py2.py3-none-any.whl", hash = "sha256:34d6cb279dd4a64714e324c71350f96e5bda3237be28d11b4c555c44701544cd"}, + {file = "phonenumbers-8.13.23.tar.gz", hash = "sha256:869e44fcaaf276eca6b953a401e2b27d57461f3a18a66cf5f13377e7bb0e228c"}, ] [[package]] @@ -1746,13 +1729,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "prometheus-client" -version = "0.17.1" +version = "0.19.0" description = "Python client for the Prometheus monitoring system." optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"}, - {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"}, + {file = "prometheus_client-0.19.0-py3-none-any.whl", hash = "sha256:c88b1e6ecf6b41cd8fb5731c7ae919bf66df6ec6fafa555cd6c0e16ca169ae92"}, + {file = "prometheus_client-0.19.0.tar.gz", hash = "sha256:4585b0d1223148c27a225b10dbec5ae9bc4c81a99a3fa80774fa6209935324e1"}, ] [package.extras] @@ -1769,6 +1752,8 @@ files = [ {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, + {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, + {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, @@ -1807,13 +1792,13 @@ psycopg2 = "*" [[package]] name = "pyasn1" -version = "0.5.0" +version = "0.5.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, - {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, + {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, + {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"}, ] [[package]] @@ -1843,18 +1828,18 @@ files = [ [[package]] name = "pydantic" -version = "2.4.2" +version = "2.5.1" description = "Data validation using Python type hints" optional = false 
python-versions = ">=3.7" files = [ - {file = "pydantic-2.4.2-py3-none-any.whl", hash = "sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1"}, - {file = "pydantic-2.4.2.tar.gz", hash = "sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7"}, + {file = "pydantic-2.5.1-py3-none-any.whl", hash = "sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b"}, + {file = "pydantic-2.5.1.tar.gz", hash = "sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.10.1" +pydantic-core = "2.14.3" typing-extensions = ">=4.6.1" [package.extras] @@ -1862,117 +1847,116 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.10.1" +version = "2.14.3" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic_core-2.10.1-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63"}, - {file = "pydantic_core-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e"}, - {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e"}, - {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e"}, - {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6"}, - {file = "pydantic_core-2.10.1-cp310-none-win32.whl", hash = "sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b"}, - {file = "pydantic_core-2.10.1-cp310-none-win_amd64.whl", hash = "sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0"}, - {file = "pydantic_core-2.10.1-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea"}, - {file = "pydantic_core-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4"}, - {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c"}, - {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b"}, - {file = 
"pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f"}, - {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a"}, - {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8"}, - {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4"}, - {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607"}, - {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f"}, - {file = "pydantic_core-2.10.1-cp311-none-win32.whl", hash = "sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6"}, - {file = "pydantic_core-2.10.1-cp311-none-win_amd64.whl", hash = "sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27"}, - {file = "pydantic_core-2.10.1-cp311-none-win_arm64.whl", hash = "sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325"}, - {file = "pydantic_core-2.10.1-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921"}, - {file = "pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901"}, - {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d"}, - {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f"}, - {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c"}, - {file = "pydantic_core-2.10.1-cp312-none-win32.whl", hash = "sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f"}, - {file = "pydantic_core-2.10.1-cp312-none-win_amd64.whl", hash = "sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430"}, - {file = "pydantic_core-2.10.1-cp312-none-win_arm64.whl", hash = "sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94"}, - {file = 
"pydantic_core-2.10.1-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15"}, - {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f"}, - {file = "pydantic_core-2.10.1-cp37-none-win32.whl", hash = "sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c"}, - {file = "pydantic_core-2.10.1-cp37-none-win_amd64.whl", hash = "sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e"}, - {file = "pydantic_core-2.10.1-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc"}, - {file = "pydantic_core-2.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302"}, - {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e"}, - {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561"}, - {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de"}, - {file = "pydantic_core-2.10.1-cp38-none-win32.whl", hash = "sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee"}, - {file = "pydantic_core-2.10.1-cp38-none-win_amd64.whl", hash = "sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e"}, - {file = "pydantic_core-2.10.1-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970"}, - {file = "pydantic_core-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a"}, - {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429"}, - {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7"}, - {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595"}, - {file = "pydantic_core-2.10.1-cp39-none-win32.whl", hash = "sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a"}, - {file = "pydantic_core-2.10.1-cp39-none-win_amd64.whl", hash = "sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357"}, - {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2"}, - {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132"}, - {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7"}, - {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776"}, - {file = "pydantic_core-2.10.1.tar.gz", hash = "sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82"}, + {file = "pydantic_core-2.14.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f"}, + {file = "pydantic_core-2.14.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195"}, + {file = "pydantic_core-2.14.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977"}, + {file = "pydantic_core-2.14.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5"}, + {file = "pydantic_core-2.14.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1"}, + {file = "pydantic_core-2.14.3-cp310-none-win32.whl", hash = "sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08"}, + {file = "pydantic_core-2.14.3-cp310-none-win_amd64.whl", hash = "sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149"}, + {file = "pydantic_core-2.14.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289"}, + {file = "pydantic_core-2.14.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79"}, + {file = "pydantic_core-2.14.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623"}, + {file = "pydantic_core-2.14.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab"}, + {file = "pydantic_core-2.14.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989"}, + {file = "pydantic_core-2.14.3-cp311-none-win32.whl", hash = "sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a"}, + {file = "pydantic_core-2.14.3-cp311-none-win_amd64.whl", hash = "sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c"}, + {file = "pydantic_core-2.14.3-cp311-none-win_arm64.whl", hash = "sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94"}, + {file = "pydantic_core-2.14.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc"}, + {file = "pydantic_core-2.14.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb"}, + {file = "pydantic_core-2.14.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400"}, + {file = "pydantic_core-2.14.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427"}, + {file = "pydantic_core-2.14.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47"}, + {file = "pydantic_core-2.14.3-cp312-none-win32.whl", hash = "sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63"}, + {file = "pydantic_core-2.14.3-cp312-none-win_amd64.whl", hash = "sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f"}, + {file = "pydantic_core-2.14.3-cp312-none-win_arm64.whl", hash = "sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226"}, + {file = 
"pydantic_core-2.14.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e"}, + {file = "pydantic_core-2.14.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892"}, + {file = "pydantic_core-2.14.3-cp37-none-win32.whl", hash = "sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609"}, + {file = "pydantic_core-2.14.3-cp37-none-win_amd64.whl", hash = "sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca"}, + {file = "pydantic_core-2.14.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f"}, + {file = "pydantic_core-2.14.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075"}, + {file = "pydantic_core-2.14.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047"}, + {file = "pydantic_core-2.14.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf"}, + {file = "pydantic_core-2.14.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0"}, + {file = "pydantic_core-2.14.3-cp38-none-win32.whl", hash = "sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00"}, + {file = "pydantic_core-2.14.3-cp38-none-win_amd64.whl", hash = "sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0"}, + {file = "pydantic_core-2.14.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af"}, + {file = 
"pydantic_core-2.14.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0"}, + {file = "pydantic_core-2.14.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4"}, + {file = "pydantic_core-2.14.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c"}, + {file = "pydantic_core-2.14.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e"}, + {file = "pydantic_core-2.14.3-cp39-none-win32.whl", hash = "sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56"}, + {file = "pydantic_core-2.14.3-cp39-none-win_amd64.whl", hash = "sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce"}, + {file = "pydantic_core-2.14.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb"}, + {file = "pydantic_core-2.14.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4"}, + {file = "pydantic_core-2.14.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38"}, + {file = "pydantic_core-2.14.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882"}, + {file = "pydantic_core-2.14.3.tar.gz", hash = "sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f"}, ] [package.dependencies] @@ -1980,20 +1964,23 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" -version = "1.59.1" +version = "2.1.1" description = "Use the full Github API v3" optional = false 
python-versions = ">=3.7" files = [ - {file = "PyGithub-1.59.1-py3-none-any.whl", hash = "sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9"}, - {file = "PyGithub-1.59.1.tar.gz", hash = "sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217"}, + {file = "PyGithub-2.1.1-py3-none-any.whl", hash = "sha256:4b528d5d6f35e991ea5fd3f942f58748f24938805cb7fcf24486546637917337"}, + {file = "PyGithub-2.1.1.tar.gz", hash = "sha256:ecf12c2809c44147bce63b047b3d2e9dac8a41b63e90fcb263c703f64936b97c"}, ] [package.dependencies] -deprecated = "*" +Deprecated = "*" pyjwt = {version = ">=2.4.0", extras = ["crypto"]} pynacl = ">=1.4.0" +python-dateutil = "*" requests = ">=2.14.0" +typing-extensions = ">=4.0.0" +urllib3 = ">=1.26.0" [[package]] name = "pygments" @@ -2011,12 +1998,12 @@ plugins = ["importlib-metadata"] [[package]] name = "pyicu" -version = "2.11" +version = "2.12" description = "Python extension wrapping the ICU C++ API" optional = true python-versions = "*" files = [ - {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"}, + {file = "PyICU-2.12.tar.gz", hash = "sha256:bd7ab5efa93ad692e6daa29cd249364e521218329221726a113ca3cb281c8611"}, ] [[package]] @@ -2093,20 +2080,20 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "23.2.0" +version = "23.3.0" description = "Python wrapper module around the OpenSSL library" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pyOpenSSL-23.2.0-py3-none-any.whl", hash = "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2"}, - {file = "pyOpenSSL-23.2.0.tar.gz", hash = "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"}, + {file = "pyOpenSSL-23.3.0-py3-none-any.whl", hash = "sha256:6756834481d9ed5470f4a9393455154bc92fe7a64b7bc6ee2c804e78c52099b2"}, + {file = "pyOpenSSL-23.3.0.tar.gz", hash = "sha256:6b2cba5cc46e822750ec3e5a81ee12819850b11303630d575e98108a079c2b12"}, ] [package.dependencies] -cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42" +cryptography = ">=41.0.5,<42" [package.extras] -docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] +docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] test = ["flaky", "pretend", "pytest (>=3.0.1)"] [[package]] @@ -2137,7 +2124,7 @@ s2repoze = ["paste", "repoze.who", "zope.interface"] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, @@ -2285,13 +2272,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-toolbelt" -version = "0.10.1" +version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ - {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, - {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = 
"requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, ] [package.dependencies] @@ -2438,28 +2425,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.292" -description = "An extremely fast Python linter, written in Rust." +version = "0.1.6" +description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"}, - {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"}, - {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"}, - {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"}, - {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"}, - {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"}, + {file = "ruff-0.1.6-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:88b8cdf6abf98130991cbc9f6438f35f6e8d41a02622cc5ee130a02a0ed28703"}, + {file = "ruff-0.1.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5c549ed437680b6105a1299d2cd30e4964211606eeb48a0ff7a93ef70b902248"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cf5f701062e294f2167e66d11b092bba7af6a057668ed618a9253e1e90cfd76"}, + {file = 
"ruff-0.1.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:05991ee20d4ac4bb78385360c684e4b417edd971030ab12a4fbd075ff535050e"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87455a0c1f739b3c069e2f4c43b66479a54dea0276dd5d4d67b091265f6fd1dc"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:683aa5bdda5a48cb8266fcde8eea2a6af4e5700a392c56ea5fb5f0d4bfdc0240"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:137852105586dcbf80c1717facb6781555c4e99f520c9c827bd414fac67ddfb6"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd98138a98d48a1c36c394fd6b84cd943ac92a08278aa8ac8c0fdefcf7138f35"}, + {file = "ruff-0.1.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0cd909d25f227ac5c36d4e7e681577275fb74ba3b11d288aff7ec47e3ae745"}, + {file = "ruff-0.1.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e8fd1c62a47aa88a02707b5dd20c5ff20d035d634aa74826b42a1da77861b5ff"}, + {file = "ruff-0.1.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd89b45d374935829134a082617954120d7a1470a9f0ec0e7f3ead983edc48cc"}, + {file = "ruff-0.1.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:491262006e92f825b145cd1e52948073c56560243b55fb3b4ecb142f6f0e9543"}, + {file = "ruff-0.1.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea284789861b8b5ca9d5443591a92a397ac183d4351882ab52f6296b4fdd5462"}, + {file = "ruff-0.1.6-py3-none-win32.whl", hash = "sha256:1610e14750826dfc207ccbcdd7331b6bd285607d4181df9c1c6ae26646d6848a"}, + {file = "ruff-0.1.6-py3-none-win_amd64.whl", hash = "sha256:4558b3e178145491e9bc3b2ee3c4b42f19d19384eaa5c59d10acf6e8f8b57e33"}, + {file = "ruff-0.1.6-py3-none-win_arm64.whl", hash = "sha256:03910e81df0d8db0e30050725a5802441c2022ea3ae4fe0609b76081731accbc"}, + {file = "ruff-0.1.6.tar.gz", hash = "sha256:1b09f29b16c6ead5ea6b097ef2764b42372aebe363722f1605ecbcd2b9207184"}, ] [[package]] @@ -2494,13 +2481,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.32.0" +version = "1.35.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.32.0.tar.gz", hash = "sha256:935e8fbd7787a3702457393b74b13d89a5afb67185bc0af85c00cb27cbd42e7c"}, - {file = "sentry_sdk-1.32.0-py2.py3-none-any.whl", hash = "sha256:eeb0b3550536f3bbc05bb1c7e0feb3a78d74acb43b607159a606ed2ec0a33a4d"}, + {file = "sentry-sdk-1.35.0.tar.gz", hash = "sha256:04e392db9a0d59bd49a51b9e3a92410ac5867556820465057c2ef89a38e953e9"}, + {file = "sentry_sdk-1.35.0-py2.py3-none-any.whl", hash = "sha256:a7865952701e46d38b41315c16c075367675c48d049b90a4cc2e41991ebc7efa"}, ] [package.dependencies] @@ -2579,20 +2566,19 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.7.0" +version = "1.8.1" description = "Setuptools Rust extension plugin" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setuptools-rust-1.7.0.tar.gz", hash = "sha256:c7100999948235a38ae7e555fe199aa66c253dc384b125f5d85473bf81eae3a3"}, - {file = "setuptools_rust-1.7.0-py3-none-any.whl", hash = "sha256:071099885949132a2180d16abf907b60837e74b4085047ba7e9c0f5b365310c1"}, + {file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"}, + {file = 
"setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"}, ] [package.dependencies] semantic-version = ">=2.8.2,<3" setuptools = ">=62.4" tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.7.4.3" [[package]] name = "signedjson" @@ -2906,18 +2892,17 @@ files = [ [[package]] name = "towncrier" -version = "23.6.0" +version = "23.11.0" description = "Building newsfiles for your project." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "towncrier-23.6.0-py3-none-any.whl", hash = "sha256:da552f29192b3c2b04d630133f194c98e9f14f0558669d427708e203fea4d0a5"}, - {file = "towncrier-23.6.0.tar.gz", hash = "sha256:fc29bd5ab4727c8dacfbe636f7fb5dc53b99805b62da1c96b214836159ff70c1"}, + {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"}, + {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"}, ] [package.dependencies] click = "*" -click-default-group = "*" importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""} incremental = "*" jinja2 = "*" @@ -2928,13 +2913,13 @@ dev = ["furo", "packaging", "sphinx (>=5)", "twisted"] [[package]] name = "treq" -version = "22.2.0" +version = "23.11.0" description = "High-level Twisted HTTP Client API" optional = false python-versions = ">=3.6" files = [ - {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"}, - {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"}, + {file = "treq-23.11.0-py3-none-any.whl", hash = "sha256:f494c2218d61cab2cabbee37cd6606d3eea9d16cf14190323095c95d22c467e9"}, + {file = "treq-23.11.0.tar.gz", hash = "sha256:0914ff929fd1632ce16797235260f8bc19d20ff7c459c1deabd65b8c68cbeac5"}, ] [package.dependencies] @@ -2942,11 +2927,11 @@ attrs = "*" hyperlink = ">=21.0.0" incremental = "*" requests = ">=2.1.0" -Twisted = {version = ">=18.7.0", extras = ["tls"]} +Twisted = {version = ">=22.10.0", extras = ["tls"]} [package.extras] -dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"] -docs = ["sphinx (>=1.4.8)"] +dev = ["httpbin (==0.7.0)", "pep8", "pyflakes", "werkzeug (==2.0.3)"] +docs = ["sphinx (<7.0.0)"] [[package]] name = "twine" @@ -2972,13 +2957,13 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "23.8.0" +version = "23.10.0" description = "An asynchronous networking framework written in Python" optional = false -python-versions = ">=3.7.1" +python-versions = ">=3.8.0" files = [ - {file = "twisted-23.8.0-py3-none-any.whl", hash = "sha256:b8bdba145de120ffb36c20e6e071cce984e89fba798611ed0704216fb7f884cd"}, - {file = "twisted-23.8.0.tar.gz", hash = "sha256:3c73360add17336a622c0d811c2a2ce29866b6e59b1125fd6509b17252098a24"}, + {file = "twisted-23.10.0-py3-none-any.whl", hash = "sha256:4ae8bce12999a35f7fe6443e7f1893e6fe09588c8d2bed9c35cdce8ff2d5b444"}, + {file = "twisted-23.10.0.tar.gz", hash = "sha256:987847a0790a2c597197613686e2784fd54167df3a55d0fb17c8412305d76ce5"}, ] [package.dependencies] @@ -2991,19 +2976,18 @@ incremental = ">=22.10.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} 
-typing-extensions = ">=3.10.0" +typing-extensions = ">=4.2.0" zope-interface = ">=5" [package.extras] -all-non-platform = ["twisted[conch,contextvars,http2,serial,test,tls]", "twisted[conch,contextvars,http2,serial,test,tls]"] +all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"] conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] -contextvars = ["contextvars (>=2.4,<3)"] dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] -dev-release = ["pydoctor (>=23.4.0,<23.5.0)", "pydoctor (>=23.4.0,<23.5.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "sphinx (>=5,<7)", "sphinx (>=5,<7)", "sphinx-rtd-theme (>=1.2,<2.0)", "sphinx-rtd-theme (>=1.2,<2.0)", "towncrier (>=22.12,<23.0)", "towncrier (>=22.12,<23.0)", "urllib3 (<2)", "urllib3 (<2)"] +dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"] gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] -mypy = ["mypy (==0.981)", "mypy-extensions (==0.4.3)", "mypy-zope (==0.3.11)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] +mypy = ["mypy (>=1.5.1,<1.6.0)", "mypy-zope (>=1.0.1,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] @@ -3048,13 +3032,13 @@ twisted = "*" [[package]] name = "types-bleach" -version = "6.1.0.0" +version = "6.1.0.1" description = "Typing stubs for bleach" optional = false python-versions = ">=3.7" files = [ - {file = "types-bleach-6.1.0.0.tar.gz", hash = "sha256:3cf0e55d4618890a00af1151f878b2e2a7a96433850b74e12bede7663d774532"}, - {file = "types_bleach-6.1.0.0-py3-none-any.whl", hash = "sha256:f0bc75d0f6475036ac69afebf37c41d116dfba78dae55db80437caf0fcd35c28"}, + {file = "types-bleach-6.1.0.1.tar.gz", hash = "sha256:1e43c437e734a90efe4f40ebfe831057599568d3b275939ffbd6094848a18a27"}, + {file = "types_bleach-6.1.0.1-py3-none-any.whl", hash = "sha256:f83f80e0709f13d809a9c79b958a1089df9b99e68059287beb196e38967e4ddf"}, ] [[package]] @@ -3070,13 +3054,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.19.0.3" +version = "4.19.0.4" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.19.0.3.tar.gz", hash = "sha256:e0fc0f5d51fd0988bf193be42174a5376b0096820ff79505d9c1b66de23f0581"}, - {file = "types_jsonschema-4.19.0.3-py3-none-any.whl", hash = "sha256:5cedbb661e5ca88d95b94b79902423e3f97a389c245e5fe0ab384122f27d56b9"}, + {file = "types-jsonschema-4.19.0.4.tar.gz", hash = "sha256:994feb6632818259c4b5dbd733867824cb475029a6abc2c2b5201a2268b6e7d2"}, + {file = "types_jsonschema-4.19.0.4-py3-none-any.whl", hash = "sha256:b73c3f4ba3cd8108602d1198a438e2698d5eb6b9db206ed89a33e24729b0abe7"}, ] 
[package.dependencies] @@ -3106,35 +3090,35 @@ files = [ [[package]] name = "types-pillow" -version = "10.0.0.3" +version = "10.1.0.2" description = "Typing stubs for Pillow" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-Pillow-10.0.0.3.tar.gz", hash = "sha256:ae0c877d363da349bbb82c5463c9e78037290cc07d3714cb0ceaf5d2f7f5c825"}, - {file = "types_Pillow-10.0.0.3-py3-none-any.whl", hash = "sha256:54a49f3c6a3f5e95ebeee396d7773dde22ce2515d594f9c0596c0a983558f0d4"}, + {file = "types-Pillow-10.1.0.2.tar.gz", hash = "sha256:525c1c5ee67b0ac1721c40d2bc618226ef2123c347e527e14e05b920721a13b9"}, + {file = "types_Pillow-10.1.0.2-py3-none-any.whl", hash = "sha256:131078ffa547bf9a201d39ffcdc65633e108148085f4f1b07d4647fcfec6e923"}, ] [[package]] name = "types-psycopg2" -version = "2.9.21.14" +version = "2.9.21.16" description = "Typing stubs for psycopg2" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-psycopg2-2.9.21.14.tar.gz", hash = "sha256:bf73a0ac4da4e278c89bf1b01fc596d5a5ac7a356cfe6ac0249f47b9e259f868"}, - {file = "types_psycopg2-2.9.21.14-py3-none-any.whl", hash = "sha256:cd9c5350631f3bc6184ec8d48f2ed31d4ea660f89d0fffe78239450782f383c5"}, + {file = "types-psycopg2-2.9.21.16.tar.gz", hash = "sha256:44a3ae748173bb637cff31654d6bd12de9ad0c7ad73afe737df6152830ed82ed"}, + {file = "types_psycopg2-2.9.21.16-py3-none-any.whl", hash = "sha256:e2f24b651239ccfda320ab3457099af035cf37962c36c9fa26a4dc65991aebed"}, ] [[package]] name = "types-pyopenssl" -version = "23.2.0.2" +version = "23.3.0.0" description = "Typing stubs for pyOpenSSL" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"}, - {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"}, + {file = "types-pyOpenSSL-23.3.0.0.tar.gz", hash = "sha256:5ffb077fe70b699c88d5caab999ae80e192fe28bf6cda7989b7e79b1e4e2dcd3"}, + {file = "types_pyOpenSSL-23.3.0.0-py3-none-any.whl", hash = "sha256:00171433653265843b7469ddb9f3c86d698668064cc33ef10537822156130ebf"}, ] [package.dependencies] @@ -3142,49 +3126,38 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.11" +version = "6.0.12.12" description = "Typing stubs for PyYAML" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, - {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, + {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"}, + {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"}, ] [[package]] name = "types-requests" -version = "2.31.0.2" +version = "2.31.0.10" description = "Typing stubs for requests" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, - {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, + {file = "types-requests-2.31.0.10.tar.gz", hash = 
"sha256:dc5852a76f1eaf60eafa81a2e50aefa3d1f015c34cf0cba130930866b1b22a92"}, + {file = "types_requests-2.31.0.10-py3-none-any.whl", hash = "sha256:b32b9a86beffa876c0c3ac99a4cd3b8b51e973fb8e3bd4e0a6bb32c7efad80fc"}, ] [package.dependencies] -types-urllib3 = "*" +urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "68.2.0.0" +version = "68.2.0.2" description = "Typing stubs for setuptools" optional = false -python-versions = "*" -files = [ - {file = "types-setuptools-68.2.0.0.tar.gz", hash = "sha256:a4216f1e2ef29d089877b3af3ab2acf489eb869ccaf905125c69d2dc3932fd85"}, - {file = "types_setuptools-68.2.0.0-py3-none-any.whl", hash = "sha256:77edcc843e53f8fc83bb1a840684841f3dc804ec94562623bfa2ea70d5a2ba1b"}, -] - -[[package]] -name = "types-urllib3" -version = "1.26.25.8" -description = "Typing stubs for urllib3" -optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-urllib3-1.26.25.8.tar.gz", hash = "sha256:ecf43c42d8ee439d732a1110b4901e9017a79a38daca26f08e42c8460069392c"}, - {file = "types_urllib3-1.26.25.8-py3-none-any.whl", hash = "sha256:95ea847fbf0bf675f50c8ae19a665baedcf07e6b4641662c4c3c72e7b2edf1a9"}, + {file = "types-setuptools-68.2.0.2.tar.gz", hash = "sha256:09efc380ad5c7f78e30bca1546f706469568cf26084cfab73ecf83dea1d28446"}, + {file = "types_setuptools-68.2.0.2-py3-none-any.whl", hash = "sha256:d5b5ff568ea2474eb573dcb783def7dadfd9b1ff638bb653b3c7051ce5aeb6d1"}, ] [[package]] @@ -3211,19 +3184,20 @@ files = [ [[package]] name = "urllib3" -version = "1.26.18" +version = "2.0.7" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, ] [package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "webencodings" @@ -3458,4 +3432,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "a08543c65f18cc7e9dea648e89c18ab88fc1747aa2e029aa208f777fc3db06dd" +content-hash = "2924e80a14b32b430e70bafbd2b00893a365d9c3836c026296f4af0b9579e604" diff --git a/pyproject.toml b/pyproject.toml index 498b663bae63..b2a4fd7a2fe6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.95.0rc1" +version = "1.97.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" @@ -192,7 +192,7 @@ phonenumbers = 
">=8.2.0" # we use GaugeHistogramMetric, which was added in prom-client 0.4.0. prometheus-client = ">=0.4.0" # we use `order`, which arrived in attrs 19.2.0. -# Note: 21.1.0 broke `/sync`, see #9936 +# Note: 21.1.0 broke `/sync`, see https://github.com/matrix-org/synapse/issues/9936 attrs = ">=19.2.0,!=21.1.0" netaddr = ">=0.7.18" # Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not @@ -321,7 +321,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.292" +ruff = "0.1.6" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" @@ -357,7 +357,7 @@ commonmark = ">=0.9.1" pygithub = ">=1.55" # The following are executed as commands by the release script. twine = "*" -# Towncrier min version comes from #3425. Rationale unclear. +# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear. towncrier = ">=18.6.0rc1" # Used for checking the Poetry lockfile @@ -377,11 +377,12 @@ furo = ">=2022.12.7,<2024.0.0" [build-system] # The upper bounds here are defensive, intended to prevent situations like -# #13849 and #14079 where we see buildtime or runtime errors caused by build -# system changes. +# https://github.com/matrix-org/synapse/issues/13849 and +# https://github.com/matrix-org/synapse/issues/14079 where we see buildtime or +# runtime errors caused by build system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"] +requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.1"] build-backend = "poetry.core.masonry.api" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index f62da35a6f6a..cf70a879f678 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -25,14 +25,14 @@ name = "synapse.synapse_rust" anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.19.2", features = [ +pyo3 = { version = "0.20.0", features = [ "macros", "anyhow", "abi3", "abi3-py38", ] } -pyo3-log = "0.8.1" -pythonize = "0.19.0" +pyo3-log = "0.9.0" +pythonize = "0.20.0" regex = "1.6.0" serde = { version = "1.0.144", features = ["derive"] } serde_json = "1.0.85" diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index b192faba14d8..de2a1345448c 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -33,6 +33,7 @@ "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24) + "ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24) "debian:trixie", # (EOL not specified yet) ) diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3e0cddb527da..b1a8724b7e8a 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -216,6 +216,10 @@ extra_test_args=() test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902" +# Enable dirty runs, so tests will reuse the same container where possible. +# This significantly speeds up tests, but increases the possibility of test pollution. 
+export COMPLEMENT_ENABLE_DIRTY_RUNS=1 + # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ @@ -274,7 +278,7 @@ fi export PASS_SYNAPSE_LOG_TESTING=1 # Run the tests! -echo "Images built; running complement" +echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages" cd "$COMPLEMENT_DIR" go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages diff --git a/scripts-dev/schema_versions.py b/scripts-dev/schema_versions.py new file mode 100755 index 000000000000..5fd73251cdcb --- /dev/null +++ b/scripts-dev/schema_versions.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A script to calculate which versions of Synapse have backwards-compatible +database schemas. It creates a Markdown table of Synapse versions and the earliest +compatible version. + +It is compatible with the mdbook protocol for preprocessors (see +https://rust-lang.github.io/mdBook/for_developers/preprocessors.html#implementing-a-preprocessor-with-a-different-language): + +Exit 0 to denote support for all renderers: + + ./scripts-dev/schema_versions.py supports + +Parse a JSON list from stdin and add the table to the proper documentation page: + + ./scripts-dev/schema_versions.py + +Additionally, the script supports dumping the table to stdout for debugging: + + ./scripts-dev/schema_versions.py dump +""" + +import io +import json +import sys +from collections import defaultdict +from typing import Any, Dict, Iterator, Optional, Tuple + +import git +from packaging import version + +# The schema version has moved around over the years. +SCHEMA_VERSION_FILES = ( + "synapse/storage/schema/__init__.py", + "synapse/storage/prepare_database.py", + "synapse/storage/__init__.py", + "synapse/app/homeserver.py", +) + + +# Skip versions of Synapse < v1.0, they're old and essentially not +# compatible with today's federation. +OLDEST_SHOWN_VERSION = version.parse("v1.0") + + +def get_schema_versions(tag: git.Tag) -> Tuple[Optional[int], Optional[int]]: + """Get the schema and schema compat versions for a tag.""" + schema_version = None + schema_compat_version = None + + for file in SCHEMA_VERSION_FILES: + try: + schema_file = tag.commit.tree / file + except KeyError: + continue + + # We (usually) can't execute the code since it might have unknown imports. + if file != "synapse/storage/schema/__init__.py": + with io.BytesIO(schema_file.data_stream.read()) as f: + for line in f.readlines(): + if line.startswith(b"SCHEMA_VERSION"): + schema_version = int(line.split()[2]) + + # Bail early. + break + else: + # SCHEMA_COMPAT_VERSION is sometimes across multiple lines, the easiest + # thing to do is exec the code. Luckily it has only ever existed in + # a file which imports nothing else from Synapse.
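# (Illustrative sketch, not part of the patch: in recent Synapse releases
# synapse/storage/schema/__init__.py defines these constants roughly as
#
#     SCHEMA_VERSION = 82
#
#     SCHEMA_COMPAT_VERSION = (
#         # comments explaining why older schemas are no longer compatible
#         76
#     )
#
# where the numbers are purely illustrative. A value spread over several lines
# like this cannot be recovered by the line-by-line scan used for the other
# files, which is why the branch below falls back to exec'ing the module.)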
+ locals: Dict[str, Any] = {} + exec(schema_file.data_stream.read().decode("utf-8"), {}, locals) + schema_version = locals["SCHEMA_VERSION"] + schema_compat_version = locals.get("SCHEMA_COMPAT_VERSION") + + return schema_version, schema_compat_version + + +def get_tags(repo: git.Repo) -> Iterator[git.Tag]: + """Return an iterator of tags sorted by version.""" + tags = [] + for tag in repo.tags: + # All "real" Synapse tags are of the form vX.Y.Z. + if not tag.name.startswith("v"): + continue + + # There's a weird tag from the initial react UI. + if tag.name == "v0.1": + continue + + try: + tag_version = version.parse(tag.name) + except version.InvalidVersion: + # Skip invalid versions. + continue + + # Skip pre- and post-release versions. + if tag_version.is_prerelease or tag_version.is_postrelease or tag_version.local: + continue + + # Skip old versions. + if tag_version < OLDEST_SHOWN_VERSION: + continue + + tags.append((tag_version, tag)) + + # Sort based on the version number (not lexically). + return (tag for _, tag in sorted(tags, key=lambda t: t[0])) + + +def calculate_version_chart() -> str: + repo = git.Repo(path=".") + + # Map of schema version -> Synapse versions which are at that schema version. + schema_versions = defaultdict(list) + # Map of schema version -> Synapse versions which are compatible with that + # schema version. + schema_compat_versions = defaultdict(list) + + # Find ranges of versions which are compatible with a schema version. + # + # There are two modes of operation: + # + # 1. Pre-schema_compat_version (i.e. schema_compat_version of None), then + # Synapse is compatible up/downgrading to a version with + # schema_version >= its current version. + # + # 2. Post-schema_compat_version (i.e. schema_compat_version is *not* None), + # then Synapse is compatible up/downgrading to a version with + # schema version >= schema_compat_version. + # + # This is more generous and avoids versions that cannot be rolled back. + # + # See https://github.com/matrix-org/synapse/pull/9933 which was included in v1.37.0. + for tag in get_tags(repo): + schema_version, schema_compat_version = get_schema_versions(tag) + + # If a schema compat version is given, prefer that over the schema version. + schema_versions[schema_version].append(tag.name) + schema_compat_versions[schema_compat_version or schema_version].append(tag.name) + + # Generate a table which maps the latest Synapse version compatible with each + # schema version. + result = f"| {'Versions': ^19} | Compatible version |\n" + result += f"|{'-' * (19 + 2)}|{'-' * (18 + 2)}|\n" + for schema_version, synapse_versions in schema_compat_versions.items(): + result += f"| {synapse_versions[0] + ' – ' + synapse_versions[-1]: ^19} | {schema_versions[schema_version][0]: ^18} |\n" + + return result + + +if __name__ == "__main__": + if len(sys.argv) == 3 and sys.argv[1] == "supports": + # We don't care about the renderer which is being used, which is the second argument. + sys.exit(0) + elif len(sys.argv) == 2 and sys.argv[1] == "dump": + print(calculate_version_chart()) + else: + # Expect JSON data on stdin. + context, book = json.load(sys.stdin) + + for section in book["sections"]: + if "Chapter" in section and section["Chapter"]["path"] == "upgrade.md": + section["Chapter"]["content"] = section["Chapter"]["content"].replace( + "", calculate_version_chart() + ) + + # Print the result back out to stdout. 
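# (Illustrative sketch, not part of the patch: mdbook only runs a preprocessor
# that is declared in book.toml, typically along the lines of
#
#     [preprocessor.schema_versions]
#     command = "./scripts-dev/schema_versions.py"
#
# mdbook then pipes a JSON array of [context, book] to the script's stdin
# (consumed by the json.load(sys.stdin) call above) and replaces the book with
# whatever JSON the preprocessor prints back, which is what the final print
# below does.)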
+ print(json.dumps(book)) diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 19ca399d446a..92938086405c 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -50,7 +50,7 @@ def request_registration( url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),) # Get the nonce - r = requests.get(url, verify=False) + r = requests.get(url) if r.status_code != 200: _print("ERROR! Received %d %s" % (r.status_code, r.reason)) @@ -88,7 +88,7 @@ def request_registration( } _print("Sending registration request...") - r = requests.post(url, json=data, verify=False) + r = requests.post(url, json=data) if r.status_code != 200: _print("ERROR! Received %d %s" % (r.status_code, r.reason)) diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index ab2b29cf1b49..75fe0183f6b1 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -191,7 +191,7 @@ "user_directory_search_stat", "user_directory_search_pos", "users_who_share_private_rooms", - "users_in_public_room", + "users_in_public_rooms", # UI auth sessions have foreign keys so additional care needs to be taken, # the sessions are transient anyway, so ignore them. "ui_auth_sessions", @@ -348,8 +348,7 @@ async def setup_table(self, table: str) -> Tuple[str, int, int, int, int]: backward_chunk = 0 already_ported = 0 else: - forward_chunk = row["forward_rowid"] - backward_chunk = row["backward_rowid"] + forward_chunk, backward_chunk = row if total_to_port is None: already_ported, total_to_port = await self._get_total_count_to_port( diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py index 9321d6f18637..e2e3dc61b470 100644 --- a/synapse/api/auth/base.py +++ b/synapse/api/auth/base.py @@ -27,6 +27,8 @@ UnstableSpecAuthError, ) from synapse.appservice import ApplicationService +from synapse.http import get_request_user_agent +from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace from synapse.types import Requester, create_requester from synapse.util.cancellation import cancellable @@ -45,6 +47,9 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() + self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips + self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips + async def check_user_in_room( self, room_id: str, @@ -349,3 +354,46 @@ async def get_appservice_user( return create_requester( effective_user_id, app_service=app_service, device_id=effective_device_id ) + + async def _record_request( + self, request: SynapseRequest, requester: Requester + ) -> None: + """Record that this request was made. + + This updates the client_ips and monthly_active_user tables. 
+ """ + ip_addr = request.get_client_ip_if_available() + + if ip_addr and (not requester.app_service or self._track_appservice_user_ips): + user_agent = get_request_user_agent(request) + access_token = self.get_access_token_from_request(request) + + # XXX(quenting): I'm 95% confident that we could skip setting the + # device_id to "dummy-device" for appservices, and that the only impact + # would be some rows which whould not deduplicate in the 'user_ips' + # table during the transition + recorded_device_id = ( + "dummy-device" + if requester.device_id is None and requester.app_service is not None + else requester.device_id + ) + await self.store.insert_client_ip( + user_id=requester.authenticated_entity, + access_token=access_token, + ip=ip_addr, + user_agent=user_agent, + device_id=recorded_device_id, + ) + + # Track also the puppeted user client IP if enabled and the user is puppeting + if ( + requester.user.to_string() != requester.authenticated_entity + and self._track_puppeted_user_ips + ): + await self.store.insert_client_ip( + user_id=requester.user.to_string(), + access_token=access_token, + ip=ip_addr, + user_agent=user_agent, + device_id=requester.device_id, + ) diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index 36ee9c8b8fc8..985cbb12788c 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -22,7 +22,6 @@ InvalidClientTokenError, MissingClientTokenError, ) -from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.types import Requester, create_requester @@ -48,8 +47,6 @@ def __init__(self, hs: "HomeServer"): self._account_validity_handler = hs.get_account_validity_handler() self._macaroon_generator = hs.get_macaroon_generator() - self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips - self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users @cancellable @@ -115,9 +112,6 @@ async def _wrapped_get_user_by_req( Once get_user_by_req has set up the opentracing span, this does the actual work. 
""" try: - ip_addr = request.get_client_ip_if_available() - user_agent = get_request_user_agent(request) - access_token = self.get_access_token_from_request(request) # First check if it could be a request from an appservice @@ -154,38 +148,7 @@ async def _wrapped_get_user_by_req( errcode=Codes.EXPIRED_ACCOUNT, ) - if ip_addr and ( - not requester.app_service or self._track_appservice_user_ips - ): - # XXX(quenting): I'm 95% confident that we could skip setting the - # device_id to "dummy-device" for appservices, and that the only impact - # would be some rows which whould not deduplicate in the 'user_ips' - # table during the transition - recorded_device_id = ( - "dummy-device" - if requester.device_id is None and requester.app_service is not None - else requester.device_id - ) - await self.store.insert_client_ip( - user_id=requester.authenticated_entity, - access_token=access_token, - ip=ip_addr, - user_agent=user_agent, - device_id=recorded_device_id, - ) - - # Track also the puppeted user client IP if enabled and the user is puppeting - if ( - requester.user.to_string() != requester.authenticated_entity - and self._track_puppeted_user_ips - ): - await self.store.insert_client_ip( - user_id=requester.user.to_string(), - access_token=access_token, - ip=ip_addr, - user_agent=user_agent, - device_id=requester.device_id, - ) + await self._record_request(request, requester) if requester.is_guest and not allow_guest: raise AuthError( diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 31bb035cc846..7373d815343b 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -227,6 +227,10 @@ async def get_user_by_req( # so that we don't provision the user if they don't have enough permission: requester = await self.get_user_by_access_token(access_token, allow_expired) + # Do not record requests from MAS using the virtual `__oidc_admin` user. + if access_token != self._admin_token: + await self._record_request(request, requester) + if not allow_guest and requester.is_guest: raise OAuthInsufficientScopeError([SCOPE_MATRIX_API]) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index fdb2955be82b..fbd8b16ec39b 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -83,6 +83,8 @@ class Codes(str, Enum): USER_DEACTIVATED = "M_USER_DEACTIVATED" # USER_LOCKED = "M_USER_LOCKED" USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED" + NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED" + CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA" # Part of MSC3848 # https://github.com/matrix-org/matrix-spec-proposals/pull/3848 diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index f7c80eee210d..bcfb7a720002 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -104,8 +104,8 @@ class GenericWorkerStore( - # FIXME(#3714): We need to add UserDirectoryStore as we write directly - # rather than going via the correct worker. + # FIXME(https://github.com/matrix-org/synapse/issues/3714): We need to add + # UserDirectoryStore as we write directly rather than going via the correct worker. 
UserDirectoryStore, StatsStore, UIAuthWorkerStore, diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 4efbaeac0d7f..b1fcaf71a3d1 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -204,3 +204,10 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "rc_third_party_invite", defaults={"per_second": 0.0025, "burst_count": 5}, ) + + # Ratelimit create media requests: + self.rc_media_create = RatelimitSettings.parse( + config, + "rc_media_create", + defaults={"per_second": 10, "burst_count": 50}, + ) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index f6cfdd3e048a..839c026d70d7 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -141,6 +141,12 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "prevent_media_downloads_from", [] ) + self.unused_expiration_time = self.parse_duration( + config.get("unused_expiration_time", "24h") + ) + + self.max_pending_media_uploads = config.get("max_pending_media_uploads", 5) + self.media_store_path = self.ensure_directory( config.get("media_store_path", "media_store") ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 72d30da30082..f9e18d2053d3 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -368,9 +368,14 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # Whether to enable user presence. presence_config = config.get("presence") or {} - self.use_presence = presence_config.get("enabled") - if self.use_presence is None: - self.use_presence = config.get("use_presence", True) + presence_enabled = presence_config.get("enabled") + if presence_enabled is None: + presence_enabled = config.get("use_presence", True) + + # Whether presence is enabled *at all*. + self.presence_enabled = bool(presence_enabled) + # Whether to internally track presence, requires that presence is enabled, + self.track_presence = self.presence_enabled and presence_enabled != "untracked" # Custom presence router module # This is the legacy way of configuring it (the config should now be put in the modules section) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f1766088fc20..6d67a8cd5c30 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -358,9 +358,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "Must only specify one instance to handle `account_data` messages." ) - if len(self.writers.receipts) != 1: + if len(self.writers.receipts) == 0: raise ConfigError( - "Must only specify one instance to handle `receipts` messages." + "Must specify at least one instance to handle `receipts` messages." ) if len(self.writers.events) == 0: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 53af423a5a98..ac2cf83d9f46 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -17,6 +17,7 @@ from typing import ( TYPE_CHECKING, Any, + Awaitable, Callable, Dict, Iterable, @@ -45,6 +46,7 @@ if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations + from synapse.server import HomeServer # Split strings on "." but not "\." (or "\\\."). @@ -56,6 +58,13 @@ CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT +# Module API callback that allows adding fields to the unsigned section of +# events that are sent to clients. 
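The comment above describes a new hook for modules; the callback type is defined just below. A minimal sketch of a hypothetical module callback with that shape (the function name, field name, and registration wiring are illustrative, not part of this change):

```python
# Hypothetical module callback: an async function from an EventBase to extra fields
# for the event's "unsigned" section, matching the type alias defined below.
from synapse.events import EventBase
from synapse.types import JsonDict

async def add_example_unsigned_fields(event: EventBase) -> JsonDict:
    # Only annotate message events; everything else gets no extra fields.
    if event.type != "m.room.message":
        return {}
    # Fields already present in the event's unsigned section always win: the serializer
    # applies callback output first and then overlays the real unsigned data.
    return {"com.example.annotated": True}

# Assumed registration wiring, via the serializer instance:
#   hs.get_event_client_serializer().register_add_extra_fields_to_unsigned_client_event_callback(
#       add_example_unsigned_fields
#   )
```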
+ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK = Callable[ + [EventBase], Awaitable[JsonDict] +] + + def prune_event(event: EventBase) -> EventBase: """Returns a pruned version of the given event, which removes all keys we don't know about or think could potentially be dodgy. @@ -509,7 +518,13 @@ class EventClientSerializer: clients. """ - def serialize_event( + def __init__(self, hs: "HomeServer") -> None: + self._store = hs.get_datastores().main + self._add_extra_fields_to_unsigned_client_event_callbacks: List[ + ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ] = [] + + async def serialize_event( self, event: Union[JsonDict, EventBase], time_now: int, @@ -535,10 +550,21 @@ def serialize_event( serialized_event = serialize_event(event, time_now, config=config) + new_unsigned = {} + for callback in self._add_extra_fields_to_unsigned_client_event_callbacks: + u = await callback(event) + new_unsigned.update(u) + + if new_unsigned: + # We do the `update` this way round so that modules can't clobber + # existing fields. + new_unsigned.update(serialized_event["unsigned"]) + serialized_event["unsigned"] = new_unsigned + # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: if event.event_id in bundle_aggregations: - self._inject_bundled_aggregations( + await self._inject_bundled_aggregations( event, time_now, config, @@ -548,7 +574,7 @@ def serialize_event( return serialized_event - def _inject_bundled_aggregations( + async def _inject_bundled_aggregations( self, event: EventBase, time_now: int, @@ -590,7 +616,7 @@ def _inject_bundled_aggregations( # said that we should only include the `event_id`, `origin_server_ts` and # `sender` of the edit; however MSC3925 proposes extending it to the whole # of the edit, which is what we do here. - serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( + serialized_aggregations[RelationTypes.REPLACE] = await self.serialize_event( event_aggregations.replace, time_now, config=config, @@ -600,7 +626,7 @@ def _inject_bundled_aggregations( if event_aggregations.thread: thread = event_aggregations.thread - serialized_latest_event = self.serialize_event( + serialized_latest_event = await self.serialize_event( thread.latest_event, time_now, config=config, @@ -623,7 +649,7 @@ def _inject_bundled_aggregations( "m.relations", {} ).update(serialized_aggregations) - def serialize_events( + async def serialize_events( self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, @@ -645,7 +671,7 @@ def serialize_events( The list of serialized events """ return [ - self.serialize_event( + await self.serialize_event( event, time_now, config=config, @@ -654,6 +680,14 @@ def serialize_events( for event in events ] + def register_add_extra_fields_to_unsigned_client_event_callback( + self, callback: ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ) -> None: + """Register a callback that returns additions to the unsigned section of + serialized events. 
+ """ + self._add_extra_fields_to_unsigned_client_event_callbacks.append(callback) + _PowerLevel = Union[str, int] PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6ac8d1609585..2bb2c64ebe42 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -84,7 +84,7 @@ from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache @@ -999,6 +999,12 @@ async def on_query_user_devices( async def on_claim_client_keys( self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool ) -> Dict[str, Any]: + if any( + not self.hs.is_mine(UserID.from_string(user_id)) + for user_id, _, _, _ in query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = await self._e2e_keys_handler.claim_local_one_time_keys( query, always_include_fallback_keys=always_include_fallback_keys @@ -1395,7 +1401,7 @@ def register_instances_for_edu( self._edu_type_to_instance[edu_type] = instance_names async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: - if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE: + if not self.config.server.track_presence and edu_type == EduTypes.PRESENCE: return # Check if we have a handler on this instance diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 7b6b1da090b1..948fde66582e 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -581,14 +581,14 @@ async def handle_event(event: EventBase) -> None: "get_joined_hosts", str(sg) ) if destinations is None: - # Add logging to help track down #13444 + # Add logging to help track down https://github.com/matrix-org/synapse/issues/13444 logger.info( "Unexpectedly did not have cached destinations for %s / %s", sg, event.event_id, ) else: - # Add logging to help track down #13444 + # Add logging to help track down https://github.com/matrix-org/synapse/issues/13444 logger.info( "Unexpectedly did not have cached prev group for %s", event.event_id, @@ -844,7 +844,7 @@ async def send_presence_to_destinations( destinations (list[str]) """ - if not states or not self.hs.config.server.use_presence: + if not states or not self.hs.config.server.track_presence: # No-op if presence is disabled. 
return diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2c2baeac675e..d06f8e3296ac 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -283,7 +283,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> start, limit, user_id ) for media in media_ids: - writer.write_media_id(media["media_id"], media) + writer.write_media_id(media.media_id, attr.asdict(media)) logger.info( "[%s] Written %d media_ids of %s", diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index c200a45f3a3c..873dadc3bdf7 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -47,6 +47,7 @@ DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, RoomAlias, RoomStreamToken, StreamKeyType, @@ -217,7 +218,7 @@ async def handle_room_events(events: Iterable[EventBase]) -> None: def notify_interested_services_ephemeral( self, stream_key: StreamKeyType, - new_token: Union[int, RoomStreamToken], + new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], users: Collection[Union[str, UserID]], ) -> None: """ @@ -259,19 +260,6 @@ def notify_interested_services_ephemeral( ): return - # Assert that new_token is an integer (and not a RoomStreamToken). - # All of the supported streams that this function handles use an - # integer to track progress (rather than a RoomStreamToken - a - # vector clock implementation) as they don't support multiple - # stream writers. - # - # As a result, we simply assert that new_token is an integer. - # If we do end up needing to pass a RoomStreamToken down here - # in the future, using RoomStreamToken.stream (the minimum stream - # position) to convert to an ascending integer value should work. - # Additional context: https://github.com/matrix-org/synapse/pull/11137 - assert isinstance(new_token, int) - # Ignore to-device messages if the feature flag is not enabled if ( stream_key == StreamKeyType.TO_DEVICE @@ -286,6 +274,9 @@ def notify_interested_services_ephemeral( ): return + # We know we're not a `RoomStreamToken` at this point. + assert not isinstance(new_token, RoomStreamToken) + # Check whether there are any appservices which have registered to receive # ephemeral events. # @@ -327,7 +318,7 @@ async def _notify_interested_services_ephemeral( self, services: List[ApplicationService], stream_key: StreamKeyType, - new_token: int, + new_token: Union[int, MultiWriterStreamToken], users: Collection[Union[str, UserID]], ) -> None: logger.debug("Checking interested services for %s", stream_key) @@ -340,6 +331,7 @@ async def _notify_interested_services_ephemeral( # # Instead we simply grab the latest typing updates in _handle_typing # and, if they apply to this application service, send it off. + assert isinstance(new_token, int) events = await self._handle_typing(service, new_token) if events: self.scheduler.enqueue_for_appservice(service, ephemeral=events) @@ -350,15 +342,23 @@ async def _notify_interested_services_ephemeral( (service.id, stream_key) ): if stream_key == StreamKeyType.RECEIPT: + assert isinstance(new_token, MultiWriterStreamToken) + + # We store appservice tokens as integers, so we ignore + # the `instance_map` components and instead simply + # follow the base stream position. 
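The receipts stream can now have multiple writers, but the appservice position table still stores a single integer, so only the base position of the token survives the round trip. A rough sketch of the idea with a simplified stand-in class (not Synapse's real `MultiWriterStreamToken`):

```python
# Simplified stand-in: a multi-writer token is a base position that every writer has
# reached plus per-writer positions for writers that are ahead. Persisting only the
# base position is safe, at worst some receipts are fetched again on the next pass.
from dataclasses import dataclass, field
from typing import Dict

@dataclass(frozen=True)
class FakeMultiWriterToken:
    stream: int                                                 # position all writers have reached
    instance_map: Dict[str, int] = field(default_factory=dict)  # writers that are further ahead

current = FakeMultiWriterToken(stream=10, instance_map={"receipts1": 12})
stored = current.stream                        # what set_appservice_stream_type_pos keeps: 10
resumed = FakeMultiWriterToken(stream=stored)  # what _handle_receipts rebuilds next time
```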
+ new_token = MultiWriterStreamToken(stream=new_token.stream) + events = await self._handle_receipts(service, new_token) self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice await self.store.set_appservice_stream_type_pos( - service, "read_receipt", new_token + service, "read_receipt", new_token.stream ) elif stream_key == StreamKeyType.PRESENCE: + assert isinstance(new_token, int) events = await self._handle_presence(service, users, new_token) self.scheduler.enqueue_for_appservice(service, ephemeral=events) @@ -368,6 +368,7 @@ async def _notify_interested_services_ephemeral( ) elif stream_key == StreamKeyType.TO_DEVICE: + assert isinstance(new_token, int) # Retrieve a list of to-device message events, as well as the # maximum stream token of the messages we were able to retrieve. to_device_messages = await self._get_to_device_messages( @@ -383,6 +384,7 @@ async def _notify_interested_services_ephemeral( ) elif stream_key == StreamKeyType.DEVICE_LIST: + assert isinstance(new_token, int) device_list_summary = await self._get_device_list_summary( service, new_token ) @@ -432,7 +434,7 @@ async def _handle_typing( return typing async def _handle_receipts( - self, service: ApplicationService, new_token: int + self, service: ApplicationService, new_token: MultiWriterStreamToken ) -> List[JsonMapping]: """ Return the latest read receipts that the given application service should receive. @@ -455,15 +457,17 @@ async def _handle_receipts( from_key = await self.store.get_type_stream_id_for_appservice( service, "read_receipt" ) - if new_token is not None and new_token <= from_key: + if new_token is not None and new_token.stream <= from_key: logger.debug( "Rejecting token lower than or equal to stored: %s" % (new_token,) ) return [] + from_token = MultiWriterStreamToken(stream=from_key) + receipts_source = self.event_sources.sources.receipt receipts, _ = await receipts_source.get_new_events_as( - service=service, from_key=from_key, to_key=new_token + service=service, from_key=from_token, to_key=new_token ) return receipts diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 6a8f8f2fd18a..370f4041fbc6 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -103,10 +103,10 @@ async def deactivate_account( # Attempt to unbind any known bound threepids to this account from identity # server(s). bound_threepids = await self.store.user_get_bound_threepids(user_id) - for threepid in bound_threepids: + for medium, address in bound_threepids: try: result = await self._identity_handler.try_unbind_threepid( - user_id, threepid["medium"], threepid["address"], id_server + user_id, medium, address, id_server ) except Exception: # Do we want this to be a fatal error or should we carry on? 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 3ce96ef3cb72..98e6e4256343 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -328,6 +328,9 @@ async def get_user_ids_changed( return result async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: + if not self.hs.is_mine(UserID.from_string(user_id)): + raise SynapseError(400, "User is not hosted on this homeserver") + stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query( user_id ) @@ -380,7 +383,7 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None: ) DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000 - DEVICE_MSGS_DELETE_SLEEP_MS = 1000 + DEVICE_MSGS_DELETE_SLEEP_MS = 100 async def _delete_device_messages( self, @@ -393,15 +396,17 @@ async def _delete_device_messages( up_to_stream_id = task.params["up_to_stream_id"] # Delete the messages in batches to avoid too much DB load. + from_stream_id = None while True: - res = await self.store.delete_messages_for_device( + from_stream_id, _ = await self.store.delete_messages_for_device_between( user_id=user_id, device_id=device_id, - up_to_stream_id=up_to_stream_id, + from_stream_id=from_stream_id, + to_stream_id=up_to_stream_id, limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, ) - if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: + if from_stream_id is None: return TaskStatus.COMPLETE, None, None await self.clock.sleep(DeviceHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 8c6432035d1c..70fa931d1796 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -542,6 +542,12 @@ async def on_federation_query_client_keys( device_keys_query: Dict[str, Optional[List[str]]] = query_body.get( "device_keys", {} ) + if any( + not self.is_mine(UserID.from_string(user_id)) + for user_id in device_keys_query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + res = await self.query_local_devices( device_keys_query, include_displaynames=( @@ -659,6 +665,20 @@ async def claim_one_time_keys( timeout: Optional[int], always_include_fallback_keys: bool, ) -> JsonDict: + """ + Args: + query: A chain of maps from (user_id, device_id, algorithm) to the requested + number of keys to claim. + user: The user who is claiming these keys. + timeout: How long to wait for any federation key claim requests before + giving up. + always_include_fallback_keys: always include a fallback key for local users' + devices, even if we managed to claim a one-time-key. + + Returns: a heterogeneous dict with two keys: + one_time_keys: chain of maps user ID -> device ID -> key ID -> key. + failures: map from remote destination to a JsonDict describing the error. + """ local_query: List[Tuple[str, str, str, int]] = [] remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {} @@ -739,6 +759,16 @@ async def claim_client_keys(destination: str) -> None: async def upload_keys_for_user( self, user_id: str, device_id: str, keys: JsonDict ) -> JsonDict: + """ + Args: + user_id: user whose keys are being uploaded. + device_id: device whose keys are being uploaded. + keys: the body of a /keys/upload request. + + Returns a dictionary with one field: + "one_time_keys": A mapping from algorithm to number of keys for that + algorithm, including those previously persisted. + """ # This can only be called from the main process. 
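The new docstrings in `claim_one_time_keys` above describe the nested map shapes in words; a short sketch with illustrative values (user IDs, device IDs and keys are made up):

```python
# Illustrative values only, matching the shapes described in the docstrings above.
# The claim query chains user ID -> device ID -> algorithm -> number of keys to claim;
# the result chains user ID -> device ID -> key ID -> key, plus per-destination failures.
claim_query = {
    "@alice:example.org": {"DEVICEID": {"signed_curve25519": 1}},
}

claim_result = {
    "one_time_keys": {
        "@alice:example.org": {
            "DEVICEID": {
                "signed_curve25519:AAAAHQ": {"key": "<base64 public key>", "signatures": {}},
            },
        },
    },
    "failures": {},  # keyed by remote destination when a federation claim fails
}
```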
assert isinstance(self.device_handler, DeviceHandler) @@ -1420,19 +1450,25 @@ async def _retrieve_cross_signing_keys_for_remote_user( return desired_key_data - async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool: + async def check_cross_signing_setup(self, user_id: str) -> Tuple[bool, bool]: """Checks if the user has cross-signing set up Args: user_id: The user to check - Returns: - True if the user has cross-signing set up, False otherwise + Returns: a 2-tuple of booleans + - whether the user has cross-signing set up, and + - whether the user's master cross-signing key may be replaced without UIA. """ - existing_master_key = await self.store.get_e2e_cross_signing_key( - user_id, "master" - ) - return existing_master_key is not None + ( + exists, + ts_replacable_without_uia_before, + ) = await self.store.get_master_cross_signing_key_updatable_before(user_id) + + if ts_replacable_without_uia_before is None: + return exists, False + else: + return exists, self.clock.time_msec() < ts_replacable_without_uia_before def _check_cross_signing_key( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index d12803bf0f31..756825061cf9 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -120,7 +120,7 @@ async def get_stream( events.extend(to_add) - chunks = self._event_serializer.serialize_events( + chunks = await self._event_serializer.serialize_events( events, time_now, config=SerializeEventConfig( diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 0cc8e990d96a..f4c17894aa7a 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -88,7 +88,7 @@ ) from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer, concurrently_execute -from synapse.util.iterutils import batch_iter, partition +from synapse.util.iterutils import batch_iter, partition, sorted_topologically_batched from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -748,7 +748,7 @@ async def _get_missing_events_for_pdu( # fetching fresh state for the room if the missing event # can't be found, which slightly reduces our security. # it may also increase our DAG extremity count for the room, - # causing additional state resolution? See #1760. + # causing additional state resolution? See https://github.com/matrix-org/synapse/issues/1760. # However, fetching state doesn't hold the linearizer lock # apparently. # @@ -1669,14 +1669,13 @@ async def _auth_and_persist_outliers( # XXX: it might be possible to kick this process off in parallel with fetching # the events. - while event_map: - # build a list of events whose auth events are not in the queue. - roots = tuple( - ev - for ev in event_map.values() - if not any(aid in event_map for aid in ev.auth_event_ids()) - ) + # We need to persist an event's auth events before the event. + auth_graph = { + ev: [event_map[e_id] for e_id in ev.auth_event_ids() if e_id in event_map] + for ev in event_map.values() + } + for roots in sorted_topologically_batched(event_map.values(), auth_graph): if not roots: # if *none* of the remaining events are ready, that means # we have a loop. 
This either means a bug in our logic, or that @@ -1698,9 +1697,6 @@ async def _auth_and_persist_outliers( await self._auth_and_persist_outliers_inner(room_id, roots) - for ev in roots: - del event_map[ev.event_id] - async def _auth_and_persist_outliers_inner( self, room_id: str, fetched_events: Collection[EventBase] ) -> None: diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 472879c964cc..c041b67993c1 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -19,6 +19,8 @@ import urllib.parse from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple +import attr + from synapse.api.errors import ( CodeMessageException, Codes, @@ -357,9 +359,9 @@ async def send_threepid_validation( # Check to see if a session already exists and that it is not yet # marked as validated - if session and session.get("validated_at") is None: - session_id = session["session_id"] - last_send_attempt = session["last_send_attempt"] + if session and session.validated_at is None: + session_id = session.session_id + last_send_attempt = session.last_send_attempt # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: @@ -480,7 +482,6 @@ async def validate_threepid_session( # We don't actually know which medium this 3PID is. Thus we first assume it's email, # and if validation fails we try msisdn - validation_session = None # Try to validate as email if self.hs.config.email.can_verify_email: @@ -488,19 +489,18 @@ async def validate_threepid_session( validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True ) - - if validation_session: - return validation_session + if validation_session: + return attr.asdict(validation_session) # Try to validate as msisdn if self.hs.config.registration.account_threepid_delegate_msisdn: # Ask our delegated msisdn identity server - validation_session = await self.threepid_from_creds( + return await self.threepid_from_creds( self.hs.config.registration.account_threepid_delegate_msisdn, threepid_creds, ) - return validation_session + return None async def proxy_msisdn_submit_token( self, id_server: str, client_secret: str, sid: str, token: str diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index c34bd7db954f..c4bec955febe 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -145,7 +145,7 @@ async def _snapshot_all_rooms( joined_rooms = [r.room_id for r in room_list if r.membership == Membership.JOIN] receipt = await self.store.get_linearized_receipts_for_rooms( joined_rooms, - to_key=int(now_token.receipt_key), + to_key=now_token.receipt_key, ) receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id) @@ -173,7 +173,7 @@ async def handle_room(event: RoomsForUser) -> None: d["inviter"] = event.sender invite_event = await self.store.get_event(event.event_id) - d["invite"] = self._event_serializer.serialize_event( + d["invite"] = await self._event_serializer.serialize_event( invite_event, time_now, config=serializer_options, @@ -225,7 +225,7 @@ async def handle_room(event: RoomsForUser) -> None: d["messages"] = { "chunk": ( - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now=time_now, config=serializer_options, @@ -235,7 +235,7 @@ async def handle_room(event: RoomsForUser) -> None: "end": await end_token.to_string(self.store), } - d["state"] = 
self._event_serializer.serialize_events( + d["state"] = await self._event_serializer.serialize_events( current_state.values(), time_now=time_now, config=serializer_options, @@ -387,7 +387,7 @@ async def _room_initial_sync_parted( "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now, config=serialize_options ) ), @@ -396,7 +396,7 @@ async def _room_initial_sync_parted( }, "state": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( room_state.values(), time_now, config=serialize_options ) ), @@ -420,7 +420,7 @@ async def _room_initial_sync_joined( time_now = self.clock.time_msec() serialize_options = SerializeEventConfig(requester=requester) # Don't bundle aggregations as this is a deprecated API. - state = self._event_serializer.serialize_events( + state = await self._event_serializer.serialize_events( current_state.values(), time_now, config=serialize_options, @@ -439,7 +439,7 @@ async def _room_initial_sync_joined( async def get_presence() -> List[JsonDict]: # If presence is disabled, return an empty list - if not self.hs.config.server.use_presence: + if not self.hs.config.server.presence_enabled: return [] states = await presence_handler.get_states( @@ -497,7 +497,7 @@ async def get_receipts() -> List[JsonMapping]: "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now, config=serialize_options ) ), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 41a35ce510ff..811a41f161b6 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -244,7 +244,7 @@ async def get_state_events( ) room_state = room_state_events[membership_event_id] - events = self._event_serializer.serialize_events( + events = await self._event_serializer.serialize_events( room_state.values(), self.clock.time_msec(), config=SerializeEventConfig(requester=requester), @@ -999,7 +999,26 @@ async def create_and_send_nonmember_event( raise ShadowBanError() if ratelimit: - await self.request_ratelimiter.ratelimit(requester, update=False) + room_id = event_dict["room_id"] + try: + room_version = await self.store.get_room_version(room_id) + except NotFoundError: + # The room doesn't exist. + raise AuthError(403, f"User {requester.user} not in room {room_id}") + + if room_version.updated_redaction_rules: + redacts = event_dict["content"].get("redacts") + else: + redacts = event_dict.get("redacts") + + is_admin_redaction = await self.is_admin_redaction( + event_type=event_dict["type"], + sender=event_dict["sender"], + redacts=redacts, + ) + await self.request_ratelimiter.ratelimit( + requester, is_admin_redaction=is_admin_redaction, update=False + ) # We limit the number of concurrent event sends in a room so that we # don't fork the DAG too much. If we don't limit then we can end up in @@ -1508,6 +1527,18 @@ async def _persist_events( first_event.room_id ) if writer_instance != self._instance_name: + # Ratelimit before sending to the other event persister, to + # ensure that we correctly have ratelimits on both the event + # creators and event persisters. 
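Because `create_and_send_nonmember_event` now decides whether to use the admin-redaction ratelimit before the event object exists, it reads the redaction target straight from the event dict, and where that target lives depends on the room version. A minimal sketch of the two layouts handled above (the helper is illustrative):

```python
# Room versions with updated redaction rules carry the target in content["redacts"];
# older room versions keep it at the top level of the event.
new_rules_event = {"type": "m.room.redaction", "content": {"redacts": "$abc123"}}
old_rules_event = {"type": "m.room.redaction", "redacts": "$abc123", "content": {}}

def extract_redacts(event_dict: dict, updated_redaction_rules: bool):
    if updated_redaction_rules:
        return event_dict["content"].get("redacts")
    return event_dict.get("redacts")

assert extract_redacts(new_rules_event, True) == "$abc123"
assert extract_redacts(old_rules_event, False) == "$abc123"
```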
+ if ratelimit: + for event, _ in events_and_context: + is_admin_redaction = await self.is_admin_redaction( + event.type, event.sender, event.redacts + ) + await self.request_ratelimiter.ratelimit( + requester, is_admin_redaction=is_admin_redaction + ) + try: result = await self.send_events( instance_name=writer_instance, @@ -1538,6 +1569,7 @@ async def _persist_events( # stream_ordering entry manually (as it was persisted on # another worker). event.internal_metadata.stream_ordering = stream_id + return event event = await self.persist_and_notify_client_events( @@ -1696,21 +1728,9 @@ async def persist_and_notify_client_events( # can apply different ratelimiting. We do this by simply checking # it's not a self-redaction (to avoid having to look up whether the # user is actually admin or not). - is_admin_redaction = False - if event.type == EventTypes.Redaction: - assert event.redacts is not None - - original_event = await self.store.get_event( - event.redacts, - redact_behaviour=EventRedactBehaviour.as_is, - get_prev_content=False, - allow_rejected=False, - allow_none=True, - ) - - is_admin_redaction = bool( - original_event and event.sender != original_event.sender - ) + is_admin_redaction = await self.is_admin_redaction( + event.type, event.sender, event.redacts + ) await self.request_ratelimiter.ratelimit( requester, is_admin_redaction=is_admin_redaction @@ -1930,6 +1950,27 @@ async def _notify() -> None: return persisted_events[-1] + async def is_admin_redaction( + self, event_type: str, sender: str, redacts: Optional[str] + ) -> bool: + """Return whether the event is a redaction made by an admin, and thus + should use a different ratelimiter. + """ + if event_type != EventTypes.Redaction: + return False + + assert redacts is not None + + original_event = await self.store.get_event( + redacts, + redact_behaviour=EventRedactBehaviour.as_is, + get_prev_content=False, + allow_rejected=False, + allow_none=True, + ) + + return bool(original_event and sender != original_event.sender) + async def _maybe_kick_guest_users( self, event: EventBase, context: EventContext ) -> None: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 878f267a4e45..87e51bca485a 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -657,7 +657,7 @@ async def get_messages( chunk = { "chunk": ( - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( events, time_now, config=serialize_options, @@ -669,7 +669,7 @@ async def get_messages( } if state: - chunk["state"] = self._event_serializer.serialize_events( + chunk["state"] = await self._event_serializer.serialize_events( state, time_now, config=serialize_options ) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index dfc0b9db070f..4137fd50b130 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -192,7 +192,8 @@ def __init__(self, hs: "HomeServer"): self.state = hs.get_state_handler() self.is_mine_id = hs.is_mine_id - self._presence_enabled = hs.config.server.use_presence + self._presence_enabled = hs.config.server.presence_enabled + self._track_presence = hs.config.server.track_presence self._federation = None if hs.should_send_federation(): @@ -512,7 +513,7 @@ def __init__(self, hs: "HomeServer"): ) async def _on_shutdown(self) -> None: - if self._presence_enabled: + if self._track_presence: self.hs.get_replication_command_handler().send_command( ClearUserSyncsCommand(self.instance_id) ) @@ -524,7 +525,7 @@ def 
send_user_sync( is_syncing: bool, last_sync_ms: int, ) -> None: - if self._presence_enabled: + if self._track_presence: self.hs.get_replication_command_handler().send_user_sync( self.instance_id, user_id, device_id, is_syncing, last_sync_ms ) @@ -571,7 +572,7 @@ async def user_syncing( Called by the sync and events servlets to record that a user has connected to this worker and is waiting for some events. """ - if not affect_presence or not self._presence_enabled: + if not affect_presence or not self._track_presence: return _NullContextManager() # Note that this causes last_active_ts to be incremented which is not @@ -702,8 +703,8 @@ async def set_state( user_id = target_user.to_string() - # If presence is disabled, no-op - if not self._presence_enabled: + # If tracking of presence is disabled, no-op + if not self._track_presence: return # Proxy request to instance that writes presence @@ -723,7 +724,7 @@ async def bump_presence_active_time( with the app. """ # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return # Proxy request to instance that writes presence @@ -760,7 +761,7 @@ def __init__(self, hs: "HomeServer"): ] = {} now = self.clock.time_msec() - if self._presence_enabled: + if self._track_presence: for state in self.user_to_current_state.values(): # Create a psuedo-device to properly handle time outs. This will # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT. @@ -831,7 +832,7 @@ def __init__(self, hs: "HomeServer"): self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") - if self._presence_enabled: + if self._track_presence: # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. @@ -839,6 +840,9 @@ def __init__(self, hs: "HomeServer"): 30, self.clock.looping_call, self._handle_timeouts, 5000 ) + # Presence information is persisted, whether or not it is being tracked + # internally. + if self._presence_enabled: self.clock.call_later( 60, self.clock.looping_call, @@ -854,7 +858,7 @@ def __init__(self, hs: "HomeServer"): ) # Used to handle sending of presence to newly joined users/servers - if self._presence_enabled: + if self._track_presence: self.notifier.add_replication_callback(self.notify_new_event) # Presence is best effort and quickly heals itself, so lets just always @@ -905,7 +909,9 @@ async def _persist_unpersisted_changes(self) -> None: ) async def _update_states( - self, new_states: Iterable[UserPresenceState], force_notify: bool = False + self, + new_states: Iterable[UserPresenceState], + force_notify: bool = False, ) -> None: """Updates presence of users. Sets the appropriate timeouts. Pokes the notifier and federation if and only if the changed presence state @@ -943,7 +949,7 @@ async def _update_states( for new_state in new_states: user_id = new_state.user_id - # Its fine to not hit the database here, as the only thing not in + # It's fine to not hit the database here, as the only thing not in # the current state cache are OFFLINE states, where the only field # of interest is last_active which is safe enough to assume is 0 # here. @@ -957,6 +963,9 @@ async def _update_states( is_mine=self.is_mine_id(user_id), wheel_timer=self.wheel_timer, now=now, + # When overriding disabled presence, don't kick off all the + # wheel timers. + persist=not self._track_presence, ) if force_notify: @@ -1072,7 +1081,7 @@ async def bump_presence_active_time( with the app. 
""" # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return user_id = user.to_string() @@ -1124,7 +1133,7 @@ async def user_syncing( client that is being used by a user. presence_state: The presence state indicated in the sync request """ - if not affect_presence or not self._presence_enabled: + if not affect_presence or not self._track_presence: return _NullContextManager() curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0) @@ -1284,7 +1293,7 @@ async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: async def incoming_presence(self, origin: str, content: JsonDict) -> None: """Called when we receive a `m.presence` EDU from a remote server.""" - if not self._presence_enabled: + if not self._track_presence: return now = self.clock.time_msec() @@ -1359,7 +1368,7 @@ async def set_state( raise SynapseError(400, "Invalid presence state") # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return user_id = target_user.to_string() @@ -1807,7 +1816,7 @@ async def get_new_events( # the same token repeatedly. # # Hence this guard where we just return nothing so that the sync - # doesn't return. C.f. #5503. + # doesn't return. C.f. https://github.com/matrix-org/synapse/issues/5503. return [], max_token # Figure out which other users this user should explicitly receive @@ -2118,6 +2127,7 @@ def handle_update( is_mine: bool, wheel_timer: WheelTimer, now: int, + persist: bool, ) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. @@ -2129,6 +2139,8 @@ def handle_update( is_mine: Whether the user is ours wheel_timer now: Time now in ms + persist: True if this state should persist until another update occurs. + Skips insertion into wheel timers. Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: @@ -2146,14 +2158,15 @@ def handle_update( if is_mine: if new_state.state == PresenceState.ONLINE: # Idle timer - wheel_timer.insert( - now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER - ) + if not persist: + wheel_timer.insert( + now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER + ) active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY new_state = new_state.copy_and_replace(currently_active=active) - if active: + if active and not persist: wheel_timer.insert( now=now, obj=user_id, @@ -2162,11 +2175,12 @@ def handle_update( if new_state.state != PresenceState.OFFLINE: # User has stopped syncing - wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, - ) + if not persist: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, + ) last_federate = new_state.last_federation_update_ts if now - last_federate > FEDERATION_PING_INTERVAL: @@ -2174,7 +2188,7 @@ def handle_update( new_state = new_state.copy_and_replace(last_federation_update_ts=now) federation_ping = True - if new_state.state == PresenceState.BUSY: + if new_state.state == PresenceState.BUSY and not persist: wheel_timer.insert( now=now, obj=user_id, @@ -2182,11 +2196,13 @@ def handle_update( ) else: - wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT, - ) + # An update for a remote user was received. 
+ if not persist: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT, + ) # Check whether the change was something worth notifying about if should_notify(prev_state, new_state, is_mine): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index c2109036ec38..1027fbfd2839 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import random -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, Union from synapse.api.errors import ( AuthError, @@ -23,6 +23,7 @@ StoreError, SynapseError, ) +from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util.caches.descriptors import cached from synapse.util.stringutils import parse_and_validate_mxc_uri @@ -306,7 +307,9 @@ async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: server_name = host if self._is_mine_server_name(server_name): - media_info = await self.store.get_local_media(media_id) + media_info: Optional[ + Union[LocalMedia, RemoteMedia] + ] = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) @@ -322,12 +325,12 @@ async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: if self.max_avatar_size: # Ensure avatar does not exceed max allowed avatar size - if media_info["media_length"] > self.max_avatar_size: + if media_info.media_length > self.max_avatar_size: logger.warning( "Forbidding avatar change to %s: %d bytes is above the allowed size " "limit", mxc, - media_info["media_length"], + media_info.media_length, ) return False @@ -335,12 +338,12 @@ async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: # Ensure the avatar's file type is allowed if ( self.allowed_avatar_mimetypes - and media_info["media_type"] not in self.allowed_avatar_mimetypes + and media_info.media_type not in self.allowed_avatar_mimetypes ): logger.warning( "Forbidding avatar change to %s: mimetype %s not allowed", mxc, - media_info["media_type"], + media_info.media_type, ) return False diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 69ac468f75ad..b5f7a8b47e1c 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -20,6 +20,7 @@ from synapse.types import ( JsonDict, JsonMapping, + MultiWriterStreamToken, ReadReceipt, StreamKeyType, UserID, @@ -200,7 +201,7 @@ async def received_client_receipt( await self.federation_sender.send_read_receipt(receipt) -class ReceiptEventSource(EventSource[int, JsonMapping]): +class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.config = hs.config @@ -273,13 +274,12 @@ def filter_out_private_receipts( async def get_new_events( self, user: UserID, - from_key: int, + from_key: MultiWriterStreamToken, limit: int, room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[JsonMapping], int]: - from_key = int(from_key) + ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: to_key = self.get_current_key() if from_key == to_key: @@ -296,8 +296,11 @@ async def get_new_events( return events, to_key async def get_new_events_as( - self, from_key: int, to_key: int, service: ApplicationService - ) -> Tuple[List[JsonMapping], int]: + self, 
+ from_key: MultiWriterStreamToken, + to_key: MultiWriterStreamToken, + service: ApplicationService, + ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: """Returns a set of new read receipt events that an appservice may be interested in. @@ -312,8 +315,6 @@ async def get_new_events_as( appservice may be interested in. * The current read receipt stream token. """ - from_key = int(from_key) - if from_key == to_key: return [], to_key @@ -333,5 +334,5 @@ async def get_new_events_as( return events, to_key - def get_current_key(self) -> int: + def get_current_key(self) -> MultiWriterStreamToken: return self.store.get_max_receipt_stream_id() diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 9b13448cdd7a..a15983afae95 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -167,7 +167,7 @@ async def get_relations( now = self._clock.time_msec() serialize_options = SerializeEventConfig(requester=requester) return_value: JsonDict = { - "chunk": self._event_serializer.serialize_events( + "chunk": await self._event_serializer.serialize_events( events, now, bundle_aggregations=aggregations, @@ -177,7 +177,9 @@ async def get_relations( if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. - return_value["original_event"] = self._event_serializer.serialize_event( + return_value[ + "original_event" + ] = await self._event_serializer.serialize_event( event, now, bundle_aggregations=None, @@ -602,7 +604,7 @@ async def get_threads( ) now = self._clock.time_msec() - serialized_events = self._event_serializer.serialize_events( + serialized_events = await self._event_serializer.serialize_events( events, now, bundle_aggregations=aggregations ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 97c9f01245f2..f865bed1ec2c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -269,7 +269,7 @@ async def _upgrade_room( self, requester: Requester, old_room_id: str, - old_room: Dict[str, Any], + old_room: Tuple[bool, str, bool], new_room_id: str, new_version: RoomVersion, tombstone_event: EventBase, @@ -279,7 +279,7 @@ async def _upgrade_room( Args: requester: the user requesting the upgrade old_room_id: the id of the room to be replaced - old_room: a dict containing room information for the room to be replaced, + old_room: a tuple containing room information for the room to be replaced, as returned by `RoomWorkerStore.get_room`. new_room_id: the id of the replacement room new_version: the version to upgrade the room to @@ -299,7 +299,7 @@ async def _upgrade_room( await self.store.store_room( room_id=new_room_id, room_creator_user_id=user_id, - is_public=old_room["is_public"], + is_public=old_room[0], room_version=new_version, ) @@ -698,6 +698,7 @@ async def create_room( config: JsonDict, ratelimit: bool = True, creator_join_profile: Optional[JsonDict] = None, + ignore_forced_encryption: bool = False, ) -> Tuple[str, Optional[RoomAlias], int]: """Creates a new room. @@ -714,6 +715,8 @@ async def create_room( derived from the user's profile. If set, should contain the values to go in the body of the 'join' event (typically `avatar_url` and/or `displayname`. + ignore_forced_encryption: + Ignore encryption forced by `encryption_enabled_by_default_for_room_type` setting. 
Returns: A 3-tuple containing: @@ -1015,6 +1018,7 @@ async def _send_events_for_new_room( room_alias: Optional[RoomAlias] = None, power_level_content_override: Optional[JsonDict] = None, creator_join_profile: Optional[JsonDict] = None, + ignore_forced_encryption: bool = False, ) -> Tuple[int, str, int]: """Sends the initial events into a new room. Sends the room creation, membership, and power level events into the room sequentially, then creates and batches up the @@ -1049,6 +1053,8 @@ async def _send_events_for_new_room( creator_join_profile: Set to override the displayname and avatar for the creating user in this room. + ignore_forced_encryption: + Ignore encryption forced by `encryption_enabled_by_default_for_room_type` setting. Returns: A tuple containing the stream ID, event ID and depth of the last @@ -1251,7 +1257,7 @@ async def create_event( ) events_to_send.append((event, context)) - if config["encrypted"]: + if config["encrypted"] and not ignore_forced_encryption: encryption_event, encryption_context = await create_event( EventTypes.RoomEncryption, {"algorithm": RoomEncryptionAlgorithms.DEFAULT}, @@ -1939,9 +1945,10 @@ async def shutdown_room( else: logger.info("Shutting down room %r", room_id) - users = await self.store.get_users_in_room(room_id) - for user_id in users: - if not self.hs.is_mine_id(user_id): + users = await self.store.get_local_users_related_to_room(room_id) + for user_id, membership in users: + # If the user is not in the room (or is banned), nothing to do. + if membership not in (Membership.JOIN, Membership.INVITE, Membership.KNOCK): continue logger.info("Kicking %r from %r...", user_id, room_id) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 36e2db897540..2947e154be61 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -33,6 +33,7 @@ RequestSendFailed, SynapseError, ) +from synapse.storage.databases.main.room import LargestRoomStats from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache @@ -170,26 +171,24 @@ async def _get_public_room_list( ignore_non_federatable=from_federation, ) - def build_room_entry(room: JsonDict) -> JsonDict: + def build_room_entry(room: LargestRoomStats) -> JsonDict: entry = { - "room_id": room["room_id"], - "name": room["name"], - "topic": room["topic"], - "canonical_alias": room["canonical_alias"], - "num_joined_members": room["joined_members"], - "avatar_url": room["avatar"], - "world_readable": room["history_visibility"] + "room_id": room.room_id, + "name": room.name, + "topic": room.topic, + "canonical_alias": room.canonical_alias, + "num_joined_members": room.joined_members, + "avatar_url": room.avatar, + "world_readable": room.history_visibility == HistoryVisibility.WORLD_READABLE, - "guest_can_join": room["guest_access"] == "can_join", - "join_rule": room["join_rules"], - "room_type": room["room_type"], + "guest_can_join": room.guest_access == "can_join", + "join_rule": room.join_rules, + "room_type": room.room_type, } # Filter out Nones – rather omit the field altogether return {k: v for k, v in entry.items() if v is not None} - results = [build_room_entry(r) for r in results] - response: JsonDict = {} num_results = len(results) if limit is not None: @@ -212,33 +211,33 @@ def build_room_entry(room: JsonDict) -> JsonDict: # If there was a token given then we assume that there # must be previous results. 
response["prev_batch"] = RoomListNextBatch( - last_joined_members=initial_entry["num_joined_members"], - last_room_id=initial_entry["room_id"], + last_joined_members=initial_entry.joined_members, + last_room_id=initial_entry.room_id, direction_is_forward=False, ).to_token() if more_to_come: response["next_batch"] = RoomListNextBatch( - last_joined_members=final_entry["num_joined_members"], - last_room_id=final_entry["room_id"], + last_joined_members=final_entry.joined_members, + last_room_id=final_entry.room_id, direction_is_forward=True, ).to_token() else: if has_batch_token: response["next_batch"] = RoomListNextBatch( - last_joined_members=final_entry["num_joined_members"], - last_room_id=final_entry["room_id"], + last_joined_members=final_entry.joined_members, + last_room_id=final_entry.room_id, direction_is_forward=True, ).to_token() if more_to_come: response["prev_batch"] = RoomListNextBatch( - last_joined_members=initial_entry["num_joined_members"], - last_room_id=initial_entry["room_id"], + last_joined_members=initial_entry.joined_members, + last_room_id=initial_entry.room_id, direction_is_forward=False, ).to_token() - response["chunk"] = results + response["chunk"] = [build_room_entry(r) for r in results] response["total_room_count_estimate"] = await self.store.count_public_rooms( network_tuple, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 918eb203e2ea..eddc2af9ba4c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1260,7 +1260,8 @@ async def transfer_room_state_on_room_upgrade( # Add new room to the room directory if the old room was there # Remove old room from the room directory old_room = await self.store.get_room(old_room_id) - if old_room is not None and old_room["is_public"]: + # If the old room exists and is public. 
+ if old_room is not None and old_room[0]: await self.store.set_room_is_public(old_room_id, False) await self.store.set_room_is_public(room_id, True) diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index dd559b4c450f..1dfb12e065b8 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -703,24 +703,24 @@ async def _build_room_entry(self, room_id: str, for_federation: bool) -> JsonDic # there should always be an entry assert stats is not None, "unable to retrieve stats for %s" % (room_id,) - entry = { - "room_id": stats["room_id"], - "name": stats["name"], - "topic": stats["topic"], - "canonical_alias": stats["canonical_alias"], - "num_joined_members": stats["joined_members"], - "avatar_url": stats["avatar"], - "join_rule": stats["join_rules"], + entry: JsonDict = { + "room_id": stats.room_id, + "name": stats.name, + "topic": stats.topic, + "canonical_alias": stats.canonical_alias, + "num_joined_members": stats.joined_members, + "avatar_url": stats.avatar, + "join_rule": stats.join_rules, "world_readable": ( - stats["history_visibility"] == HistoryVisibility.WORLD_READABLE + stats.history_visibility == HistoryVisibility.WORLD_READABLE ), - "guest_can_join": stats["guest_access"] == "can_join", - "room_type": stats["room_type"], + "guest_can_join": stats.guest_access == "can_join", + "room_type": stats.room_type, } if self._msc3266_enabled: - entry["im.nheko.summary.version"] = stats["version"] - entry["im.nheko.summary.encryption"] = stats["encryption"] + entry["im.nheko.summary.version"] = stats.version + entry["im.nheko.summary.encryption"] = stats.encryption # Federation requests need to provide additional information so the # requested server is able to filter the response appropriately. 
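
The room-list and room-summary hunks above switch from dict-style row access to attribute access on an attrs record (`LargestRoomStats`), and omit None-valued fields before serialising. A minimal sketch of that pattern, assuming an illustrative `RoomStats` class rather than the real storage type:

import attr
from typing import Any, Dict, Optional

@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomStats:
    # Illustrative subset of the fields the handlers read.
    room_id: str
    name: Optional[str]
    topic: Optional[str]
    joined_members: int

def build_entry(room: RoomStats) -> Dict[str, Any]:
    entry = {
        "room_id": room.room_id,
        "name": room.name,
        "topic": room.topic,
        "num_joined_members": room.joined_members,
    }
    # Omit fields that are None rather than sending explicit nulls.
    return {k: v for k, v in entry.items() if v is not None}
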
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index aad4706f148a..f51ed9d5bbb4 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -374,13 +374,13 @@ async def _search( serialize_options = SerializeEventConfig(requester=requester) for context in contexts.values(): - context["events_before"] = self._event_serializer.serialize_events( + context["events_before"] = await self._event_serializer.serialize_events( context["events_before"], time_now, bundle_aggregations=aggregations, config=serialize_options, ) - context["events_after"] = self._event_serializer.serialize_events( + context["events_after"] = await self._event_serializer.serialize_events( context["events_after"], time_now, bundle_aggregations=aggregations, @@ -390,7 +390,7 @@ async def _search( results = [ { "rank": search_result.rank_map[e.event_id], - "result": self._event_serializer.serialize_event( + "result": await self._event_serializer.serialize_event( e, time_now, bundle_aggregations=aggregations, @@ -409,7 +409,7 @@ async def _search( if state_results: rooms_cat_res["state"] = { - room_id: self._event_serializer.serialize_events( + room_id: await self._event_serializer.serialize_events( state_events, time_now, config=serialize_options ) for room_id, state_events in state_results.items() diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index e9a544e754f2..389dc5298af6 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -806,7 +806,7 @@ def is_allowed_mime_type(content_type: str) -> bool: media_id = profile["avatar_url"].split("/")[-1] if self._is_mine_server_name(server_name): media = await self._media_repo.store.get_local_media(media_id) - if media is not None and upload_name == media["upload_name"]: + if media is not None and upload_name == media.upload_name: logger.info("skipping saving the user avatar") return True @@ -1206,10 +1206,7 @@ async def revoke_sessions_for_provider_session_id( # We have no guarantee that all the devices of that session are for the same # `user_id`. Hence, we have to iterate over the list of devices and log them out # one by one. - for device in devices: - user_id = device["user_id"] - device_id = device["device_id"] - + for user_id, device_id in devices: # If the user_id associated with that device/session is not the one we got # out of the `sub` claim, skip that device and show log an error. if expected_user_id is not None and user_id != expected_user_id: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f131c0e8e0c7..bf0106c6e7ab 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -57,6 +57,7 @@ DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, MutableStateMap, Requester, RoomStreamToken, @@ -398,7 +399,7 @@ async def current_sync_callback( # # If that happens, we mustn't cache it, so that when the client comes back # with the same cache token, we don't immediately return the same empty - # result, causing a tightloop. (#8518) + # result, causing a tightloop. 
(https://github.com/matrix-org/synapse/issues/8518) if result.next_batch == since_token: cache_context.should_cache = False @@ -477,7 +478,11 @@ async def ephemeral_by_room( event_copy = {k: v for (k, v) in event.items() if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - receipt_key = since_token.receipt_key if since_token else 0 + receipt_key = ( + since_token.receipt_key + if since_token + else MultiWriterStreamToken(stream=0) + ) receipt_source = self.event_sources.sources.receipt receipts, receipt_key = await receipt_source.get_new_events( @@ -998,7 +1003,7 @@ async def compute_state_delta( # always make sure we LL ourselves so we know we're in the room # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 # We only need apply this on full state syncs given we disabled - # LL for incr syncs in #3840. + # LL for incr syncs in https://github.com/matrix-org/synapse/pull/3840. # We don't insert ourselves into `members_to_fetch`, because in some # rare cases (an empty event batch with a now_token after the user's # leave in a partial state room which another local user has @@ -1512,7 +1517,7 @@ async def generate_sync_result( # Presence data is included if the server has it enabled and not filtered out. include_presence_data = bool( - self.hs_config.server.use_presence + self.hs_config.server.presence_enabled and not sync_config.filter_collection.blocks_all_presence() ) # Device list updates are sent if a since token is provided. diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 78a75bfed6ee..ab8f7610e9a1 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -187,9 +187,9 @@ async def _check_threepid(self, medium: str, authdict: dict) -> dict: if row: threepid = { - "medium": row["medium"], - "address": row["address"], - "validated_at": row["validated_at"], + "medium": row.medium, + "address": row.address, + "validated_at": row.validated_at, } # Valid threepid returned, delete from the db diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 75717ba4f99f..3c19ea56f8d2 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -184,8 +184,8 @@ async def handle_local_profile_change( """Called to update index of our local user profiles when they change irrespective of any rooms the user may be in. """ - # FIXME(#3714): We should probably do this in the same worker as all - # the other changes. + # FIXME(https://github.com/matrix-org/synapse/issues/3714): We should + # probably do this in the same worker as all the other changes. if await self.store.should_include_local_user_in_dir(user_id): await self.store.update_profile_in_user_dir( @@ -194,8 +194,8 @@ async def handle_local_profile_change( async def handle_local_user_deactivated(self, user_id: str) -> None: """Called when a user ID is deactivated""" - # FIXME(#3714): We should probably do this in the same worker as all - # the other changes. + # FIXME(https://github.com/matrix-org/synapse/issues/3714): We should + # probably do this in the same worker as all the other changes. 
await self.store.remove_from_user_dir(user_id) async def _unsafe_process(self) -> None: diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index 636efc33e8f3..59b914b87ecd 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -59,7 +59,7 @@ def as_proxy_authorization_value(self) -> bytes: a Proxy-Authorization header. """ # Encode as base64 and prepend the authorization type - return b"Basic " + base64.encodebytes(self.username_password) + return b"Basic " + base64.b64encode(self.username_password) @attr.s(auto_attribs=True) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 08c7fc1631d8..d5013e8e97c1 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -465,7 +465,7 @@ async def _send_request_with_optional_trailing_slash( """Wrapper for _send_request which can optionally retry the request upon receiving a combination of a 400 HTTP response code and a 'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3 - due to #3622. + due to https://github.com/matrix-org/synapse/issues/3622. Args: request: details of request to be sent @@ -958,9 +958,9 @@ async def put_json( requests). try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end - of the request. Workaround for #3622 in Synapse <= v0.99.3. This - will be attempted before backing off if backing off has been - enabled. + of the request. Workaround for https://github.com/matrix-org/synapse/issues/3622 + in Synapse <= v0.99.3. This will be attempted before backing off if + backing off has been enabled. parser: The parser to use to decode the response. Defaults to parsing as JSON. backoff_on_all_error_codes: Back off if we get any error response @@ -1155,7 +1155,8 @@ async def get_json( try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of - the request. Workaround for #3622 in Synapse <= v0.99.3. + the request. Workaround for https://github.com/matrix-org/synapse/issues/3622 + in Synapse <= v0.99.3. parser: The parser to use to decode the response. Defaults to parsing as JSON. @@ -1250,7 +1251,8 @@ async def get_json_with_headers( try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of - the request. Workaround for #3622 in Synapse <= v0.99.3. + the request. Workaround for https://github.com/matrix-org/synapse/issues/3622 + in Synapse <= v0.99.3. parser: The parser to use to decode the response. Defaults to parsing as JSON. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 4454fe29a5db..e297fa9c8ba0 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -1019,11 +1019,14 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func + # getfullargspec is somewhat expensive, so ensure it is only called a single + # time (the function signature shouldn't change anyway). + argspec = inspect.getfullargspec(func) + @contextlib.contextmanager def _wrapping_logic( - func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> Generator[None, None, None]: - argspec = inspect.getfullargspec(func) # We use `[1:]` to skip the `self` object reference and `start=1` to # make the index line up with `argspec.args`. 
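
The `tag_args` change above hoists `inspect.getfullargspec` out of the per-call wrapper, since a function's signature does not change after decoration. A small sketch of the same idea with an illustrative decorator (not the Synapse one):

import functools
import inspect
from typing import Any, Callable

def log_args(func: Callable[..., Any]) -> Callable[..., Any]:
    # Inspect the signature once, at decoration time; calling
    # getfullargspec on every invocation would pay its cost in the hot path.
    argspec = inspect.getfullargspec(func)

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        for name, value in zip(argspec.args, args):
            print(f"{name}={value!r}")
        return func(*args, **kwargs)

    return wrapper
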
# diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 860e5ddca2e3..9d88a711cf5c 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -83,6 +83,12 @@ "audio/x-flac", ] +# Default timeout_ms for download and thumbnail requests +DEFAULT_MAX_TIMEOUT_MS = 20_000 + +# Maximum allowed timeout_ms for download and thumbnail requests +MAXIMUM_ALLOWED_MAX_TIMEOUT_MS = 60_000 + def respond_404(request: SynapseRequest) -> None: assert request.path is not None diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 7fd46901f71e..bf976b9e7c2c 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -19,6 +19,7 @@ from io import BytesIO from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple +import attr from matrix_common.types.mxc_uri import MXCUri import twisted.internet.error @@ -26,13 +27,16 @@ from twisted.internet.defer import Deferred from synapse.api.errors import ( + Codes, FederationDeniedError, HttpResponseException, NotFoundError, RequestSendFailed, SynapseError, + cs_error, ) from synapse.config.repository import ThumbnailRequirement +from synapse.http.server import respond_with_json from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread from synapse.logging.opentracing import trace @@ -50,6 +54,7 @@ from synapse.media.thumbnailer import Thumbnailer, ThumbnailError from synapse.media.url_previewer import UrlPreviewer from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import UserID from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination @@ -78,6 +83,8 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.max_upload_size = hs.config.media.max_upload_size self.max_image_pixels = hs.config.media.max_image_pixels + self.unused_expiration_time = hs.config.media.unused_expiration_time + self.max_pending_media_uploads = hs.config.media.max_pending_media_uploads Thumbnailer.set_limits(self.max_image_pixels) @@ -183,6 +190,117 @@ def mark_recently_accessed(self, server_name: Optional[str], media_id: str) -> N else: self.recently_accessed_locals.add(media_id) + @trace + async def create_media_id(self, auth_user: UserID) -> Tuple[str, int]: + """Create and store a media ID for a local user and return the MXC URI and its + expiration. + + Args: + auth_user: The user_id of the uploader + + Returns: + A tuple containing the MXC URI of the stored content and the timestamp at + which the MXC URI expires. + """ + media_id = random_string(24) + now = self.clock.time_msec() + await self.store.store_local_media_id( + media_id=media_id, + time_now_ms=now, + user_id=auth_user, + ) + return f"mxc://{self.server_name}/{media_id}", now + self.unused_expiration_time + + @trace + async def reached_pending_media_limit(self, auth_user: UserID) -> Tuple[bool, int]: + """Check if the user is over the limit for pending media uploads. + + Args: + auth_user: The user_id of the uploader + + Returns: + A tuple with a boolean and an integer indicating whether the user has too + many pending media uploads and the timestamp at which the first pending + media will expire, respectively. 
+ """ + pending, first_expiration_ts = await self.store.count_pending_media( + user_id=auth_user + ) + return pending >= self.max_pending_media_uploads, first_expiration_ts + + @trace + async def verify_can_upload(self, media_id: str, auth_user: UserID) -> None: + """Verify that the media ID can be uploaded to by the given user. This + function checks that: + + * the media ID exists + * the media ID does not already have content + * the user uploading is the same as the one who created the media ID + * the media ID has not expired + + Args: + media_id: The media ID to verify + auth_user: The user_id of the uploader + """ + media = await self.store.get_local_media(media_id) + if media is None: + raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND) + + if media.user_id != auth_user.to_string(): + raise SynapseError( + 403, + "Only the creator of the media ID can upload to it", + errcode=Codes.FORBIDDEN, + ) + + if media.media_length is not None: + raise SynapseError( + 409, + "Media ID already has content", + errcode=Codes.CANNOT_OVERWRITE_MEDIA, + ) + + expired_time_ms = self.clock.time_msec() - self.unused_expiration_time + if media.created_ts < expired_time_ms: + raise NotFoundError("Media ID has expired") + + @trace + async def update_content( + self, + media_id: str, + media_type: str, + upload_name: Optional[str], + content: IO, + content_length: int, + auth_user: UserID, + ) -> None: + """Update the content of the given media ID. + + Args: + media_id: The media ID to replace. + media_type: The content type of the file. + upload_name: The name of the file, if provided. + content: A file like object that is the content to store + content_length: The length of the content + auth_user: The user_id of the uploader + """ + file_info = FileInfo(server_name=None, file_id=media_id) + fname = await self.media_storage.store_file(content, file_info) + logger.info("Stored local media in file %r", fname) + + await self.store.update_local_media( + media_id=media_id, + media_type=media_type, + upload_name=upload_name, + media_length=content_length, + user_id=auth_user, + ) + + try: + await self._generate_thumbnails(None, media_id, media_id, media_type) + except Exception as e: + logger.info("Failed to generate thumbnails: %s", e) + @trace async def create_content( self, @@ -229,8 +347,74 @@ async def create_content( return MXCUri(self.server_name, media_id) + def respond_not_yet_uploaded(self, request: SynapseRequest) -> None: + respond_with_json( + request, + 504, + cs_error("Media has not been uploaded yet", code=Codes.NOT_YET_UPLOADED), + send_cors=True, + ) + + async def get_local_media_info( + self, request: SynapseRequest, media_id: str, max_timeout_ms: int + ) -> Optional[LocalMedia]: + """Gets the info dictionary for given local media ID. If the media has + not been uploaded yet, this function will wait up to ``max_timeout_ms`` + milliseconds for the media to be uploaded. + + Args: + request: The incoming request. + media_id: The media ID of the content. (This is the same as + the file_id for local content.) + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. + + Returns: + Either the info dictionary for the given local media ID or + ``None``. If ``None``, then no further processing is necessary as + this function will send the necessary JSON response. 
+ """ + wait_until = self.clock.time_msec() + max_timeout_ms + while True: + # Get the info for the media + media_info = await self.store.get_local_media(media_id) + if not media_info: + logger.info("Media %s is unknown", media_id) + respond_404(request) + return None + + if media_info.quarantined_by: + logger.info("Media %s is quarantined", media_id) + respond_404(request) + return None + + # The file has been uploaded, so stop looping + if media_info.media_length is not None: + return media_info + + # Check if the media ID has expired and still hasn't been uploaded to. + now = self.clock.time_msec() + expired_time_ms = now - self.unused_expiration_time + if media_info.created_ts < expired_time_ms: + logger.info("Media %s has expired without being uploaded", media_id) + respond_404(request) + return None + + if now >= wait_until: + break + + await self.clock.sleep(0.5) + + logger.info("Media %s has not yet been uploaded", media_id) + self.respond_not_yet_uploaded(request) + return None + async def get_local_media( - self, request: SynapseRequest, media_id: str, name: Optional[str] + self, + request: SynapseRequest, + media_id: str, + name: Optional[str], + max_timeout_ms: int, ) -> None: """Responds to requests for local media, if exists, or returns 404. @@ -240,23 +424,24 @@ async def get_local_media( the file_id for local content.) name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. Returns: Resolves once a response has successfully been written to request """ - media_info = await self.store.get_local_media(media_id) - if not media_info or media_info["quarantined_by"]: - respond_404(request) + media_info = await self.get_local_media_info(request, media_id, max_timeout_ms) + if not media_info: return self.mark_recently_accessed(None, media_id) - media_type = media_info["media_type"] + media_type = media_info.media_type if not media_type: media_type = "application/octet-stream" - media_length = media_info["media_length"] - upload_name = name if name else media_info["upload_name"] - url_cache = media_info["url_cache"] + media_length = media_info.media_length + upload_name = name if name else media_info.upload_name + url_cache = media_info.url_cache file_info = FileInfo(None, media_id, url_cache=bool(url_cache)) @@ -271,6 +456,7 @@ async def get_remote_media( server_name: str, media_id: str, name: Optional[str], + max_timeout_ms: int, ) -> None: """Respond to requests for remote media. @@ -280,6 +466,8 @@ async def get_remote_media( media_id: The media ID of the content (as defined by the remote server). name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. 
Returns: Resolves once a response has successfully been written to request @@ -305,27 +493,33 @@ async def get_remote_media( key = (server_name, media_id) async with self.remote_media_linearizer.queue(key): responder, media_info = await self._get_remote_media_impl( - server_name, media_id + server_name, media_id, max_timeout_ms ) # We deliberately stream the file outside the lock - if responder: - media_type = media_info["media_type"] - media_length = media_info["media_length"] - upload_name = name if name else media_info["upload_name"] + if responder and media_info: + upload_name = name if name else media_info.upload_name await respond_with_responder( - request, responder, media_type, media_length, upload_name + request, + responder, + media_info.media_type, + media_info.media_length, + upload_name, ) else: respond_404(request) - async def get_remote_media_info(self, server_name: str, media_id: str) -> dict: + async def get_remote_media_info( + self, server_name: str, media_id: str, max_timeout_ms: int + ) -> RemoteMedia: """Gets the media info associated with the remote file, downloading if necessary. Args: server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. Returns: The media info of the file @@ -341,7 +535,7 @@ async def get_remote_media_info(self, server_name: str, media_id: str) -> dict: key = (server_name, media_id) async with self.remote_media_linearizer.queue(key): responder, media_info = await self._get_remote_media_impl( - server_name, media_id + server_name, media_id, max_timeout_ms ) # Ensure we actually use the responder so that it releases resources @@ -352,8 +546,8 @@ async def get_remote_media_info(self, server_name: str, media_id: str) -> dict: return media_info async def _get_remote_media_impl( - self, server_name: str, media_id: str - ) -> Tuple[Optional[Responder], dict]: + self, server_name: str, media_id: str, max_timeout_ms: int + ) -> Tuple[Optional[Responder], RemoteMedia]: """Looks for media in local cache, if not there then attempt to download from remote server. @@ -361,6 +555,8 @@ async def _get_remote_media_impl( server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. Returns: A tuple of responder and the media info of the file. 
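
Several media methods above gain a `max_timeout_ms` parameter so that download requests can wait for media that has been created but not yet uploaded. A minimal sketch of that bounded polling pattern, assuming a simplified `fetch_info` coroutine and the standard library clock rather than Synapse's `Clock`:

import asyncio
import time
from typing import Awaitable, Callable, Optional

async def wait_for_upload(
    fetch_info: Callable[[str], Awaitable[Optional[dict]]],
    media_id: str,
    max_timeout_ms: int,
) -> Optional[dict]:
    # Poll until the media row reports a length (content has been uploaded),
    # or until max_timeout_ms elapses.
    deadline = time.monotonic() + max_timeout_ms / 1000
    while True:
        info = await fetch_info(media_id)
        if info is None:
            return None  # unknown media ID: caller responds 404
        if info.get("media_length") is not None:
            return info  # upload has completed
        if time.monotonic() >= deadline:
            return None  # caller responds with "not yet uploaded"
        await asyncio.sleep(0.5)
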
@@ -373,15 +569,17 @@ async def _get_remote_media_impl( # If we have an entry in the DB, try and look for it if media_info: - file_id = media_info["filesystem_id"] + file_id = media_info.filesystem_id file_info = FileInfo(server_name, file_id) - if media_info["quarantined_by"]: + if media_info.quarantined_by: logger.info("Media is quarantined") raise NotFoundError() - if not media_info["media_type"]: - media_info["media_type"] = "application/octet-stream" + if not media_info.media_type: + media_info = attr.evolve( + media_info, media_type="application/octet-stream" + ) responder = await self.media_storage.fetch_media(file_info) if responder: @@ -391,8 +589,7 @@ async def _get_remote_media_impl( try: media_info = await self._download_remote_file( - server_name, - media_id, + server_name, media_id, max_timeout_ms ) except SynapseError: raise @@ -403,9 +600,9 @@ async def _get_remote_media_impl( if not media_info: raise e - file_id = media_info["filesystem_id"] - if not media_info["media_type"]: - media_info["media_type"] = "application/octet-stream" + file_id = media_info.filesystem_id + if not media_info.media_type: + media_info = attr.evolve(media_info, media_type="application/octet-stream") file_info = FileInfo(server_name, file_id) # We generate thumbnails even if another process downloaded the media @@ -415,7 +612,7 @@ async def _get_remote_media_impl( # otherwise they'll request thumbnails and get a 404 if they're not # ready yet. await self._generate_thumbnails( - server_name, media_id, file_id, media_info["media_type"] + server_name, media_id, file_id, media_info.media_type ) responder = await self.media_storage.fetch_media(file_info) @@ -425,7 +622,8 @@ async def _download_remote_file( self, server_name: str, media_id: str, - ) -> dict: + max_timeout_ms: int, + ) -> RemoteMedia: """Attempt to download the remote file from the given server name, using the given file_id as the local id. @@ -434,7 +632,8 @@ async def _download_remote_file( media_id: The media ID of the content (as defined by the remote server). This is different than the file_id, which is locally generated. - file_id: Local file ID + max_timeout_ms: the maximum number of milliseconds to wait for the + media to be uploaded. Returns: The media info of the file. @@ -458,7 +657,8 @@ async def _download_remote_file( # tell the remote server to 404 if it doesn't # recognise the server_name, to make sure we don't # end up with a routing loop. 
- "allow_remote": "false" + "allow_remote": "false", + "timeout_ms": str(max_timeout_ms), }, ) except RequestSendFailed as e: @@ -518,7 +718,7 @@ async def _download_remote_file( origin=server_name, media_id=media_id, media_type=media_type, - time_now_ms=self.clock.time_msec(), + time_now_ms=time_now_ms, upload_name=upload_name, media_length=length, filesystem_id=file_id, @@ -526,15 +726,17 @@ async def _download_remote_file( logger.info("Stored remote media in file %r", fname) - media_info = { - "media_type": media_type, - "media_length": length, - "upload_name": upload_name, - "created_ts": time_now_ms, - "filesystem_id": file_id, - } - - return media_info + return RemoteMedia( + media_origin=server_name, + media_id=media_id, + media_type=media_type, + media_length=length, + upload_name=upload_name, + created_ts=time_now_ms, + filesystem_id=file_id, + last_access_ts=time_now_ms, + quarantined_by=None, + ) def _get_thumbnail_requirements( self, media_type: str @@ -949,10 +1151,7 @@ async def delete_old_remote_media(self, before_ts: int) -> Dict[str, int]: deleted = 0 - for media in old_media: - origin = media["media_origin"] - media_id = media["media_id"] - file_id = media["filesystem_id"] + for origin, media_id, file_id in old_media: key = (origin, media_id) logger.info("Deleting: %r", key) diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 9b5a3dd5f405..44aac21de63f 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -240,15 +240,14 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: cache_result = await self.store.get_url_cache(url, ts) if ( cache_result - and cache_result["expires_ts"] > ts - and cache_result["response_code"] / 100 == 2 + and cache_result.expires_ts > ts + and cache_result.response_code // 100 == 2 ): # It may be stored as text in the database, not as bytes (such as # PostgreSQL). If so, encode it back before handing it on. - og = cache_result["og"] - if isinstance(og, str): - og = og.encode("utf8") - return og + if isinstance(cache_result.og, str): + return cache_result.og.encode("utf8") + return cache_result.og # If this URL can be accessed via an allowed oEmbed, use that instead. url_to_download = url diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py index a2c6e6842d0c..dd486dd3e288 100644 --- a/synapse/metrics/_reactor_metrics.py +++ b/synapse/metrics/_reactor_metrics.py @@ -12,17 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. 
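
Because the remote media record above is now an attrs object rather than a mutable dict, defaulting the content type is done with `attr.evolve`, which returns a copy with one field replaced. A tiny sketch of that pattern, with an illustrative (and assumed frozen) `Media` class:

import attr
from typing import Optional

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Media:
    media_id: str
    media_type: Optional[str]

record = Media(media_id="abc123", media_type=None)
if not record.media_type:
    # Frozen instances can't be mutated in place; evolve returns a copy
    # with just the named field changed.
    record = attr.evolve(record, media_type="application/octet-stream")
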
-import select +import logging import time -from typing import Any, Iterable, List, Tuple +from selectors import SelectSelector, _PollLikeSelector # type: ignore[attr-defined] +from typing import Any, Callable, Iterable from prometheus_client import Histogram, Metric from prometheus_client.core import REGISTRY, GaugeMetricFamily -from twisted.internet import reactor +from twisted.internet import reactor, selectreactor +from twisted.internet.asyncioreactor import AsyncioSelectorReactor from synapse.metrics._types import Collector +try: + from selectors import KqueueSelector +except ImportError: + + class KqueueSelector: # type: ignore[no-redef] + pass + + +try: + from twisted.internet.epollreactor import EPollReactor +except ImportError: + + class EPollReactor: # type: ignore[no-redef] + pass + + +try: + from twisted.internet.pollreactor import PollReactor +except ImportError: + + class PollReactor: # type: ignore[no-redef] + pass + + +logger = logging.getLogger(__name__) + # # Twisted reactor metrics # @@ -34,52 +62,100 @@ ) -class EpollWrapper: - """a wrapper for an epoll object which records the time between polls""" +class CallWrapper: + """A wrapper for a callable which records the time between calls""" - def __init__(self, poller: "select.epoll"): # type: ignore[name-defined] + def __init__(self, wrapped: Callable[..., Any]): self.last_polled = time.time() - self._poller = poller + self._wrapped = wrapped - def poll(self, *args, **kwargs) -> List[Tuple[int, int]]: # type: ignore[no-untyped-def] - # record the time since poll() was last called. This gives a good proxy for + def __call__(self, *args, **kwargs) -> Any: # type: ignore[no-untyped-def] + # record the time since this was last called. This gives a good proxy for # how long it takes to run everything in the reactor - ie, how long anything # waiting for the next tick will have to wait. tick_time.observe(time.time() - self.last_polled) - ret = self._poller.poll(*args, **kwargs) + ret = self._wrapped(*args, **kwargs) self.last_polled = time.time() return ret + +class ObjWrapper: + """A wrapper for an object which wraps a specified method in CallWrapper. + + Other methods/attributes are passed to the original object. + + This is necessary when the wrapped object does not allow the attribute to be + overwritten. + """ + + def __init__(self, wrapped: Any, method_name: str): + self._wrapped = wrapped + self._method_name = method_name + self._wrapped_method = CallWrapper(getattr(wrapped, method_name)) + def __getattr__(self, item: str) -> Any: - return getattr(self._poller, item) + if item == self._method_name: + return self._wrapped_method + + return getattr(self._wrapped, item) class ReactorLastSeenMetric(Collector): - def __init__(self, epoll_wrapper: EpollWrapper): - self._epoll_wrapper = epoll_wrapper + def __init__(self, call_wrapper: CallWrapper): + self._call_wrapper = call_wrapper def collect(self) -> Iterable[Metric]: cm = GaugeMetricFamily( "python_twisted_reactor_last_seen", "Seconds since the Twisted reactor was last seen", ) - cm.add_metric([], time.time() - self._epoll_wrapper.last_polled) + cm.add_metric([], time.time() - self._call_wrapper.last_polled) yield cm +# Twisted has already select a reasonable reactor for us, so assumptions can be +# made about the shape. 
+wrapper = None try: - # if the reactor has a `_poller` attribute, which is an `epoll` object - # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will - # measure the time between ticks - from select import epoll # type: ignore[attr-defined] - - poller = reactor._poller # type: ignore[attr-defined] -except (AttributeError, ImportError): - pass -else: - if isinstance(poller, epoll): - poller = EpollWrapper(poller) - reactor._poller = poller # type: ignore[attr-defined] - REGISTRY.register(ReactorLastSeenMetric(poller)) + if isinstance(reactor, (PollReactor, EPollReactor)): + reactor._poller = ObjWrapper(reactor._poller, "poll") # type: ignore[attr-defined] + wrapper = reactor._poller._wrapped_method # type: ignore[attr-defined] + + elif isinstance(reactor, selectreactor.SelectReactor): + # Twisted uses a module-level _select function. + wrapper = selectreactor._select = CallWrapper(selectreactor._select) + + elif isinstance(reactor, AsyncioSelectorReactor): + # For asyncio look at the underlying asyncio event loop. + asyncio_loop = reactor._asyncioEventloop # A sub-class of BaseEventLoop, + + # A sub-class of BaseSelector. + selector = asyncio_loop._selector # type: ignore[attr-defined] + + if isinstance(selector, SelectSelector): + wrapper = selector._select = CallWrapper(selector._select) # type: ignore[attr-defined] + + # poll, epoll, and /dev/poll. + elif isinstance(selector, _PollLikeSelector): + selector._selector = ObjWrapper(selector._selector, "poll") # type: ignore[attr-defined] + wrapper = selector._selector._wrapped_method # type: ignore[attr-defined] + + elif isinstance(selector, KqueueSelector): + selector._selector = ObjWrapper(selector._selector, "control") # type: ignore[attr-defined] + wrapper = selector._selector._wrapped_method # type: ignore[attr-defined] + + else: + # E.g. this does not support the (Windows-only) ProactorEventLoop. 
+ logger.warning( + "Skipping configuring ReactorLastSeenMetric: unexpected asyncio loop selector: %r via %r", + selector, + asyncio_loop, + ) +except Exception as e: + logger.warning("Configuring ReactorLastSeenMetric failed: %r", e) + + +if wrapper: + REGISTRY.register(ReactorLastSeenMetric(wrapper)) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0786d2063565..812144a128a0 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -23,6 +23,7 @@ Generator, Iterable, List, + Mapping, Optional, Tuple, TypeVar, @@ -39,6 +40,7 @@ from synapse.api import errors from synapse.api.errors import SynapseError +from synapse.api.presence import UserPresenceState from synapse.config import ConfigError from synapse.events import EventBase from synapse.events.presence_router import ( @@ -46,6 +48,7 @@ GET_USERS_FOR_STATES_CALLBACK, PresenceRouter, ) +from synapse.events.utils import ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, @@ -257,6 +260,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self.custom_template_dir = hs.config.server.custom_template_directory self._callbacks = hs.get_module_api_callbacks() self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled + self._event_serializer = hs.get_event_client_serializer() try: app_name = self._hs.config.email.email_app_name @@ -488,6 +492,25 @@ def register_web_resource(self, path: str, resource: Resource) -> None: """ self._hs.register_module_web_resource(path, resource) + def register_add_extra_fields_to_unsigned_client_event_callbacks( + self, + *, + add_field_to_unsigned_callback: Optional[ + ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ] = None, + ) -> None: + """Registers a callback that can be used to add fields to the unsigned + section of events. + + The callback is called every time an event is sent down to a client. + + Added in Synapse 1.96.0 + """ + if add_field_to_unsigned_callback is not None: + self._event_serializer.register_add_extra_fields_to_unsigned_client_event_callback( + add_field_to_unsigned_callback + ) + ######################################################################### # The following methods can be called by the module at any point in time. @@ -1184,6 +1207,37 @@ async def send_local_online_presence_to(self, users: Iterable[str]) -> None: presence_events, [destination] ) + async def set_presence_for_users( + self, users: Mapping[str, Tuple[str, Optional[str]]] + ) -> None: + """ + Update the internal presence state of users. + + This can be used for either local or remote users. + + Note that this method can only be run on the process that is configured to write to the + presence stream. By default, this is the main process. + + Added in Synapse v1.96.0. + """ + + # We pull out the presence handler here to break a cyclic + # dependency between the presence router and module API. 
+ presence_handler = self._hs.get_presence_handler() + + from synapse.handlers.presence import PresenceHandler + + assert isinstance(presence_handler, PresenceHandler) + + states = await presence_handler.current_state_for_users(users.keys()) + for user_id, (state, status_msg) in users.items(): + prev_state = states.setdefault(user_id, UserPresenceState.default(user_id)) + states[user_id] = prev_state.copy_and_replace( + state=state, status_msg=status_msg + ) + + await presence_handler._update_states(states.values(), force_notify=True) + def looping_background_call( self, f: Callable, @@ -1806,7 +1860,8 @@ async def room_is_in_public_room_list(self, room_id: str) -> bool: if not room: return False - return room.get("is_public", False) + # The first item is whether the room is public. + return room[0] async def add_room_to_public_room_list(self, room_id: str) -> None: """Publishes a room to the public room list. diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index ecaeef35118c..7419785aff34 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -295,7 +295,8 @@ async def check_event_allowed( raise except SynapseError as e: # FIXME: Being able to throw SynapseErrors is relied upon by - # some modules. PR #10386 accidentally broke this ability. + # some modules. PR https://github.com/matrix-org/synapse/pull/10386 + # accidentally broke this ability. # That said, we aren't keen on exposing this implementation detail # to modules and we should one day have a proper way to do what # is wanted. diff --git a/synapse/notifier.py b/synapse/notifier.py index 99e7715896fd..ee0bd84f1ecc 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -21,11 +21,13 @@ Dict, Iterable, List, + Literal, Optional, Set, Tuple, TypeVar, Union, + overload, ) import attr @@ -44,6 +46,7 @@ from synapse.streams.config import PaginationConfig from synapse.types import ( JsonDict, + MultiWriterStreamToken, PersistedEventPosition, RoomStreamToken, StrCollection, @@ -127,7 +130,7 @@ def __init__( def notify( self, stream_key: StreamKeyType, - stream_id: Union[int, RoomStreamToken], + stream_id: Union[int, RoomStreamToken, MultiWriterStreamToken], time_now_ms: int, ) -> None: """Notify any listeners for this user of a new event from an @@ -452,10 +455,48 @@ def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken) -> None: except Exception: logger.exception("Error pusher pool of event") + @overload + def on_new_event( + self, + stream_key: Literal[StreamKeyType.ROOM], + new_token: RoomStreamToken, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... + + @overload + def on_new_event( + self, + stream_key: Literal[StreamKeyType.RECEIPT], + new_token: MultiWriterStreamToken, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... + + @overload + def on_new_event( + self, + stream_key: Literal[ + StreamKeyType.ACCOUNT_DATA, + StreamKeyType.DEVICE_LIST, + StreamKeyType.PRESENCE, + StreamKeyType.PUSH_RULES, + StreamKeyType.TO_DEVICE, + StreamKeyType.TYPING, + StreamKeyType.UN_PARTIAL_STATED_ROOMS, + ], + new_token: int, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... 
+ def on_new_event( self, stream_key: StreamKeyType, - new_token: Union[int, RoomStreamToken], + new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[StrCollection] = None, ) -> None: diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 14784312dcb7..5934b1ef34a1 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -25,10 +25,13 @@ Sequence, Tuple, Union, + cast, ) from prometheus_client import Counter +from twisted.internet.defer import Deferred + from synapse.api.constants import ( MAIN_TIMELINE, EventContentFields, @@ -40,11 +43,15 @@ from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership +from synapse.storage.roommember import ProfileInfo from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator from synapse.types import JsonValue from synapse.types.state import StateFilter +from synapse.util import unwrapFirstError +from synapse.util.async_helpers import gather_results from synapse.util.caches import register_cache from synapse.util.metrics import measure_func from synapse.visibility import filter_event_for_clients_with_state @@ -342,15 +349,41 @@ async def _action_for_event_by_user( rules_by_user = await self._get_rules_for_event(event) actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {} - room_member_count = await self.store.get_number_joined_users_in_room( - event.room_id - ) - + # Gather a bunch of info in parallel. + # + # This has a lot of ignored types and casting due to the use of @cached + # decorated functions passed into run_in_background. + # + # See https://github.com/matrix-org/synapse/issues/16606 ( - power_levels, - sender_power_level, - ) = await self._get_power_levels_and_sender_level( - event, context, event_id_to_event + room_member_count, + (power_levels, sender_power_level), + related_events, + profiles, + ) = await make_deferred_yieldable( + cast( + "Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", + gather_results( + ( + run_in_background( # type: ignore[call-arg] + self.store.get_number_joined_users_in_room, event.room_id # type: ignore[arg-type] + ), + run_in_background( + self._get_power_levels_and_sender_level, + event, + context, + event_id_to_event, + ), + run_in_background(self._related_events, event), + run_in_background( # type: ignore[call-arg] + self.store.get_subset_users_in_room_with_profiles, + event.room_id, # type: ignore[arg-type] + rules_by_user.keys(), # type: ignore[arg-type] + ), + ), + consumeErrors=True, + ).addErrback(unwrapFirstError), + ) ) # Find the event's thread ID. @@ -366,8 +399,6 @@ async def _action_for_event_by_user( # the parent is part of a thread. thread_id = await self.store.get_thread_id(relation.parent_id) - related_events = await self._related_events(event) - # It's possible that old room versions have non-integer power levels (floats or # strings; even the occasional `null`). For old rooms, we interpret these as if # they were integers. Do this here for the `@room` power level threshold. 
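
The push-rule evaluator hunk above batches several independent lookups (member count, power levels, related events, member profiles) into one parallel gather instead of awaiting them one after another. A minimal sketch of the same idea using plain asyncio, rather than Synapse's Twisted helpers `run_in_background`/`gather_results`; the fetch functions are stand-ins:

import asyncio
from typing import Dict

async def fetch_member_count(room_id: str) -> int:
    await asyncio.sleep(0)  # stand-in for a database lookup
    return 2

async def fetch_power_levels(room_id: str) -> Dict[str, int]:
    await asyncio.sleep(0)  # stand-in for a state lookup
    return {"users_default": 0}

async def evaluate(room_id: str) -> None:
    # Run the independent lookups concurrently; the slowest one now bounds
    # the latency instead of their sum.
    member_count, power_levels = await asyncio.gather(
        fetch_member_count(room_id),
        fetch_power_levels(room_id),
    )
    print(member_count, power_levels)

asyncio.run(evaluate("!room:example.org"))
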
@@ -400,11 +431,6 @@ async def _action_for_event_by_user( self.hs.config.experimental.msc1767_enabled, # MSC3931 flag ) - users = rules_by_user.keys() - profiles = await self.store.get_subset_users_in_room_with_profiles( - event.room_id, users - ) - for uid, rules in rules_by_user.items(): if event.sender == uid: continue diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 63cf24a14d94..38701aea7240 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -238,7 +238,7 @@ async def send_request( data[_STREAM_POSITION_KEY] = { "streams": { - stream.NAME: stream.current_token(local_instance_name) + stream.NAME: stream.minimal_local_current_token() for stream in streams }, "instance_name": local_instance_name, @@ -433,7 +433,7 @@ async def _check_auth_and_handle( if self.WAIT_FOR_STREAMS: response[_STREAM_POSITION_KEY] = { - stream.NAME: stream.current_token(self._instance_name) + stream.NAME: stream.minimal_local_current_token() for stream in self._streams } diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d5337fe5882e..1312b6f21e71 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -126,8 +126,9 @@ async def on_rdata( StreamKeyType.ACCOUNT_DATA, token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: + new_token = self.store.get_max_receipt_stream_id() self.notifier.on_new_event( - StreamKeyType.RECEIPT, token, rooms=[row.room_id for row in rows] + StreamKeyType.RECEIPT, new_token, rooms=[row.room_id for row in rows] ) await self._pusher_pool.on_new_receipts({row.user_id for row in rows}) elif stream_name == ToDeviceStream.NAME: @@ -279,14 +280,6 @@ async def on_position( # may be streaming. self.notifier.notify_replication() - def on_remote_server_up(self, server: str) -> None: - """Called when get a new REMOTE_SERVER_UP command.""" - - # Let's wake up the transaction queue for the server in case we have - # pending stuff to send to it. - if self.send_handler: - self.send_handler.wake_destination(server) - async def wait_for_stream_position( self, instance_name: str, @@ -405,9 +398,6 @@ def __init__(self, hs: "HomeServer"): self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - def wake_destination(self, server: str) -> None: - self.federation_sender.wake_destination(server) - async def process_replication_rows( self, stream_name: str, token: int, rows: list ) -> None: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index b668bb5da1de..c14a18ba2efd 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -257,6 +257,11 @@ def __init__(self, hs: "HomeServer"): if hs.config.redis.redis_enabled: self._notifier.add_lock_released_callback(self.on_lock_released) + # Marks if we should send POSITION commands for all streams ASAP. This + # is checked by the `ReplicationStreamer` which manages sending + # RDATA/POSITION commands + self._should_announce_positions = True + def subscribe_to_channel(self, channel_name: str) -> None: """ Indicates that we wish to subscribe to a Redis channel by name. 
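
The command handler above stops sending POSITION commands inline and instead sets a `_should_announce_positions` flag that the streamer checks and clears from its own loop. A minimal sketch of that check-and-clear handoff (class and method names here are illustrative, not the Synapse API):

class CommandHandler:
    def __init__(self) -> None:
        # Start pessimistically: announce our positions on the first loop run.
        self._should_announce_positions = True

    def request_position_announcement(self) -> None:
        # Called when e.g. a new replication connection appears; the actual
        # sending happens later, on the streamer's loop.
        self._should_announce_positions = True

    def should_announce_positions(self) -> bool:
        return self._should_announce_positions

    def will_announce_positions(self) -> None:
        # The streamer calls this just before sending POSITION for every
        # stream, so repeated wake-ups don't re-send needlessly.
        self._should_announce_positions = False
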
@@ -397,29 +402,23 @@ def get_streams_to_replicate(self) -> List[Stream]: return self._streams_to_replicate def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand) -> None: - self.send_positions_to_connection(conn) + self.send_positions_to_connection() - def send_positions_to_connection(self, conn: IReplicationConnection) -> None: + def send_positions_to_connection(self) -> None: """Send current position of all streams this process is source of to the connection. """ - # We respond with current position of all streams this instance - # replicates. - for stream in self.get_streams_to_replicate(): - # Note that we use the current token as the prev token here (rather - # than stream.last_token), as we can't be sure that there have been - # no rows written between last token and the current token (since we - # might be racing with the replication sending bg process). - current_token = stream.current_token(self._instance_name) - self.send_command( - PositionCommand( - stream.NAME, - self._instance_name, - current_token, - current_token, - ) - ) + self._should_announce_positions = True + self._notifier.notify_replication() + + def should_announce_positions(self) -> bool: + """Check if we should send POSITION commands for all streams ASAP.""" + return self._should_announce_positions + + def will_announce_positions(self) -> None: + """Mark that we're about to send POSITIONs out for all streams.""" + self._should_announce_positions = False def on_USER_SYNC( self, conn: IReplicationConnection, cmd: UserSyncCommand @@ -588,6 +587,21 @@ def on_POSITION(self, conn: IReplicationConnection, cmd: PositionCommand) -> Non logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line()) + # Check if we can early discard this position. We can only do so for + # connected streams. + stream = self._streams[cmd.stream_name] + if stream.can_discard_position( + cmd.instance_name, cmd.prev_token, cmd.new_token + ) and self.is_stream_connected(conn, cmd.stream_name): + logger.debug( + "Discarding redundant POSITION %s/%s %s %s", + cmd.instance_name, + cmd.stream_name, + cmd.prev_token, + cmd.new_token, + ) + return + self._add_command_to_stream_queue(conn, cmd) async def _process_position( @@ -599,6 +613,18 @@ async def _process_position( """ stream = self._streams[stream_name] + if stream.can_discard_position( + cmd.instance_name, cmd.prev_token, cmd.new_token + ) and self.is_stream_connected(conn, cmd.stream_name): + logger.debug( + "Discarding redundant POSITION %s/%s %s %s", + cmd.instance_name, + cmd.stream_name, + cmd.prev_token, + cmd.new_token, + ) + return + # We're about to go and catch up with the stream, so remove from set # of connected streams. for streams in self._streams_by_connection.values(): @@ -611,10 +637,14 @@ async def _process_position( # Find where we previously streamed up to. current_token = stream.current_token(cmd.instance_name) - # If the position token matches our current token then we're up to - # date and there's nothing to do. Otherwise, fetch all updates - # between then and now. - missing_updates = cmd.prev_token != current_token + # If the incoming previous position is less than our current position + # then we're up to date and there's nothing to do. Otherwise, fetch + # all updates between then and now. + # + # Note: We also have to check that `current_token` is at most the + # new position, to handle the case where the stream gets "reset" + # (e.g. for `caches` and `typing` after the writer's restart). 
+ missing_updates = not (cmd.prev_token <= current_token <= cmd.new_token) while missing_updates: # Note: There may very well not be any new updates, but we check to # make sure. This can particularly happen for the event stream where @@ -622,8 +652,9 @@ async def _process_position( # for why this can happen. logger.info( - "Fetching replication rows for '%s' between %i and %i", + "Fetching replication rows for '%s' / %s between %i and %i", stream_name, + cmd.instance_name, current_token, cmd.new_token, ) @@ -644,7 +675,7 @@ async def _process_position( [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) + logger.info("Caught up with stream '%s' to %i", stream_name, current_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( @@ -653,12 +684,17 @@ async def _process_position( self._streams_by_connection.setdefault(conn, set()).add(stream_name) + def is_stream_connected( + self, conn: IReplicationConnection, stream_name: str + ) -> bool: + """Return if stream has been successfully connected and is ready to + receive updates""" + return stream_name in self._streams_by_connection.get(conn, ()) + def on_REMOTE_SERVER_UP( self, conn: IReplicationConnection, cmd: RemoteServerUpCommand ) -> None: """Called when get a new REMOTE_SERVER_UP command.""" - self._replication_data_handler.on_remote_server_up(cmd.data) - self._notifier.notify_remote_server_up(cmd.data) def on_LOCK_RELEASED( diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 7e96145b3b22..1fa37bb8888c 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -141,7 +141,7 @@ async def _send_subscribe(self) -> None: # We send out our positions when there is a new connection in case the # other side missed updates. We do this for Redis connections as the # otherside won't know we've connected and so won't issue a REPLICATE. - self.synapse_handler.send_positions_to_connection(self) + self.synapse_handler.send_positions_to_connection() def messageReceived(self, pattern: str, channel: str, message: str) -> None: """Received a message from redis.""" diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 1d9a29d22ee6..d15828f2d3b0 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -27,7 +27,7 @@ from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream -from synapse.replication.tcp.streams._base import StreamRow, Token +from synapse.replication.tcp.streams._base import CachesStream, StreamRow, Token from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -123,7 +123,7 @@ def on_notifier_poke(self) -> None: # We check up front to see if anything has actually changed, as we get # poked because of changes that happened on other instances. - if all( + if not self.command_handler.should_announce_positions() and all( stream.last_token == stream.current_token(self._instance_name) for stream in self.streams ): @@ -158,6 +158,21 @@ async def _run_notifier_loop(self) -> None: all_streams = list(all_streams) random.shuffle(all_streams) + if self.command_handler.should_announce_positions(): + # We need to send out POSITIONs for all streams, usually + # because a worker has reconnected. 
+ self.command_handler.will_announce_positions() + + for stream in all_streams: + self.command_handler.send_command( + PositionCommand( + stream.NAME, + self._instance_name, + stream.last_token, + stream.last_token, + ) + ) + for stream in all_streams: if stream.last_token == stream.current_token( self._instance_name @@ -204,6 +219,23 @@ async def _run_notifier_loop(self) -> None: # The token has advanced but there is no data to # send, so we send a `POSITION` to inform other # workers of the updated position. + # + # There are two reasons for this: 1) this instance + # requested a stream ID but didn't use it, or 2) + # this instance advanced its own stream position due + # to receiving notifications about other instances + # advancing their stream position. + + # We skip sending `POSITION` for the `caches` stream + # for the second case as a) it generates a lot of + # traffic as every worker would echo each write, and + # b) nothing cares if a given worker's caches stream + # position lags. + if stream.NAME == CachesStream.NAME: + # If there haven't been any writes since the + # `last_token` then we're in the second case. + if stream.minimal_local_current_token() <= last_token: + continue # Note: `last_token` may not *actually* be the # last token we sent out in a RDATA or POSITION. diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index c6088a0f99f8..7514429d99f1 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -33,6 +33,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.util.id_generators import AbstractStreamIdGenerator logger = logging.getLogger(__name__) @@ -107,22 +108,10 @@ def parse_row(cls, row: StreamRow) -> Any: def __init__( self, local_instance_name: str, - current_token_function: Callable[[str], Token], update_function: UpdateFunction, ): """Instantiate a Stream - `current_token_function` and `update_function` are callbacks which - should be implemented by subclasses. - - `current_token_function` takes an instance name, which is a writer to - the stream, and returns the position in the stream of the writer (as - viewed from the current process). On the writer process this is where - the writer has successfully written up to, whereas on other processes - this is the position which we have received updates up to over - replication. (Note that most streams have a single writer and so their - implementations ignore the instance name passed in). - `update_function` is called to get updates for this stream between a pair of stream tokens. See the `UpdateFunction` type definition for more info. @@ -133,12 +122,38 @@ def __init__( update_function: callback go get stream updates, as above """ self.local_instance_name = local_instance_name - self.current_token = current_token_function self.update_function = update_function # The token from which we last asked for updates self.last_token = self.current_token(self.local_instance_name) + def current_token(self, instance_name: str) -> Token: + """This takes an instance name, which is a writer to + the stream, and returns the position in the stream of the writer (as + viewed from the current process). + """ + # We can't make this an abstract class as it makes mypy unhappy. + raise NotImplementedError() + + def minimal_local_current_token(self) -> Token: + """Tries to return a minimal current token for the local instance, + i.e. for writers this would be the last successful write. 
+ + If local instance is not a writer (or has written yet) then falls back + to returning the normal "current token". + """ + raise NotImplementedError() + + def can_discard_position( + self, instance_name: str, prev_token: int, new_token: int + ) -> bool: + """Whether or not a position command for this stream can be discarded. + + Useful for streams that can never go backwards and where we already know + the stream ID for the instance has advanced. + """ + return False + def discard_updates_and_advance(self) -> None: """Called when the stream should advance but the updates would be discarded, e.g. when there are no currently connected workers. @@ -156,6 +171,14 @@ async def get_updates(self) -> StreamUpdateResult: and `limited` is whether there are more updates to fetch. """ current_token = self.current_token(self.local_instance_name) + + # If the minimum current token for the local instance is less than or + # equal to the last thing we published, we know that there are no + # updates. + if self.last_token >= self.minimal_local_current_token(): + self.last_token = current_token + return [], current_token, False + updates, current_token, limited = await self.get_updates_since( self.local_instance_name, self.last_token, current_token ) @@ -190,6 +213,33 @@ async def get_updates_since( return updates, upto_token, limited +class _StreamFromIdGen(Stream): + """Helper class for simple streams that use a stream ID generator""" + + def __init__( + self, + local_instance_name: str, + update_function: UpdateFunction, + stream_id_gen: "AbstractStreamIdGenerator", + ): + self._stream_id_gen = stream_id_gen + super().__init__(local_instance_name, update_function) + + def current_token(self, instance_name: str) -> Token: + return self._stream_id_gen.get_current_token_for_writer(instance_name) + + def minimal_local_current_token(self) -> Token: + return self._stream_id_gen.get_minimal_local_current_token() + + def can_discard_position( + self, instance_name: str, prev_token: int, new_token: int + ) -> bool: + # These streams can't go backwards, so we know we can ignore any + # positions where the tokens are from before the current token. + + return new_token <= self.current_token(instance_name) + + def current_token_without_instance( current_token: Callable[[], int] ) -> Callable[[str], int]: @@ -242,17 +292,29 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - self._current_token, self.store.get_all_new_backfill_event_rows, ) - def _current_token(self, instance_name: str) -> int: + def current_token(self, instance_name: str) -> Token: # The backfill stream over replication operates on *positive* numbers, # which means we need to negate it. return -self.store._backfill_id_gen.get_current_token_for_writer(instance_name) + def minimal_local_current_token(self) -> Token: + # The backfill stream over replication operates on *positive* numbers, + # which means we need to negate it. + return -self.store._backfill_id_gen.get_minimal_local_current_token() + + def can_discard_position( + self, instance_name: str, prev_token: int, new_token: int + ) -> bool: + # Backfill stream can't go backwards, so we know we can ignore any + # positions where the tokens are from before the current token. 
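The _StreamFromIdGen helper introduced above can be modelled with a toy ID generator. The class below is a simplified stand-in for AbstractStreamIdGenerator, purely to show how current_token, minimal_local_current_token and can_discard_position relate for streams that never go backwards:

from typing import Dict

class ToyIdGen:
    """Per-writer, monotonically increasing positions (a stand-in for
    AbstractStreamIdGenerator)."""

    def __init__(self, local_instance: str) -> None:
        self._local = local_instance
        self._positions: Dict[str, int] = {}

    def advance(self, instance: str, token: int) -> None:
        self._positions[instance] = max(self._positions.get(instance, 0), token)

    def get_current_token_for_writer(self, instance: str) -> int:
        return self._positions.get(instance, 0)

    def get_minimal_local_current_token(self) -> int:
        # A non-writer would fall back to the overall current token; here we
        # simply report the local writer's own position.
        return self._positions.get(self._local, 0)


def can_discard_position(id_gen: ToyIdGen, instance: str, new_token: int) -> bool:
    # Streams built on an ID generator never go backwards, so a POSITION whose
    # new_token is not ahead of what we already know can be dropped.
    return new_token <= id_gen.get_current_token_for_writer(instance)


gen = ToyIdGen("worker1")
gen.advance("worker1", 5)
gen.advance("master", 7)
assert can_discard_position(gen, "master", 6)       # we already know master is at 7
assert not can_discard_position(gen, "master", 9)   # this POSITION carries news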
-class PresenceStream(Stream): + return new_token <= self.current_token(instance_name) + + +class PresenceStream(_StreamFromIdGen): @attr.s(slots=True, frozen=True, auto_attribs=True) class PresenceStreamRow: user_id: str @@ -283,9 +345,7 @@ def __init__(self, hs: "HomeServer"): update_function = make_http_update_function(hs, self.NAME) super().__init__( - hs.get_instance_name(), - current_token_without_instance(store.get_current_presence_token), - update_function, + hs.get_instance_name(), update_function, store._presence_id_gen ) @@ -305,13 +365,18 @@ class PresenceFederationStreamRow: ROW_TYPE = PresenceFederationStreamRow def __init__(self, hs: "HomeServer"): - federation_queue = hs.get_presence_handler().get_federation_queue() + self._federation_queue = hs.get_presence_handler().get_federation_queue() super().__init__( hs.get_instance_name(), - federation_queue.get_current_token, - federation_queue.get_replication_rows, + self._federation_queue.get_replication_rows, ) + def current_token(self, instance_name: str) -> Token: + return self._federation_queue.get_current_token(instance_name) + + def minimal_local_current_token(self) -> Token: + return self._federation_queue.get_current_token(self.local_instance_name) + class TypingStream(Stream): @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -341,20 +406,25 @@ def __init__(self, hs: "HomeServer"): update_function: Callable[ [str, int, int, int], Awaitable[Tuple[List[Tuple[int, Any]], int, bool]] ] = typing_writer_handler.get_all_typing_updates - current_token_function = typing_writer_handler.get_current_token + self.current_token_function = typing_writer_handler.get_current_token else: # Query the typing writer process update_function = make_http_update_function(hs, self.NAME) - current_token_function = hs.get_typing_handler().get_current_token + self.current_token_function = hs.get_typing_handler().get_current_token super().__init__( hs.get_instance_name(), - current_token_without_instance(current_token_function), update_function, ) + def current_token(self, instance_name: str) -> Token: + return self.current_token_function() -class ReceiptsStream(Stream): + def minimal_local_current_token(self) -> Token: + return self.current_token_function() + + +class ReceiptsStream(_StreamFromIdGen): @attr.s(slots=True, frozen=True, auto_attribs=True) class ReceiptsStreamRow: room_id: str @@ -371,12 +441,12 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_max_receipt_stream_id), store.get_all_updated_receipts, + store._receipts_id_gen, ) -class PushRulesStream(Stream): +class PushRulesStream(_StreamFromIdGen): """A user has changed their push rules""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -387,20 +457,16 @@ class PushRulesStreamRow: ROW_TYPE = PushRulesStreamRow def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - self._current_token, - self.store.get_all_push_rule_updates, + store.get_all_push_rule_updates, + store._push_rules_stream_id_gen, ) - def _current_token(self, instance_name: str) -> int: - push_rules_token = self.store.get_max_push_rules_stream_id() - return push_rules_token - -class PushersStream(Stream): +class PushersStream(_StreamFromIdGen): """A user has added/changed/removed a pusher""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -418,8 +484,8 @@ def __init__(self, hs: "HomeServer"): 
super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_pushers_stream_token), store.get_all_updated_pushers_rows, + store._pushers_id_gen, ) @@ -447,15 +513,30 @@ class CachesStreamRow: ROW_TYPE = CachesStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_cache_stream_token_for_writer, - store.get_all_updated_caches, + self.store.get_all_updated_caches, ) + def current_token(self, instance_name: str) -> Token: + return self.store.get_cache_stream_token_for_writer(instance_name) + + def minimal_local_current_token(self) -> Token: + if self.store._cache_id_gen: + return self.store._cache_id_gen.get_minimal_local_current_token() + return self.current_token(self.local_instance_name) + + def can_discard_position( + self, instance_name: str, prev_token: int, new_token: int + ) -> bool: + # Caches streams can't go backwards, so we know we can ignore any + # positions where the tokens are from before the current token. + + return new_token <= self.current_token(instance_name) + -class DeviceListsStream(Stream): +class DeviceListsStream(_StreamFromIdGen): """Either a user has updated their devices or a remote server needs to be told about a device update. """ @@ -473,8 +554,8 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(self.store.get_device_stream_token), self._update_function, + self.store._device_list_id_gen, ) async def _update_function( @@ -525,7 +606,7 @@ async def _update_function( return updates, upper_limit_token, devices_limited or signatures_limited -class ToDeviceStream(Stream): +class ToDeviceStream(_StreamFromIdGen): """New to_device messages for a client""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -539,12 +620,12 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_to_device_stream_token), store.get_all_new_device_messages, + store._to_device_msg_id_gen, ) -class AccountDataStream(Stream): +class AccountDataStream(_StreamFromIdGen): """Global or per room account data was changed""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -560,8 +641,8 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(self.store.get_max_account_data_stream_id), self._update_function, + self.store._account_data_id_gen, ) async def _update_function( diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index da6d948e1ba3..57138fea80bc 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -19,10 +19,10 @@ import attr from synapse.replication.tcp.streams._base import ( - Stream, StreamRow, StreamUpdateResult, Token, + _StreamFromIdGen, ) if TYPE_CHECKING: @@ -139,7 +139,7 @@ class EventsStreamAllStateRow(BaseEventsStreamRow): TypeToRow = {Row.TypeId: Row for Row in _EventRows} -class EventsStream(Stream): +class EventsStream(_StreamFromIdGen): """We received a new event, or an event went from being an outlier to not""" NAME = "events" @@ -147,9 +147,7 @@ class EventsStream(Stream): def __init__(self, hs: "HomeServer"): self._store = hs.get_datastores().main super().__init__( - hs.get_instance_name(), - 
self._store._stream_id_gen.get_current_token_for_writer, - self._update_function, + hs.get_instance_name(), self._update_function, self._store._stream_id_gen ) async def _update_function( @@ -159,6 +157,12 @@ async def _update_function( current_token: Token, target_row_count: int, ) -> StreamUpdateResult: + # The events stream cannot be "reset", so its safe to return early if + # the from token is larger than the current token (the DB query will + # trivially return 0 rows anyway). + if from_token >= current_token: + return [], current_token, False + # the events stream merges together three separate sources: # * new events # * current_state changes diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 4046bdec6931..7f5af5852c1c 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -18,6 +18,7 @@ from synapse.replication.tcp.streams._base import ( Stream, + Token, current_token_without_instance, make_http_update_function, ) @@ -47,7 +48,7 @@ def __init__(self, hs: "HomeServer"): # will be a real FederationSender, which has stubs for current_token and # get_replication_rows.) federation_sender = hs.get_federation_sender() - current_token = current_token_without_instance( + self.current_token_func = current_token_without_instance( federation_sender.get_current_token ) update_function: Callable[ @@ -57,15 +58,21 @@ def __init__(self, hs: "HomeServer"): elif hs.should_send_federation(): # federation sender: Query master process update_function = make_http_update_function(hs, self.NAME) - current_token = self._stub_current_token + self.current_token_func = self._stub_current_token else: # other worker: stub out the update function (we're not interested in # any updates so when we get a POSITION we do nothing) update_function = self._stub_update_function - current_token = self._stub_current_token + self.current_token_func = self._stub_current_token - super().__init__(hs.get_instance_name(), current_token, update_function) + super().__init__(hs.get_instance_name(), update_function) + + def current_token(self, instance_name: str) -> Token: + return self.current_token_func(instance_name) + + def minimal_local_current_token(self) -> Token: + return self.current_token(self.local_instance_name) @staticmethod def _stub_current_token(instance_name: str) -> int: diff --git a/synapse/replication/tcp/streams/partial_state.py b/synapse/replication/tcp/streams/partial_state.py index a8ce5ffd7289..ad181d7e9331 100644 --- a/synapse/replication/tcp/streams/partial_state.py +++ b/synapse/replication/tcp/streams/partial_state.py @@ -15,7 +15,7 @@ import attr -from synapse.replication.tcp.streams import Stream +from synapse.replication.tcp.streams._base import _StreamFromIdGen if TYPE_CHECKING: from synapse.server import HomeServer @@ -27,7 +27,7 @@ class UnPartialStatedRoomStreamRow: room_id: str -class UnPartialStatedRoomStream(Stream): +class UnPartialStatedRoomStream(_StreamFromIdGen): """ Stream to notify about rooms becoming un-partial-stated; that is, when the background sync finishes such that we now have full state for @@ -41,8 +41,8 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_un_partial_stated_rooms_token, store.get_un_partial_stated_rooms_from_stream, + store._un_partial_stated_rooms_stream_id_gen, ) @@ -56,7 +56,7 @@ class UnPartialStatedEventStreamRow: rejection_status_changed: bool -class 
UnPartialStatedEventStream(Stream): +class UnPartialStatedEventStream(_StreamFromIdGen): """ Stream to notify about events becoming un-partial-stated. """ @@ -68,6 +68,6 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_un_partial_stated_events_token, store.get_un_partial_stated_events_from_stream, + store._un_partial_stated_events_stream_id_gen, ) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9bd0d764f849..91edfd45d72e 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -88,6 +88,7 @@ UserByThreePid, UserMembershipRestServlet, UserRegisterServlet, + UserReplaceMasterCrossSigningKeyRestServlet, UserRestServletV2, UsersRestServletV2, UserTokenRestServlet, @@ -292,6 +293,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ListDestinationsRestServlet(hs).register(http_server) RoomMessagesRestServlet(hs).register(http_server) RoomTimestampToEventRestServlet(hs).register(http_server) + UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server) UserByExternalId(hs).register(http_server) UserByThreePid(hs).register(http_server) diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index 8a617af599bb..a6ce787da1af 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -85,7 +85,19 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: destinations, total = await self._store.get_destinations_paginate( start, limit, destination, order_by, direction ) - response = {"destinations": destinations, "total": total} + response = { + "destinations": [ + { + "destination": r[0], + "retry_last_ts": r[1], + "retry_interval": r[2], + "failure_ts": r[3], + "last_successful_stream_ordering": r[4], + } + for r in destinations + ], + "total": total, + } if (start + limit) < total: response["next_token"] = str(start + len(destinations)) diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index b7637dff0bdb..8cf52688545a 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -17,6 +17,8 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple +import attr + from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -418,7 +420,7 @@ async def on_GET( start, limit, user_id, order_by, direction ) - ret = {"media": media, "total": total} + ret = {"media": [attr.asdict(m) for m in media], "total": total} if (start + limit) < total: ret["next_token"] = start + len(media) @@ -477,7 +479,7 @@ async def on_DELETE( ) deleted_media, total = await self.media_repository.delete_local_media_ids( - [row["media_id"] for row in media] + [m.media_id for m in media] ) return HTTPStatus.OK, {"deleted_media": deleted_media, "total": total} diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index ffce92d45ee1..f3e06d3da3bb 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -77,7 +77,18 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) valid = parse_boolean(request, "valid") token_list = await self.store.get_registration_tokens(valid) - return HTTPStatus.OK, {"registration_tokens": token_list} + return HTTPStatus.OK, { + 
"registration_tokens": [ + { + "token": t[0], + "uses_allowed": t[1], + "pending": t[2], + "completed": t[3], + "expiry_time": t[4], + } + for t in token_list + ] + } class NewRegistrationTokenRestServlet(RestServlet): diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 436718c8b227..7e40bea8aa18 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -16,6 +16,8 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast from urllib import parse as urlparse +import attr + from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter @@ -306,10 +308,13 @@ async def on_GET( raise NotFoundError("Room not found") members = await self.store.get_users_in_room(room_id) - ret["joined_local_devices"] = await self.store.count_devices_by_users(members) - ret["forgotten"] = await self.store.is_locally_forgotten_room(room_id) + result = attr.asdict(ret) + result["joined_local_devices"] = await self.store.count_devices_by_users( + members + ) + result["forgotten"] = await self.store.is_locally_forgotten_room(room_id) - return HTTPStatus.OK, ret + return HTTPStatus.OK, result async def on_DELETE( self, request: SynapseRequest, room_id: str @@ -408,8 +413,8 @@ async def on_GET( ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - ret = await self.store.get_room(room_id) - if not ret: + room = await self.store.get_room(room_id) + if not room: raise NotFoundError("Room not found") members = await self.store.get_users_in_room(room_id) @@ -437,14 +442,14 @@ async def on_GET( ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - ret = await self.store.get_room(room_id) - if not ret: + room = await self.store.get_room(room_id) + if not room: raise NotFoundError("Room not found") event_ids = await self._storage_controllers.state.get_current_state_ids(room_id) events = await self.store.get_events(event_ids.values()) now = self.clock.time_msec() - room_state = self._event_serializer.serialize_events(events.values(), now) + room_state = await self._event_serializer.serialize_events(events.values(), now) ret = {"state": room_state} return HTTPStatus.OK, ret @@ -724,7 +729,17 @@ async def on_GET( room_id, _ = await self.resolve_room_id(room_identifier) extremities = await self.store.get_forward_extremities_for_room(room_id) - return HTTPStatus.OK, {"count": len(extremities), "results": extremities} + result = [ + { + "event_id": ex[0], + "state_group": ex[1], + "depth": ex[2], + "received_ts": ex[3], + } + for ex in extremities + ] + + return HTTPStatus.OK, {"count": len(extremities), "results": result} class RoomEventContextServlet(RestServlet): @@ -779,22 +794,22 @@ async def on_GET( time_now = self.clock.time_msec() results = { - "events_before": self._event_serializer.serialize_events( + "events_before": await self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, ), - "event": self._event_serializer.serialize_event( + "event": await self._event_serializer.serialize_event( event_context.event, time_now, bundle_aggregations=event_context.aggregations, ), - "events_after": self._event_serializer.serialize_events( + "events_after": await self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, ), - "state": 
self._event_serializer.serialize_events( + "state": await self._event_serializer.serialize_events( event_context.state, time_now ), "start": event_context.start, diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 19780e4b4ca6..75d8a37ccfd8 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -108,7 +108,18 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: users_media, total = await self.store.get_users_media_usage_paginate( start, limit, from_ts, until_ts, order_by, direction, search_term ) - ret = {"users": users_media, "total": total} + ret = { + "users": [ + { + "user_id": r[0], + "displayname": r[1], + "media_count": r[2], + "media_length": r[3], + } + for r in users_media + ], + "total": total, + } if (start + limit) < total: ret["next_token"] = start + len(users_media) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 7fe16130e764..77446970cb83 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -18,6 +18,8 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +import attr + from synapse.api.constants import Direction, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -161,11 +163,13 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) # If support for MSC3866 is not enabled, don't show the approval flag. + filter = None if not self._msc3866_enabled: - for user in users: - del user["approved"] - ret = {"users": users, "total": total} + def _filter(a: attr.Attribute) -> bool: + return a.name != "approved" + + ret = {"users": [attr.asdict(u, filter=filter) for u in users], "total": total} if (start + limit) < total: ret["next_token"] = str(start + len(users)) @@ -626,6 +630,12 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")): raise SynapseError(HTTPStatus.FORBIDDEN, "HMAC incorrect") + should_issue_refresh_token = body.get("refresh_token", False) + if not isinstance(should_issue_refresh_token, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "refresh_token must be a boolean" + ) + # Reuse the parts of RegisterRestServlet to reduce code duplication from synapse.rest.client.register import RegisterRestServlet @@ -641,7 +651,9 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: approved=True, ) - result = await register._create_registration_details(user_id, body) + result = await register._create_registration_details( + user_id, body, should_issue_refresh_token=should_issue_refresh_token + ) return HTTPStatus.OK, result @@ -1266,6 +1278,46 @@ async def on_GET( } +class UserReplaceMasterCrossSigningKeyRestServlet(RestServlet): + """Allow a given user to replace their master cross-signing key without UIA. + + This replacement is permitted for a limited period (currently 10 minutes). + + While this is exposed via the admin API, this is intended for use by the + Matrix Authentication Service rather than server admins. 
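Returning to the admin list-users change above: it serialises attrs objects with attr.asdict and optionally drops the approved field via a filter. A standalone sketch of that pattern follows; note that attrs invokes filter callables with both the Attribute and its value, and that the callable must actually be passed to asdict for the field to be dropped.

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class UserInfo:
    name: str
    admin: bool
    approved: bool


def serialise_users(users, show_approval: bool):
    # Drop the `approved` key unless the caller wants it (e.g. MSC3866 enabled).
    filter = None
    if not show_approval:

        def _filter(a: attr.Attribute, _value: object) -> bool:
            return a.name != "approved"

        filter = _filter

    return [attr.asdict(u, filter=filter) for u in users]


print(serialise_users([UserInfo("@a:example.com", False, True)], show_approval=False))
# -> [{'name': '@a:example.com', 'admin': False}]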
+ """ + + PATTERNS = admin_patterns( + "/users/(?P[^/]*)/_allow_cross_signing_replacement_without_uia" + ) + REPLACEMENT_PERIOD_MS = 10 * 60 * 1000 # 10 minutes + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + + async def on_POST( + self, + request: SynapseRequest, + user_id: str, + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + if user_id is None: + raise NotFoundError("User not found") + + timestamp = ( + await self._store.allow_master_cross_signing_key_replacement_without_uia( + user_id, self.REPLACEMENT_PERIOD_MS + ) + ) + + if timestamp is None: + raise NotFoundError("User has no master cross-signing key") + + return HTTPStatus.OK, {"updatable_without_uia_before_ms": timestamp} + + class UserByExternalId(RestServlet): """Find a user based on an external ID from an auth provider""" diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 641390cb304d..0c0e82627db9 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -299,19 +299,16 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - # allow ASes to deactivate their own users - if requester.app_service: - await self._deactivate_account_handler.deactivate_account( - requester.user.to_string(), body.erase, requester + # allow ASes to deactivate their own users: + # ASes don't need user-interactive auth + if not requester.app_service: + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body.dict(exclude_unset=True), + "deactivate your account", ) - return 200, {} - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "deactivate your account", - ) result = await self._deactivate_account_handler.deactivate_account( requester.user.to_string(), body.erase, requester, id_server=body.id_server ) diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 82944ca71183..3534c3c259e2 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -147,7 +147,7 @@ async def on_GET(self, request: Request, room_id: str) -> Tuple[int, JsonDict]: if room is None: raise NotFoundError("Unknown room") - return 200, {"visibility": "public" if room["is_public"] else "private"} + return 200, {"visibility": "public" if room[0] else "private"} class PutBody(RequestBodyModel): visibility: Literal["public", "private"] = "public" diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 3eca4fe21ff1..5705f812a53e 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -93,7 +93,7 @@ async def on_GET( event = await self.event_handler.get_event(requester.user, None, event_id) if event: - result = self._event_serializer.serialize_event( + result = await self._event_serializer.serialize_event( event, self.clock.time_msec(), config=SerializeEventConfig(requester=requester), diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 70b8be1aa237..add8045439e7 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -376,9 +376,10 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id = requester.user.to_string() body = parse_json_object_from_request(request) - is_cross_signing_setup = ( - await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id) - ) + ( + 
is_cross_signing_setup, + master_key_updatable_without_uia, + ) = await self.e2e_keys_handler.check_cross_signing_setup(user_id) # Before MSC3967 we required UIA both when setting up cross signing for the # first time and when resetting the device signing key. With MSC3967 we only @@ -386,9 +387,14 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # time. Because there is no UIA in MSC3861, for now we throw an error if the # user tries to reset the device signing key when MSC3861 is enabled, but allow # first-time setup. + # + # XXX: We now have a get-out clause by which MAS can temporarily mark the master + # key as replaceable. It should do its own equivalent of user interactive auth + # before doing so. if self.hs.config.experimental.msc3861.enabled: - # There is no way to reset the device signing key with MSC3861 - if is_cross_signing_setup: + # The auth service has to explicitly mark the master key as replaceable + # without UIA to reset the device signing key with MSC3861. + if is_cross_signing_setup and not master_key_updatable_without_uia: raise SynapseError( HTTPStatus.NOT_IMPLEMENTED, "Resetting cross signing keys is not yet supported with MSC3861", diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index e7fe1332e776..5688d8593d18 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -87,7 +87,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "actions": pa.actions, "ts": pa.received_ts, "event": ( - self._event_serializer.serialize_event( + await self._event_serializer.serialize_event( notif_events[pa.event_id], now, config=serialize_options, diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py index d578faa96984..054a391f2630 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py @@ -42,15 +42,13 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.auth = hs.get_auth() - self._use_presence = hs.config.server.use_presence - async def on_GET( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) - if not self._use_presence: + if not self.hs.config.server.presence_enabled: return 200, {"presence": "offline"} if requester.user != user: @@ -96,7 +94,7 @@ async def on_PUT( except Exception: raise SynapseError(400, "Unable to parse state") - if self._use_presence: + if self.hs.config.server.track_presence: await self.presence_handler.set_state(user, requester.device_id, state) return 200, {} diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 553938ce9d13..96f57269119e 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -859,7 +859,7 @@ async def on_GET( # per MSC2676, /rooms/{roomId}/event/{eventId}, should return the # *original* event, rather than the edited version - event_dict = self._event_serializer.serialize_event( + event_dict = await self._event_serializer.serialize_event( event, self.clock.time_msec(), bundle_aggregations=aggregations, @@ -911,25 +911,25 @@ async def on_GET( time_now = self.clock.time_msec() serializer_options = SerializeEventConfig(requester=requester) results = { - "events_before": self._event_serializer.serialize_events( + "events_before": await self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, 
config=serializer_options, ), - "event": self._event_serializer.serialize_event( + "event": await self._event_serializer.serialize_event( event_context.event, time_now, bundle_aggregations=event_context.aggregations, config=serializer_options, ), - "events_after": self._event_serializer.serialize_events( + "events_after": await self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, config=serializer_options, ), - "state": self._event_serializer.serialize_events( + "state": await self._event_serializer.serialize_events( event_context.state, time_now, config=serializer_options, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 42bdd3bb108b..33fde6c6f82e 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -384,7 +384,7 @@ async def encode_invited( """ invited = {} for room in rooms: - invite = self._event_serializer.serialize_event( + invite = await self._event_serializer.serialize_event( room.invite, time_now, config=serialize_options ) unsigned = dict(invite.get("unsigned", {})) @@ -415,7 +415,7 @@ async def encode_knocked( """ knocked = {} for room in rooms: - knock = self._event_serializer.serialize_event( + knock = await self._event_serializer.serialize_event( room.knock, time_now, config=serialize_options ) @@ -506,10 +506,10 @@ async def encode_room( event.room_id, ) - serialized_state = self._event_serializer.serialize_events( + serialized_state = await self._event_serializer.serialize_events( state_events, time_now, config=serialize_options ) - serialized_timeline = self._event_serializer.serialize_events( + serialized_timeline = await self._event_serializer.serialize_events( timeline_events, time_now, config=serialize_options, diff --git a/synapse/rest/media/create_resource.py b/synapse/rest/media/create_resource.py new file mode 100644 index 000000000000..994afdf13ca4 --- /dev/null +++ b/synapse/rest/media/create_resource.py @@ -0,0 +1,83 @@ +# Copyright 2023 Beeper Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re +from typing import TYPE_CHECKING + +from synapse.api.errors import LimitExceededError +from synapse.api.ratelimiting import Ratelimiter +from synapse.http.server import respond_with_json +from synapse.http.servlet import RestServlet +from synapse.http.site import SynapseRequest + +if TYPE_CHECKING: + from synapse.media.media_repository import MediaRepository + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class CreateResource(RestServlet): + PATTERNS = [re.compile("/_matrix/media/v1/create")] + + def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): + super().__init__() + + self.media_repo = media_repo + self.clock = hs.get_clock() + self.auth = hs.get_auth() + self.max_pending_media_uploads = hs.config.media.max_pending_media_uploads + + # A rate limiter for creating new media IDs. 
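CreateResource above implements the first half of an asynchronous upload flow; together with the AsyncUploadServlet further down in this diff, a client performs two requests: reserve a media ID, then upload the bytes to it. A rough client-side sketch under assumed values (hypothetical homeserver URL and access token; error handling elided):

import requests  # illustrative client-side sketch only

BASE = "https://synapse.example.com"                 # hypothetical homeserver
AUTH = {"Authorization": "Bearer <access_token>"}    # placeholder token

# Step 1: reserve a media ID before the content is available.
resp = requests.post(f"{BASE}/_matrix/media/v1/create", headers=AUTH)
resp.raise_for_status()
content_uri = resp.json()["content_uri"]             # e.g. mxc://example.com/AbCdEf
server_name, media_id = content_uri[len("mxc://"):].split("/", 1)

# Step 2: upload the bytes to the reserved ID. This must happen before
# unused_expires_at, and the server name must be the local one.
with open("photo.png", "rb") as f:
    upload = requests.put(
        f"{BASE}/_matrix/media/v3/upload/{server_name}/{media_id}",
        headers={**AUTH, "Content-Type": "image/png"},
        data=f,
    )
upload.raise_for_status()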
+ self._create_media_rate_limiter = Ratelimiter( + store=hs.get_datastores().main, + clock=self.clock, + cfg=hs.config.ratelimiting.rc_media_create, + ) + + async def on_POST(self, request: SynapseRequest) -> None: + requester = await self.auth.get_user_by_req(request) + + # If the create media requests for the user are over the limit, drop them. + await self._create_media_rate_limiter.ratelimit(requester) + + ( + reached_pending_limit, + first_expiration_ts, + ) = await self.media_repo.reached_pending_media_limit(requester.user) + if reached_pending_limit: + raise LimitExceededError( + limiter_name="max_pending_media_uploads", + retry_after_ms=first_expiration_ts - self.clock.time_msec(), + ) + + content_uri, unused_expires_at = await self.media_repo.create_media_id( + requester.user + ) + + logger.info( + "Created Media URI %r that if unused will expire at %d", + content_uri, + unused_expires_at, + ) + respond_with_json( + request, + 200, + { + "content_uri": content_uri, + "unused_expires_at": unused_expires_at, + }, + send_cors=True, + ) diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index 65b9ff52faaf..60cd87548c13 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -17,9 +17,13 @@ from typing import TYPE_CHECKING, Optional from synapse.http.server import set_corp_headers, set_cors_headers -from synapse.http.servlet import RestServlet, parse_boolean +from synapse.http.servlet import RestServlet, parse_boolean, parse_integer from synapse.http.site import SynapseRequest -from synapse.media._base import respond_404 +from synapse.media._base import ( + DEFAULT_MAX_TIMEOUT_MS, + MAXIMUM_ALLOWED_MAX_TIMEOUT_MS, + respond_404, +) from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: @@ -65,12 +69,16 @@ async def on_GET( ) # Limited non-standard form of CSP for IE11 request.setHeader(b"X-Content-Security-Policy", b"sandbox;") - request.setHeader( - b"Referrer-Policy", - b"no-referrer", + request.setHeader(b"Referrer-Policy", b"no-referrer") + max_timeout_ms = parse_integer( + request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS ) + max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS) + if self._is_mine_server_name(server_name): - await self.media_repo.get_local_media(request, media_id, file_name) + await self.media_repo.get_local_media( + request, media_id, file_name, max_timeout_ms + ) else: allow_remote = parse_boolean(request, "allow_remote", default=True) if not allow_remote: @@ -83,5 +91,5 @@ async def on_GET( return await self.media_repo.get_remote_media( - request, server_name, media_id, file_name + request, server_name, media_id, file_name, max_timeout_ms ) diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py index 2089bb10296c..ca65116b8487 100644 --- a/synapse/rest/media/media_repository_resource.py +++ b/synapse/rest/media/media_repository_resource.py @@ -18,10 +18,11 @@ from synapse.http.server import HttpServer, JsonResource from .config_resource import MediaConfigResource +from .create_resource import CreateResource from .download_resource import DownloadResource from .preview_url_resource import PreviewUrlResource from .thumbnail_resource import ThumbnailResource -from .upload_resource import UploadResource +from .upload_resource import AsyncUploadServlet, UploadServlet if TYPE_CHECKING: from synapse.server import HomeServer @@ -91,8 +92,9 @@ def register_servlets(http_server: 
HttpServer, hs: "HomeServer") -> None: # Note that many of these should not exist as v1 endpoints, but empirically # a lot of traffic still goes to them. - - UploadResource(hs, media_repo).register(http_server) + CreateResource(hs, media_repo).register(http_server) + UploadServlet(hs, media_repo).register(http_server) + AsyncUploadServlet(hs, media_repo).register(http_server) DownloadResource(hs, media_repo).register(http_server) ThumbnailResource(hs, media_repo, media_repo.media_storage).register( http_server diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 85b6bdbe7254..681f2a5a273f 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -23,6 +23,8 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.media._base import ( + DEFAULT_MAX_TIMEOUT_MS, + MAXIMUM_ALLOWED_MAX_TIMEOUT_MS, FileInfo, ThumbnailInfo, respond_404, @@ -75,15 +77,19 @@ async def on_GET( method = parse_string(request, "method", "scale") # TODO Parse the Accept header to get an prioritised list of thumbnail types. m_type = "image/png" + max_timeout_ms = parse_integer( + request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS + ) + max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS) if self._is_mine_server_name(server_name): if self.dynamic_thumbnails: await self._select_or_generate_local_thumbnail( - request, media_id, width, height, method, m_type + request, media_id, width, height, method, m_type, max_timeout_ms ) else: await self._respond_local_thumbnail( - request, media_id, width, height, method, m_type + request, media_id, width, height, method, m_type, max_timeout_ms ) self.media_repo.mark_recently_accessed(None, media_id) else: @@ -95,14 +101,21 @@ async def on_GET( respond_404(request) return - if self.dynamic_thumbnails: - await self._select_or_generate_remote_thumbnail( - request, server_name, media_id, width, height, method, m_type - ) - else: - await self._respond_remote_thumbnail( - request, server_name, media_id, width, height, method, m_type - ) + remote_resp_function = ( + self._select_or_generate_remote_thumbnail + if self.dynamic_thumbnails + else self._respond_remote_thumbnail + ) + await remote_resp_function( + request, + server_name, + media_id, + width, + height, + method, + m_type, + max_timeout_ms, + ) self.media_repo.mark_recently_accessed(server_name, media_id) async def _respond_local_thumbnail( @@ -113,15 +126,12 @@ async def _respond_local_thumbnail( height: int, method: str, m_type: str, + max_timeout_ms: int, ) -> None: - media_info = await self.store.get_local_media(media_id) - + media_info = await self.media_repo.get_local_media_info( + request, media_id, max_timeout_ms + ) if not media_info: - respond_404(request) - return - if media_info["quarantined_by"]: - logger.info("Media is quarantined") - respond_404(request) return thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) @@ -134,7 +144,7 @@ async def _respond_local_thumbnail( thumbnail_infos, media_id, media_id, - url_cache=bool(media_info["url_cache"]), + url_cache=bool(media_info.url_cache), server_name=None, ) @@ -146,15 +156,13 @@ async def _select_or_generate_local_thumbnail( desired_height: int, desired_method: str, desired_type: str, + max_timeout_ms: int, ) -> None: - media_info = await self.store.get_local_media(media_id) + media_info = await self.media_repo.get_local_media_info( + request, media_id, 
max_timeout_ms + ) if not media_info: - respond_404(request) - return - if media_info["quarantined_by"]: - logger.info("Media is quarantined") - respond_404(request) return thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) @@ -168,7 +176,7 @@ async def _select_or_generate_local_thumbnail( file_info = FileInfo( server_name=None, file_id=media_id, - url_cache=media_info["url_cache"], + url_cache=bool(media_info.url_cache), thumbnail=info, ) @@ -188,7 +196,7 @@ async def _select_or_generate_local_thumbnail( desired_height, desired_method, desired_type, - url_cache=bool(media_info["url_cache"]), + url_cache=bool(media_info.url_cache), ) if file_path: @@ -206,14 +214,20 @@ async def _select_or_generate_remote_thumbnail( desired_height: int, desired_method: str, desired_type: str, + max_timeout_ms: int, ) -> None: - media_info = await self.media_repo.get_remote_media_info(server_name, media_id) + media_info = await self.media_repo.get_remote_media_info( + server_name, media_id, max_timeout_ms + ) + if not media_info: + respond_404(request) + return thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) - file_id = media_info["filesystem_id"] + file_id = media_info.filesystem_id for info in thumbnail_infos: t_w = info.width == desired_width @@ -224,7 +238,7 @@ async def _select_or_generate_remote_thumbnail( if t_w and t_h and t_method and t_type: file_info = FileInfo( server_name=server_name, - file_id=media_info["filesystem_id"], + file_id=file_id, thumbnail=info, ) @@ -263,11 +277,16 @@ async def _respond_remote_thumbnail( height: int, method: str, m_type: str, + max_timeout_ms: int, ) -> None: # TODO: Don't download the whole remote file # We should proxy the thumbnail from the remote server instead of # downloading the remote file and generating our own thumbnails. - media_info = await self.media_repo.get_remote_media_info(server_name, media_id) + media_info = await self.media_repo.get_remote_media_info( + server_name, media_id, max_timeout_ms + ) + if not media_info: + return thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id @@ -280,7 +299,7 @@ async def _respond_remote_thumbnail( m_type, thumbnail_infos, media_id, - media_info["filesystem_id"], + media_info.filesystem_id, url_cache=False, server_name=server_name, ) diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 949326d85dac..62d3e228a874 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -15,7 +15,7 @@ import logging import re -from typing import IO, TYPE_CHECKING, Dict, List, Optional +from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple from synapse.api.errors import Codes, SynapseError from synapse.http.server import respond_with_json @@ -29,23 +29,24 @@ logger = logging.getLogger(__name__) +# The name of the lock to use when uploading media. 
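The download and thumbnail endpoints above repeat the same pattern: parse a client-supplied timeout_ms and cap it. Factored out, it is just a clamp; the numeric limits below are illustrative, the real constants are DEFAULT_MAX_TIMEOUT_MS and MAXIMUM_ALLOWED_MAX_TIMEOUT_MS in synapse.media._base.

from typing import Optional

def clamp_timeout_ms(requested: Optional[int],
                     default_ms: int = 20_000,
                     maximum_ms: int = 60_000) -> int:
    """Clamp a client-supplied timeout_ms query parameter to a server maximum."""
    if requested is None:
        requested = default_ms
    return min(requested, maximum_ms)

assert clamp_timeout_ms(None) == 20_000
assert clamp_timeout_ms(120_000) == 60_000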
+_UPLOAD_MEDIA_LOCK_NAME = "upload_media" -class UploadResource(RestServlet): - PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload")] +class BaseUploadServlet(RestServlet): def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): super().__init__() self.media_repo = media_repo self.filepaths = media_repo.filepaths self.store = hs.get_datastores().main - self.clock = hs.get_clock() + self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.media.max_upload_size - self.clock = hs.get_clock() - async def on_POST(self, request: SynapseRequest) -> None: - requester = await self.auth.get_user_by_req(request) + def _get_file_metadata( + self, request: SynapseRequest + ) -> Tuple[int, Optional[str], str]: raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) @@ -88,6 +89,16 @@ async def on_POST(self, request: SynapseRequest) -> None: # disposition = headers.getRawHeaders(b"Content-Disposition")[0] # TODO(markjh): parse content-dispostion + return content_length, upload_name, media_type + + +class UploadServlet(BaseUploadServlet): + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload$")] + + async def on_POST(self, request: SynapseRequest) -> None: + requester = await self.auth.get_user_by_req(request) + content_length, upload_name, media_type = self._get_file_metadata(request) + try: content: IO = request.content # type: ignore content_uri = await self.media_repo.create_content( @@ -103,3 +114,53 @@ async def on_POST(self, request: SynapseRequest) -> None: respond_with_json( request, 200, {"content_uri": str(content_uri)}, send_cors=True ) + + +class AsyncUploadServlet(BaseUploadServlet): + PATTERNS = [ + re.compile( + "/_matrix/media/v3/upload/(?P[^/]*)/(?P[^/]*)$" + ) + ] + + async def on_PUT( + self, request: SynapseRequest, server_name: str, media_id: str + ) -> None: + requester = await self.auth.get_user_by_req(request) + + if server_name != self.server_name: + raise SynapseError( + 404, + "Non-local server name specified", + errcode=Codes.NOT_FOUND, + ) + + lock = await self.store.try_acquire_lock(_UPLOAD_MEDIA_LOCK_NAME, media_id) + if not lock: + raise SynapseError( + 409, + "Media ID cannot be overwritten", + errcode=Codes.CANNOT_OVERWRITE_MEDIA, + ) + + async with lock: + await self.media_repo.verify_can_upload(media_id, requester.user) + content_length, upload_name, media_type = self._get_file_metadata(request) + + try: + content: IO = request.content # type: ignore + await self.media_repo.update_content( + media_id, + media_type, + upload_name, + content, + content_length, + requester.user, + ) + except SpamMediaException: + # For uploading of media we want to respond with a 400, instead of + # the default 404, as that would just be confusing. 
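The try_acquire_lock / async with dance in AsyncUploadServlet above guards a media ID against concurrent or repeated uploads. Synapse's real lock is database-backed and works across workers; a purely in-memory analogue, just to show the shape of the control flow, might look like this:

from contextlib import asynccontextmanager
from typing import AsyncIterator, Set

_held_media_ids: Set[str] = set()

@asynccontextmanager
async def try_acquire_media_lock(media_id: str) -> AsyncIterator[bool]:
    """Yield True if the lock for this media ID was acquired, else False."""
    if media_id in _held_media_ids:
        yield False
        return
    _held_media_ids.add(media_id)
    try:
        yield True
    finally:
        _held_media_ids.discard(media_id)

async def handle_async_upload(media_id: str, content: bytes) -> None:
    async with try_acquire_media_lock(media_id) as acquired:
        if not acquired:
            # The servlet maps this case to 409 / Codes.CANNOT_OVERWRITE_MEDIA.
            raise RuntimeError("Media ID cannot be overwritten")
        # verify_can_upload() and update_content() would run here while the
        # lock is held, so two concurrent PUTs cannot both write the media.
        _ = content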
+ raise SynapseError(400, "Bad content") + + logger.info("Uploaded content for media ID %r", media_id) + respond_with_json(request, 200, {}, send_cors=True) diff --git a/synapse/server.py b/synapse/server.py index 71ead524d684..5bfb4ba4eb9c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -786,7 +786,7 @@ def get_oidc_handler(self) -> "OidcHandler": @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer(self) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 9732dbdb6e66..44b999677a1e 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -178,6 +178,8 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: "avatar_url": self._config.servernotices.server_notices_mxid_avatar_url, } + # `ignore_forced_encryption` is used to bypass `encryption_enabled_by_default_for_room_type` + # setting if it set, since the server notices will not be encrypted anyway. room_id, _, _ = await self._room_creation_handler.create_room( requester, config={ @@ -187,6 +189,7 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: }, ratelimit=False, creator_join_profile=join_profile, + ignore_forced_encryption=True, ) self.maybe_get_notice_room_for_user.invalidate((user_id,)) @@ -226,6 +229,7 @@ async def maybe_invite_user_to_room(self, user_id: str, room_id: str) -> None: target=UserID.from_string(user_id), room_id=room_id, action="invite", + ratelimit=False, ) async def _update_notice_user_profile_if_changed( @@ -268,5 +272,6 @@ async def _update_notice_user_profile_if_changed( target=UserID.from_string(self.server_notices_mxid), room_id=room_id, action="join", + ratelimit=False, content={"displayname": display_name, "avatar_url": avatar_url}, ) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 12829d3d7d13..62fbd055348a 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -28,6 +28,7 @@ Sequence, Tuple, Type, + cast, ) import attr @@ -48,7 +49,11 @@ if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.database import DatabasePool, LoggingTransaction + from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, + ) logger = logging.getLogger(__name__) @@ -488,14 +493,14 @@ async def do_next_background_update(self, sleep: bool = True) -> bool: True if we have finished running all the background updates, otherwise False """ - def get_background_updates_txn(txn: Cursor) -> List[Dict[str, Any]]: + def get_background_updates_txn(txn: Cursor) -> List[Tuple[str, Optional[str]]]: txn.execute( """ SELECT update_name, depends_on FROM background_updates ORDER BY ordering, update_name """ ) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, Optional[str]]], txn.fetchall()) if not self._current_background_update: all_pending_updates = await self.db_pool.runInteraction( @@ -507,14 +512,13 @@ def get_background_updates_txn(txn: Cursor) -> List[Dict[str, Any]]: return True # find the first update which isn't dependent on another one in the queue. 
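The background-updates change above replaces cursor_to_dict rows with plain tuples, and the selection loop that follows picks the first pending update whose dependency is not itself pending. A standalone sketch of both, assuming a simple background_updates table in sqlite3:

import sqlite3
from typing import List, Optional, Tuple

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE background_updates (update_name TEXT, depends_on TEXT, ordering INT)")
conn.execute("INSERT INTO background_updates VALUES ('b', 'a', 2)")
conn.execute("INSERT INTO background_updates VALUES ('a', NULL, 1)")

# Rows come back as tuples; unpack them instead of indexing by column name.
rows: List[Tuple[str, Optional[str]]] = conn.execute(
    "SELECT update_name, depends_on FROM background_updates ORDER BY ordering, update_name"
).fetchall()

# Pick the first update that does not depend on another pending update.
pending = {update_name for update_name, _ in rows}
for update_name, depends_on in rows:
    if not depends_on or depends_on not in pending:
        break
else:
    raise RuntimeError("All pending background updates have dependencies: cycle?")

print(update_name)  # -> "a"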
- pending = {update["update_name"] for update in all_pending_updates} - for upd in all_pending_updates: - depends_on = upd["depends_on"] + pending = {update_name for update_name, depends_on in all_pending_updates} + for update_name, depends_on in all_pending_updates: if not depends_on or depends_on not in pending: break logger.info( "Not starting on bg update %s until %s is done", - upd["update_name"], + update_name, depends_on, ) else: @@ -524,7 +528,7 @@ def get_background_updates_txn(txn: Cursor) -> List[Dict[str, Any]]: "another: dependency cycle?" ) - self._current_background_update = upd["update_name"] + self._current_background_update = update_name # We have a background update to run, otherwise we would have returned # early. @@ -746,10 +750,10 @@ async def create_index_in_background( The named index will be dropped upon completion of the new index. """ - def create_index_psql(conn: Connection) -> None: + def create_index_psql(conn: "LoggingDatabaseConnection") -> None: conn.rollback() # postgres insists on autocommit for the index - conn.set_session(autocommit=True) # type: ignore + conn.engine.attempt_to_set_autocommit(conn.conn, True) try: c = conn.cursor() @@ -793,9 +797,9 @@ def create_index_psql(conn: Connection) -> None: undo_timeout_sql = f"SET statement_timeout = {default_timeout}" conn.cursor().execute(undo_timeout_sql) - conn.set_session(autocommit=False) # type: ignore + conn.engine.attempt_to_set_autocommit(conn.conn, False) - def create_index_sqlite(conn: Connection) -> None: + def create_index_sqlite(conn: "LoggingDatabaseConnection") -> None: # Sqlite doesn't support concurrent creation of indexes. # # We assume that sqlite doesn't give us invalid indices; however @@ -825,7 +829,9 @@ def create_index_sqlite(conn: Connection) -> None: c.execute(sql) if isinstance(self.db_pool.engine, engines.PostgresEngine): - runner: Optional[Callable[[Connection], None]] = create_index_psql + runner: Optional[ + Callable[[LoggingDatabaseConnection], None] + ] = create_index_psql elif psql_only: runner = None else: diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index f39ae2d63536..1529c86cc50a 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -542,13 +542,15 @@ async def _calculate_current_state(self, room_id: str) -> StateMap[str]: return await res.get_state(self._state_controller, StateFilter.all()) async def _persist_event_batch( - self, _room_id: str, task: _PersistEventsTask + self, room_id: str, task: _PersistEventsTask ) -> Dict[str, str]: """Callback for the _event_persist_queue Calculates the change to current state and forward extremities, and persists the given events and with those updates. + Assumes that we are only persisting events for one room at a time. + Returns: A dictionary of event ID to event ID we didn't persist as we already had another event persisted with the same TXN ID. @@ -594,140 +596,23 @@ async def _persist_event_batch( # We can't easily parallelize these since different chunks # might contain the same event. :( - # NB: Assumes that we are only persisting events for one room - # at a time. 
- - # map room_id->set[event_ids] giving the new forward - # extremities in each room - new_forward_extremities: Dict[str, Set[str]] = {} - - # map room_id->(to_delete, to_insert) where to_delete is a list - # of type/state keys to remove from current state, and to_insert - # is a map (type,key)->event_id giving the state delta in each - # room - state_delta_for_room: Dict[str, DeltaState] = {} + new_forward_extremities = None + state_delta_for_room = None if not backfilled: with Measure(self._clock, "_calculate_state_and_extrem"): - # Work out the new "current state" for each room. + # Work out the new "current state" for the room. # We do this by working out what the new extremities are and then # calculating the state from that. - events_by_room: Dict[str, List[Tuple[EventBase, EventContext]]] = {} - for event, context in chunk: - events_by_room.setdefault(event.room_id, []).append( - (event, context) - ) - - for room_id, ev_ctx_rm in events_by_room.items(): - latest_event_ids = ( - await self.main_store.get_latest_event_ids_in_room(room_id) - ) - new_latest_event_ids = await self._calculate_new_extremities( - room_id, ev_ctx_rm, latest_event_ids - ) - - if new_latest_event_ids == latest_event_ids: - # No change in extremities, so no change in state - continue - - # there should always be at least one forward extremity. - # (except during the initial persistence of the send_join - # results, in which case there will be no existing - # extremities, so we'll `continue` above and skip this bit.) - assert new_latest_event_ids, "No forward extremities left!" - - new_forward_extremities[room_id] = new_latest_event_ids - - len_1 = ( - len(latest_event_ids) == 1 - and len(new_latest_event_ids) == 1 - ) - if len_1: - all_single_prev_not_state = all( - len(event.prev_event_ids()) == 1 - and not event.is_state() - for event, ctx in ev_ctx_rm - ) - # Don't bother calculating state if they're just - # a long chain of single ancestor non-state events. - if all_single_prev_not_state: - continue - - state_delta_counter.inc() - if len(new_latest_event_ids) == 1: - state_delta_single_event_counter.inc() - - # This is a fairly handwavey check to see if we could - # have guessed what the delta would have been when - # processing one of these events. - # What we're interested in is if the latest extremities - # were the same when we created the event as they are - # now. When this server creates a new event (as opposed - # to receiving it over federation) it will use the - # forward extremities as the prev_events, so we can - # guess this by looking at the prev_events and checking - # if they match the current forward extremities. - for ev, _ in ev_ctx_rm: - prev_event_ids = set(ev.prev_event_ids()) - if latest_event_ids == prev_event_ids: - state_delta_reuse_delta_counter.inc() - break - - logger.debug("Calculating state delta for room %s", room_id) - with Measure( - self._clock, "persist_events.get_new_state_after_events" - ): - res = await self._get_new_state_after_events( - room_id, - ev_ctx_rm, - latest_event_ids, - new_latest_event_ids, - ) - current_state, delta_ids, new_latest_event_ids = res - - # there should always be at least one forward extremity. - # (except during the initial persistence of the send_join - # results, in which case there will be no existing - # extremities, so we'll `continue` above and skip this bit.) - assert new_latest_event_ids, "No forward extremities left!" 
- - new_forward_extremities[room_id] = new_latest_event_ids - - # If either are not None then there has been a change, - # and we need to work out the delta (or use that - # given) - delta = None - if delta_ids is not None: - # If there is a delta we know that we've - # only added or replaced state, never - # removed keys entirely. - delta = DeltaState([], delta_ids) - elif current_state is not None: - with Measure( - self._clock, "persist_events.calculate_state_delta" - ): - delta = await self._calculate_state_delta( - room_id, current_state - ) - - if delta: - # If we have a change of state then lets check - # whether we're actually still a member of the room, - # or if our last user left. If we're no longer in - # the room then we delete the current state and - # extremities. - is_still_joined = await self._is_server_still_joined( - room_id, - ev_ctx_rm, - delta, - ) - if not is_still_joined: - logger.info("Server no longer in room %s", room_id) - delta.no_longer_in_room = True - - state_delta_for_room[room_id] = delta + ( + new_forward_extremities, + state_delta_for_room, + ) = await self._calculate_new_forward_extremities_and_state_delta( + room_id, chunk + ) await self.persist_events_store._persist_events_and_state_updates( + room_id, chunk, state_delta_for_room=state_delta_for_room, new_forward_extremities=new_forward_extremities, @@ -737,6 +622,117 @@ async def _persist_event_batch( return replaced_events + async def _calculate_new_forward_extremities_and_state_delta( + self, room_id: str, ev_ctx_rm: List[Tuple[EventBase, EventContext]] + ) -> Tuple[Optional[Set[str]], Optional[DeltaState]]: + """Calculates the new forward extremities and state delta for a room + given events to persist. + + Assumes that we are only persisting events for one room at a time. + + Returns: + A tuple of: + A set of str giving the new forward extremities the room + + The state delta for the room. + """ + + latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id) + new_latest_event_ids = await self._calculate_new_extremities( + room_id, ev_ctx_rm, latest_event_ids + ) + + if new_latest_event_ids == latest_event_ids: + # No change in extremities, so no change in state + return (None, None) + + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + + new_forward_extremities = new_latest_event_ids + + len_1 = len(latest_event_ids) == 1 and len(new_latest_event_ids) == 1 + if len_1: + all_single_prev_not_state = all( + len(event.prev_event_ids()) == 1 and not event.is_state() + for event, ctx in ev_ctx_rm + ) + # Don't bother calculating state if they're just + # a long chain of single ancestor non-state events. + if all_single_prev_not_state: + return (new_forward_extremities, None) + + state_delta_counter.inc() + if len(new_latest_event_ids) == 1: + state_delta_single_event_counter.inc() + + # This is a fairly handwavey check to see if we could + # have guessed what the delta would have been when + # processing one of these events. + # What we're interested in is if the latest extremities + # were the same when we created the event as they are + # now. 
When this server creates a new event (as opposed + # to receiving it over federation) it will use the + # forward extremities as the prev_events, so we can + # guess this by looking at the prev_events and checking + # if they match the current forward extremities. + for ev, _ in ev_ctx_rm: + prev_event_ids = set(ev.prev_event_ids()) + if latest_event_ids == prev_event_ids: + state_delta_reuse_delta_counter.inc() + break + + logger.debug("Calculating state delta for room %s", room_id) + with Measure(self._clock, "persist_events.get_new_state_after_events"): + res = await self._get_new_state_after_events( + room_id, + ev_ctx_rm, + latest_event_ids, + new_latest_event_ids, + ) + current_state, delta_ids, new_latest_event_ids = res + + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + + new_forward_extremities = new_latest_event_ids + + # If either are not None then there has been a change, + # and we need to work out the delta (or use that + # given) + delta = None + if delta_ids is not None: + # If there is a delta we know that we've + # only added or replaced state, never + # removed keys entirely. + delta = DeltaState([], delta_ids) + elif current_state is not None: + with Measure(self._clock, "persist_events.calculate_state_delta"): + delta = await self._calculate_state_delta(room_id, current_state) + + if delta: + # If we have a change of state then lets check + # whether we're actually still a member of the room, + # or if our last user left. If we're no longer in + # the room then we delete the current state and + # extremities. + is_still_joined = await self._is_server_still_joined( + room_id, + ev_ctx_rm, + delta, + ) + if not is_still_joined: + logger.info("Server no longer in room %s", room_id) + delta.no_longer_in_room = True + + return (new_forward_extremities, delta) + async def _calculate_new_extremities( self, room_id: str, diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 81f661160ce2..eb34de4df5ae 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -18,7 +18,6 @@ import time import types from collections import defaultdict -from sys import intern from time import monotonic as monotonic_time from typing import ( TYPE_CHECKING, @@ -35,7 +34,6 @@ Tuple, Type, TypeVar, - Union, cast, overload, ) @@ -421,6 +419,16 @@ def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None: self._do_execute(self.txn.execute, sql, parameters) def executemany(self, sql: str, *args: Any) -> None: + """Repeatedly execute the same piece of SQL with different parameters. + + See https://peps.python.org/pep-0249/#executemany. Note in particular that + + > Use of this method for an operation which produces one or more result sets + > constitutes undefined behavior + + so you can't use this for e.g. a SELECT, an UPDATE ... RETURNING, or a + DELETE FROM... RETURNING. + """ # TODO: we should add a type for *args here. Looking at Cursor.executemany # and DBAPI2 it ought to be Sequence[_Parameter], but we pass in # Iterable[Iterable[Any]] in execute_batch and execute_values above, which mypy @@ -606,13 +614,16 @@ async def _check_safe_to_upsert(self) -> None: If the background updates have not completed, wait 15 sec and check again. 
""" - updates = await self.simple_select_list( - "background_updates", - keyvalues=None, - retcols=["update_name"], - desc="check_background_updates", + updates = cast( + List[Tuple[str]], + await self.simple_select_list( + "background_updates", + keyvalues=None, + retcols=["update_name"], + desc="check_background_updates", + ), ) - background_update_names = [x["update_name"] for x in updates] + background_update_names = [x[0] for x in updates] for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items(): if update_name not in background_update_names: @@ -1030,57 +1041,20 @@ def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R: self._db_pool.runWithConnection(inner_func, *args, **kwargs) ) - @staticmethod - def cursor_to_dict(cursor: Cursor) -> List[Dict[str, Any]]: - """Converts a SQL cursor into an list of dicts. - - Args: - cursor: The DBAPI cursor which has executed a query. - Returns: - A list of dicts where the key is the column header. - """ - assert cursor.description is not None, "cursor.description was None" - col_headers = [intern(str(column[0])) for column in cursor.description] - results = [dict(zip(col_headers, row)) for row in cursor] - return results - - @overload - async def execute( - self, desc: str, decoder: Literal[None], query: str, *args: Any - ) -> List[Tuple[Any, ...]]: - ... - - @overload - async def execute( - self, desc: str, decoder: Callable[[Cursor], R], query: str, *args: Any - ) -> R: - ... - - async def execute( - self, - desc: str, - decoder: Optional[Callable[[Cursor], R]], - query: str, - *args: Any, - ) -> Union[List[Tuple[Any, ...]], R]: + async def execute(self, desc: str, query: str, *args: Any) -> List[Tuple[Any, ...]]: """Runs a single query for a result set. Args: desc: description of the transaction, for logging and metrics - decoder - The function which can resolve the cursor results to - something meaningful. query - The query string to execute *args - Query args. Returns: The result of decoder(results) """ - def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]: + def interaction(txn: LoggingTransaction) -> List[Tuple[Any, ...]]: txn.execute(query, args) - if decoder: - return decoder(txn) - else: - return txn.fetchall() + return txn.fetchall() return await self.runInteraction(desc, interaction) @@ -1142,8 +1116,8 @@ async def simple_insert_many( def simple_insert_many_txn( txn: LoggingTransaction, table: str, - keys: Collection[str], - values: Iterable[Iterable[Any]], + keys: Sequence[str], + values: Collection[Iterable[Any]], ) -> None: """Executes an INSERT query on the named table. @@ -1156,6 +1130,9 @@ def simple_insert_many_txn( keys: list of column names values: for each row, a list of values in the same order as `keys` """ + # If there's nothing to insert, then skip executing the query. + if not values: + return if isinstance(txn.database_engine, PostgresEngine): # We use `execute_values` as it can be a lot faster than `execute_batch`, @@ -1427,12 +1404,12 @@ def simple_upsert_txn_native_upsert( allvalues.update(values) latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values) - sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % ( + sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %sDO %s" % ( table, ", ".join(k for k in allvalues), ", ".join("?" 
for _ in allvalues), ", ".join(k for k in keyvalues), - f"WHERE {where_clause}" if where_clause else "", + f"WHERE {where_clause} " if where_clause else "", latter, ) txn.execute(sql, list(allvalues.values())) @@ -1481,7 +1458,7 @@ def simple_upsert_many_txn( key_names: Collection[str], key_values: Collection[Iterable[Any]], value_names: Collection[str], - value_values: Iterable[Iterable[Any]], + value_values: Collection[Iterable[Any]], ) -> None: """ Upsert, many times. @@ -1494,6 +1471,19 @@ def simple_upsert_many_txn( value_values: A list of each row's value column values. Ignored if value_names is empty. """ + # If there's nothing to upsert, then skip executing the query. + if not key_values: + return + + # No value columns, therefore make a blank list so that the following + # zip() works correctly. + if not value_names: + value_values = [() for x in range(len(key_values))] + elif len(value_values) != len(key_values): + raise ValueError( + f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number." + ) + if table not in self._unsafe_to_upsert_tables: return self.simple_upsert_many_txn_native_upsert( txn, table, key_names, key_values, value_names, value_values @@ -1528,10 +1518,6 @@ def simple_upsert_many_txn_emulated( value_values: A list of each row's value column values. Ignored if value_names is empty. """ - # No value columns, therefore make a blank list so that the following - # zip() works correctly. - if not value_names: - value_values = [() for x in range(len(key_values))] # Lock the table just once, to prevent it being done once per row. # Note that, according to Postgres' documentation, once obtained, @@ -1569,10 +1555,7 @@ def simple_upsert_many_txn_native_upsert( allnames.extend(value_names) if not value_names: - # No value columns, therefore make a blank list so that the - # following zip() works correctly. latter = "NOTHING" - value_values = [() for x in range(len(key_values))] else: latter = "UPDATE SET " + ", ".join( k + "=EXCLUDED." + k for k in value_names @@ -1614,7 +1597,7 @@ async def simple_select_one( retcols: Collection[str], allow_none: Literal[False] = False, desc: str = "simple_select_one", - ) -> Dict[str, Any]: + ) -> Tuple[Any, ...]: ... @overload @@ -1625,7 +1608,7 @@ async def simple_select_one( retcols: Collection[str], allow_none: Literal[True] = True, desc: str = "simple_select_one", - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Tuple[Any, ...]]: ... async def simple_select_one( @@ -1635,7 +1618,7 @@ async def simple_select_one( retcols: Collection[str], allow_none: bool = False, desc: str = "simple_select_one", - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which is expected to return a single row, returning multiple columns from it. @@ -1804,9 +1787,9 @@ async def simple_select_list( keyvalues: Optional[Dict[str, Any]], retcols: Collection[str], desc: str = "simple_select_list", - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows, returning the result as a list of tuples. Args: table: the table name @@ -1817,8 +1800,7 @@ async def simple_select_list( desc: description of the transaction, for logging and metrics Returns: - A list of dictionaries, one per result row, each a mapping between the - column names from `retcols` and that column's value for the row. 
+ A list of tuples, one per result row, each the retcolumn's value for the row. """ return await self.runInteraction( desc, @@ -1836,9 +1818,9 @@ def simple_select_list_txn( table: str, keyvalues: Optional[Dict[str, Any]], retcols: Iterable[str], - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows, returning the result as a list of tuples. Args: txn: Transaction object @@ -1849,8 +1831,7 @@ def simple_select_list_txn( retcols: the names of the columns to return Returns: - A list of dictionaries, one per result row, each a mapping between the - column names from `retcols` and that column's value for the row. + A list of tuples, one per result row, each the retcolumn's value for the row. """ if keyvalues: sql = "SELECT %s FROM %s WHERE %s" % ( @@ -1863,7 +1844,7 @@ def simple_select_list_txn( sql = "SELECT %s FROM %s" % (", ".join(retcols), table) txn.execute(sql) - return cls.cursor_to_dict(txn) + return txn.fetchall() async def simple_select_many_batch( self, @@ -1938,6 +1919,7 @@ def simple_select_many_txn( Returns: The results as a list of tuples. """ + # If there's nothing to select, then skip executing the query. if not iterable: return [] @@ -2072,13 +2054,13 @@ def simple_update_many_txn( raise ValueError( f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number." ) + # If there is nothing to update, then skip executing the query. + if not key_values: + return # List of tuples of (value values, then key values) # (This matches the order needed for the query) - args = [tuple(x) + tuple(y) for x, y in zip(value_values, key_values)] - - for ks, vs in zip(key_values, value_values): - args.append(tuple(vs) + tuple(ks)) + args = [tuple(vv) + tuple(kv) for vv, kv in zip(value_values, key_values)] # 'col1 = ?, col2 = ?, ...' set_clause = ", ".join(f"{n} = ?" for n in value_names) @@ -2090,9 +2072,7 @@ def simple_update_many_txn( where_clause = "" # UPDATE mytable SET col1 = ?, col2 = ? WHERE col3 = ? AND col4 = ? - sql = f""" - UPDATE {table} SET {set_clause} {where_clause} - """ + sql = f"UPDATE {table} SET {set_clause} {where_clause}" txn.execute_batch(sql, args) @@ -2147,7 +2127,7 @@ def simple_select_one_txn( keyvalues: Dict[str, Any], retcols: Collection[str], allow_none: bool = False, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Tuple[Any, ...]]: select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table) if keyvalues: @@ -2165,7 +2145,7 @@ def simple_select_one_txn( if txn.rowcount > 1: raise StoreError(500, "More than one row matched (%s)" % (table,)) - return dict(zip(retcols, row)) + return row async def simple_delete_one( self, table: str, keyvalues: Dict[str, Any], desc: str = "simple_delete_one" @@ -2308,11 +2288,10 @@ def simple_delete_many_txn( Returns: Number rows deleted """ + # If there's nothing to delete, then skip executing the query. if not values: return 0 - sql = "DELETE FROM %s" % table - clause, values = make_in_list_sql_clause(txn.database_engine, column, values) clauses = [clause] @@ -2320,8 +2299,7 @@ def simple_delete_many_txn( clauses.append("%s = ?" 
% (key,)) values.append(value) - if clauses: - sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) + sql = "DELETE FROM %s WHERE %s" % (table, " AND ".join(clauses)) txn.execute(sql, values) return txn.rowcount diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index 7aa24ccf2121..b57e260fe074 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -45,7 +45,7 @@ class Databases(Generic[DataStoreT]): """ databases: List[DatabasePool] - main: "DataStore" # FIXME: #11165: actually an instance of `main_store_class` + main: "DataStore" # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class` state: StateGroupDataStore persist_events: Optional[PersistEventsStore] diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 840d725114d0..89f40773519b 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -17,6 +17,8 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast +import attr + from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig from synapse.storage._base import make_in_list_sql_clause @@ -28,7 +30,7 @@ from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor -from synapse.types import JsonDict, get_domain_from_id +from synapse.types import get_domain_from_id from .account_data import AccountDataStore from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore @@ -82,6 +84,25 @@ logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class UserPaginateResponse: + """This is very similar to UserInfo, but not quite the same.""" + + name: str + user_type: Optional[str] + is_guest: bool + admin: bool + deactivated: bool + shadow_banned: bool + displayname: Optional[str] + avatar_url: Optional[str] + creation_ts: Optional[int] + approved: bool + erased: bool + last_seen_ts: int + locked: bool + + class DataStore( EventsBackgroundUpdatesStore, ExperimentalFeaturesStore, @@ -156,7 +177,7 @@ async def get_users_paginate( approved: bool = True, not_user_types: Optional[List[str]] = None, locked: bool = False, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[UserPaginateResponse], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the total number of users matching the filter criteria. 
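
The UserPaginateResponse class added above is a typed, frozen attrs container that positional row tuples get decoded into, instead of the old dict-shaped rows. A minimal sketch of that pattern, assuming only that the attrs package is installed (it is imported above); ExampleUser and rows_to_users are hypothetical illustration names, not Synapse APIs, and the column layout is invented for the example:

import attr
from typing import List, Optional, Tuple

@attr.s(slots=True, frozen=True, auto_attribs=True)
class ExampleUser:
    # Cut-down, hypothetical stand-in for an attrs row class such as
    # UserPaginateResponse.
    name: str
    displayname: Optional[str]
    is_guest: bool

def rows_to_users(rows: List[Tuple[str, Optional[str], int]]) -> List[ExampleUser]:
    # SQLite hands booleans back as 0/1 integers, so coerce them explicitly
    # at the single point where rows are decoded.
    return [
        ExampleUser(name=name, displayname=displayname, is_guest=bool(is_guest))
        for name, displayname, is_guest in rows
    ]

print(rows_to_users([("@alice:example.org", "Alice", 0)]))

Decoding each row once, in an explicitly ordered constructor call, is what lets callers drop stringly-typed dict lookups; the get_users_paginate_txn hunk further down applies the same idea with the real column list.
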
@@ -182,7 +203,7 @@ async def get_users_paginate( def get_users_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[UserPaginateResponse], int]: filters = [] args: list = [] @@ -282,13 +303,24 @@ def get_users_paginate_txn( """ args += [limit, start] txn.execute(sql, args) - users = self.db_pool.cursor_to_dict(txn) - - # some of those boolean values are returned as integers when we're on SQLite - columns_to_boolify = ["erased"] - for user in users: - for column in columns_to_boolify: - user[column] = bool(user[column]) + users = [ + UserPaginateResponse( + name=row[0], + user_type=row[1], + is_guest=bool(row[2]), + admin=bool(row[3]), + deactivated=bool(row[4]), + shadow_banned=bool(row[5]), + displayname=row[6], + avatar_url=row[7], + creation_ts=row[8], + approved=bool(row[9]), + erased=bool(row[10]), + last_seen_ts=row[11], + locked=bool(row[12]), + ) + for row in txn + ] return users, count diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 39498d52c62f..07f9b65af31c 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -94,7 +94,10 @@ def __init__( hs.get_replication_notifier(), "room_account_data", "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], + extra_tables=[ + ("account_data", "stream_id"), + ("room_tags_revisions", "stream_id"), + ], is_writer=self._instance_name in hs.config.worker.writers.account_data, ) @@ -283,16 +286,20 @@ async def get_account_data_for_room( def get_account_data_for_room_txn( txn: LoggingTransaction, - ) -> Dict[str, JsonDict]: - rows = self.db_pool.simple_select_list_txn( - txn, - "room_account_data", - {"user_id": user_id, "room_id": room_id}, - ["account_data_type", "content"], + ) -> Dict[str, JsonMapping]: + rows = cast( + List[Tuple[str, str]], + self.db_pool.simple_select_list_txn( + txn, + table="room_account_data", + keyvalues={"user_id": user_id, "room_id": room_id}, + retcols=["account_data_type", "content"], + ), ) return { - row["account_data_type"]: db_to_json(row["content"]) for row in rows + account_data_type: db_to_json(content) + for account_data_type, content in rows } return await self.db_pool.runInteraction( @@ -740,8 +747,16 @@ def _add_account_data_for_user( ) # Invalidate the cache for any ignored users which were added or removed. - for ignored_user_id in previously_ignored_users ^ currently_ignored_users: - self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,)) + self._invalidate_cache_and_stream_bulk( + txn, + self.ignored_by, + [ + (ignored_user_id,) + for ignored_user_id in ( + previously_ignored_users ^ currently_ignored_users + ) + ], + ) self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,)) async def remove_account_data_for_user( @@ -817,10 +832,14 @@ def _remove_account_data_for_user_txn( ) # Invalidate the cache for ignored users which were removed. - for ignored_user_id in previously_ignored_users: - self._invalidate_cache_and_stream( - txn, self.ignored_by, (ignored_user_id,) - ) + self._invalidate_cache_and_stream_bulk( + txn, + self.ignored_by, + [ + (ignored_user_id,) + for ignored_user_id in previously_ignored_users + ], + ) # Invalidate for this user the cache tracking ignored users. 
self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,)) diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 073a99cd8402..fa7d1c469abe 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -197,16 +197,21 @@ async def get_appservices_by_state( Returns: A list of ApplicationServices, which may be empty. """ - results = await self.db_pool.simple_select_list( - "application_services_state", {"state": state.value}, ["as_id"] + results = cast( + List[Tuple[str]], + await self.db_pool.simple_select_list( + table="application_services_state", + keyvalues={"state": state.value}, + retcols=("as_id",), + ), ) # NB: This assumes this class is linked with ApplicationServiceStore as_list = self.get_app_services() services = [] - for res in results: + for (as_id,) in results: for service in as_list: - if service.id == res["as_id"]: + if service.id == as_id: services.append(service) return services diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 4d0470ffd95a..d7232f566ba4 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -483,6 +483,30 @@ def _invalidate_cache_and_stream( txn.call_after(cache_func.invalidate, keys) self._send_invalidation_to_replication(txn, cache_func.__name__, keys) + def _invalidate_cache_and_stream_bulk( + self, + txn: LoggingTransaction, + cache_func: CachedFunction, + key_tuples: Collection[Tuple[Any, ...]], + ) -> None: + """A bulk version of _invalidate_cache_and_stream. + + Locally invalidate every key-tuple in `key_tuples`, then emit invalidations + for each key-tuple over replication. + + This implementation is more efficient than a loop which repeatedly calls the + non-bulk version. + """ + if not key_tuples: + return + + for keys in key_tuples: + txn.call_after(cache_func.invalidate, keys) + + self._send_invalidation_to_replication_bulk( + txn, cache_func.__name__, key_tuples + ) + def _invalidate_all_cache_and_stream( self, txn: LoggingTransaction, cache_func: CachedFunction ) -> None: @@ -564,10 +588,6 @@ def _send_invalidation_to_replication( if isinstance(self.database_engine, PostgresEngine): assert self._cache_id_gen is not None - # get_next() returns a context manager which is designed to wrap - # the transaction. However, we want to only get an ID when we want - # to use it, here, so we need to call __enter__ manually, and have - # __exit__ called after the transaction finishes. stream_id = self._cache_id_gen.get_next_txn(txn) txn.call_after(self.hs.get_notifier().on_new_replication_data) @@ -586,6 +606,53 @@ def _send_invalidation_to_replication( }, ) + def _send_invalidation_to_replication_bulk( + self, + txn: LoggingTransaction, + cache_name: str, + key_tuples: Collection[Tuple[Any, ...]], + ) -> None: + """Announce the invalidation of multiple (but not all) cache entries. + + This is more efficient than repeated calls to the non-bulk version. It should + NOT be used to invalidating the entire cache: use + `_send_invalidation_to_replication` with keys=None. + + Note that this does *not* invalidate the cache locally. + + Args: + txn + cache_name + key_tuples: Key-tuples to invalidate. Assumed to be non-empty. 
+ """ + if isinstance(self.database_engine, PostgresEngine): + assert self._cache_id_gen is not None + + stream_ids = self._cache_id_gen.get_next_mult_txn(txn, len(key_tuples)) + ts = self._clock.time_msec() + txn.call_after(self.hs.get_notifier().on_new_replication_data) + self.db_pool.simple_insert_many_txn( + txn, + table="cache_invalidation_stream_by_instance", + keys=( + "stream_id", + "instance_name", + "cache_func", + "keys", + "invalidation_ts", + ), + values=[ + # We convert key_tuples to a list here because psycopg2 serialises + # lists as pq arrrays, but serialises tuples as "composite types". + # (We need an array because the `keys` column has type `[]text`.) + # See: + # https://www.psycopg.org/docs/usage.html#adapt-list + # https://www.psycopg.org/docs/usage.html#adapt-tuple + (stream_id, self._instance_name, cache_name, list(key_tuple), ts) + for stream_id, key_tuple in zip(stream_ids, key_tuples) + ], + ) + def get_cache_stream_token_for_writer(self, instance_name: str) -> int: if self._cache_id_gen: return self._cache_id_gen.get_current_token_for_writer(instance_name) diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 58177ecec132..711fdddd4e96 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -93,7 +93,7 @@ async def _censor_redactions(self) -> None: """ rows = await self.db_pool.execute( - "_censor_redactions_fetch", None, sql, before_ts, 100 + "_censor_redactions_fetch", sql, before_ts, 100 ) updates = [] diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 8be1511859c9..1df7731050d8 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -465,18 +465,15 @@ async def _prune_old_user_ips(self) -> None: # # This works by finding the max last_seen that is less than the given # time, but has no more than N rows before it, deleting all rows with - # a lesser last_seen time. (We COALESCE so that the sub-SELECT always - # returns exactly one row). + # a lesser last_seen time. (We use an `IN` clause to force postgres to + # use the index, otherwise it tends to do a seq scan). sql = """ DELETE FROM user_ips - WHERE last_seen <= ( - SELECT COALESCE(MAX(last_seen), -1) - FROM ( - SELECT last_seen FROM user_ips - WHERE last_seen <= ? - ORDER BY last_seen ASC - LIMIT 5000 - ) AS u + WHERE last_seen IN ( + SELECT last_seen FROM user_ips + WHERE last_seen <= ? 
+ ORDER BY last_seen ASC + LIMIT 5000 ) """ @@ -508,21 +505,24 @@ async def _get_last_client_ip_by_device_from_database( if device_id is not None: keyvalues["device_id"] = device_id - res = await self.db_pool.simple_select_list( - table="devices", - keyvalues=keyvalues, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + res = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + await self.db_pool.simple_select_list( + table="devices", + keyvalues=keyvalues, + retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + ), ) return { - (d["user_id"], d["device_id"]): DeviceLastConnectionInfo( - user_id=d["user_id"], - device_id=d["device_id"], - ip=d["ip"], - user_agent=d["user_agent"], - last_seen=d["last_seen"], + (user_id, device_id): DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip=ip, + user_agent=user_agent, + last_seen=last_seen, ) - for d in res + for user_id, ip, user_agent, device_id, last_seen in res } async def _get_user_ip_and_agents_from_database( @@ -586,6 +586,27 @@ async def insert_client_ip( device_id: Optional[str], now: Optional[int] = None, ) -> None: + """Record that `user_id` used `access_token` from this `ip` address. + + This method does two things. + + 1. It queues up a row to be upserted into the `client_ips` table. These happen + periodically; see _update_client_ips_batch. + 2. It immediately records this user as having taken action for the purposes of + MAU tracking. + + Any DB writes take place on the background tasks worker, falling back to the + main process. If we're not that worker, this method emits a replication payload + to run this logic on that worker. + + Two caveats to note: + + - We only take action once per LAST_SEEN_GRANULARITY, to avoid spamming the + DB with writes. + - Requests using the sliding-sync proxy's user agent are excluded, as its + requests are not directly driven by end-users. This is a hack and we're not + very proud of it. + """ # The sync proxy continuously triggers /sync even if the user is not # present so should be excluded from user_ips entries. 
if user_agent == "sync-v3-proxy-": diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 3e7425d4a654..0541f4e93d7d 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -87,25 +87,32 @@ def __init__( self._instance_name in hs.config.worker.writers.to_device ) - self._device_inbox_id_gen: AbstractStreamIdGenerator = ( + self._to_device_msg_id_gen: AbstractStreamIdGenerator = ( MultiWriterIdGenerator( db_conn=db_conn, db=database, notifier=hs.get_replication_notifier(), stream_name="to_device", instance_name=self._instance_name, - tables=[("device_inbox", "instance_name", "stream_id")], + tables=[ + ("device_inbox", "instance_name", "stream_id"), + ("device_federation_outbox", "instance_name", "stream_id"), + ], sequence_name="device_inbox_sequence", writers=hs.config.worker.writers.to_device, ) ) else: self._can_write_to_device = True - self._device_inbox_id_gen = StreamIdGenerator( - db_conn, hs.get_replication_notifier(), "device_inbox", "stream_id" + self._to_device_msg_id_gen = StreamIdGenerator( + db_conn, + hs.get_replication_notifier(), + "device_inbox", + "stream_id", + extra_tables=[("device_federation_outbox", "stream_id")], ) - max_device_inbox_id = self._device_inbox_id_gen.get_current_token() + max_device_inbox_id = self._to_device_msg_id_gen.get_current_token() device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict( db_conn, "device_inbox", @@ -145,8 +152,8 @@ def process_replication_rows( ) -> None: if stream_name == ToDeviceStream.NAME: # If replication is happening than postgres must be being used. - assert isinstance(self._device_inbox_id_gen, MultiWriterIdGenerator) - self._device_inbox_id_gen.advance(instance_name, token) + assert isinstance(self._to_device_msg_id_gen, MultiWriterIdGenerator) + self._to_device_msg_id_gen.advance(instance_name, token) for row in rows: if row.entity.startswith("@"): self._device_inbox_stream_cache.entity_has_changed( @@ -162,11 +169,11 @@ def process_replication_position( self, stream_name: str, instance_name: str, token: int ) -> None: if stream_name == ToDeviceStream.NAME: - self._device_inbox_id_gen.advance(instance_name, token) + self._to_device_msg_id_gen.advance(instance_name, token) super().process_replication_position(stream_name, instance_name, token) def get_to_device_stream_token(self) -> int: - return self._device_inbox_id_gen.get_current_token() + return self._to_device_msg_id_gen.get_current_token() async def get_messages_for_user_devices( self, @@ -450,14 +457,12 @@ async def delete_messages_for_device( user_id: str, device_id: Optional[str], up_to_stream_id: int, - limit: Optional[int] = None, ) -> int: """ Args: user_id: The recipient user_id. device_id: The recipient device_id. up_to_stream_id: Where to delete messages up to. - limit: maximum number of messages to delete Returns: The number of messages deleted. @@ -478,32 +483,22 @@ async def delete_messages_for_device( log_kv({"message": "No changes in cache since last check"}) return 0 - def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: - limit_statement = "" if limit is None else f"LIMIT {limit}" - sql = f""" - DELETE FROM device_inbox WHERE user_id = ? AND device_id = ? AND stream_id <= ( - SELECT MAX(stream_id) FROM ( - SELECT stream_id FROM device_inbox - WHERE user_id = ? AND device_id = ? AND stream_id <= ? 
- ORDER BY stream_id - {limit_statement} - ) AS q1 - ) - """ - txn.execute(sql, (user_id, device_id, user_id, device_id, up_to_stream_id)) - return txn.rowcount - - count = await self.db_pool.runInteraction( - "delete_messages_for_device", delete_messages_for_device_txn - ) + from_stream_id = None + count = 0 + while True: + from_stream_id, loop_count = await self.delete_messages_for_device_between( + user_id, + device_id, + from_stream_id=from_stream_id, + to_stream_id=up_to_stream_id, + limit=1000, + ) + count += loop_count + if from_stream_id is None: + break log_kv({"message": f"deleted {count} messages for device", "count": count}) - # In this case we don't know if we hit the limit or the delete is complete - # so let's not update the cache. - if count == limit: - return count - # Update the cache, ensuring that we only ever increase the value updated_last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 0 @@ -514,6 +509,74 @@ def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: return count + @trace + async def delete_messages_for_device_between( + self, + user_id: str, + device_id: Optional[str], + from_stream_id: Optional[int], + to_stream_id: int, + limit: int, + ) -> Tuple[Optional[int], int]: + """Delete N device messages between the stream IDs, returning the + highest stream ID deleted (or None if all messages in the range have + been deleted) and the number of messages deleted. + + This is more efficient than `delete_messages_for_device` when calling in + a loop to batch delete messages. + """ + + # Keeping track of a lower bound of stream ID where we've deleted + # everything below makes the queries much faster. Otherwise, every time + # we scan for rows to delete we'd re-scan across all the rows that have + # previously deleted (until the next table VACUUM). + + if from_stream_id is None: + # Minimum device stream ID is 1. + from_stream_id = 0 + + def delete_messages_for_device_between_txn( + txn: LoggingTransaction, + ) -> Tuple[Optional[int], int]: + txn.execute( + """ + SELECT MAX(stream_id) FROM ( + SELECT stream_id FROM device_inbox + WHERE user_id = ? AND device_id = ? + AND ? < stream_id AND stream_id <= ? + ORDER BY stream_id + LIMIT ? + ) AS d + """, + (user_id, device_id, from_stream_id, to_stream_id, limit), + ) + row = txn.fetchone() + if row is None or row[0] is None: + return None, 0 + + (max_stream_id,) = row + + txn.execute( + """ + DELETE FROM device_inbox + WHERE user_id = ? AND device_id = ? + AND ? < stream_id AND stream_id <= ? 
+ """, + (user_id, device_id, from_stream_id, max_stream_id), + ) + + num_deleted = txn.rowcount + if num_deleted < limit: + return None, num_deleted + + return max_stream_id, num_deleted + + return await self.db_pool.runInteraction( + "delete_messages_for_device_between", + delete_messages_for_device_between_txn, + db_autocommit=True, # We don't need to run in a transaction + ) + @trace async def get_new_device_msgs_for_remote( self, destination: str, last_stream_id: int, current_stream_id: int, limit: int @@ -745,7 +808,7 @@ def add_messages_txn( msg.get(EventContentFields.TO_DEVICE_MSGID), ) - async with self._device_inbox_id_gen.get_next() as stream_id: + async with self._to_device_msg_id_gen.get_next() as stream_id: now_ms = self._clock.time_msec() await self.db_pool.runInteraction( "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id @@ -757,7 +820,7 @@ def add_messages_txn( destination, stream_id ) - return self._device_inbox_id_gen.get_current_token() + return self._to_device_msg_id_gen.get_current_token() async def add_messages_from_remote_to_device_inbox( self, @@ -801,7 +864,7 @@ def add_messages_txn( txn, stream_id, local_messages_by_user_then_device ) - async with self._device_inbox_id_gen.get_next() as stream_id: + async with self._to_device_msg_id_gen.get_next() as stream_id: now_ms = self._clock.time_msec() await self.db_pool.runInteraction( "add_messages_from_remote_to_device_inbox", diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fc23d18eba53..775abbac79d9 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -255,35 +255,20 @@ async def get_device( A dict containing the device information, or `None` if the device does not exist. """ - return await self.db_pool.simple_select_one( - table="devices", - keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, - retcols=("user_id", "device_id", "display_name"), - desc="get_device", - allow_none=True, - ) - - async def get_device_opt( - self, user_id: str, device_id: str - ) -> Optional[Dict[str, Any]]: - """Retrieve a device. Only returns devices that are not marked as - hidden. - - Args: - user_id: The ID of the user which owns the device - device_id: The ID of the device to retrieve - Returns: - A dict containing the device information, or None if the device does not exist. - """ - return await self.db_pool.simple_select_one( + row = await self.db_pool.simple_select_one( table="devices", keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, retcols=("user_id", "device_id", "display_name"), desc="get_device", allow_none=True, ) + if row is None: + return None + return {"user_id": row[0], "device_id": row[1], "display_name": row[2]} - async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]: + async def get_devices_by_user( + self, user_id: str + ) -> Dict[str, Dict[str, Optional[str]]]: """Retrieve all of a user's registered devices. Only returns devices that are not marked as hidden. @@ -291,20 +276,26 @@ async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]: user_id: Returns: A mapping from device_id to a dict containing "device_id", "user_id" - and "display_name" for each device. + and "display_name" for each device. Display name may be null. 
""" - devices = await self.db_pool.simple_select_list( - table="devices", - keyvalues={"user_id": user_id, "hidden": False}, - retcols=("user_id", "device_id", "display_name"), - desc="get_devices_by_user", + devices = cast( + List[Tuple[str, str, Optional[str]]], + await self.db_pool.simple_select_list( + table="devices", + keyvalues={"user_id": user_id, "hidden": False}, + retcols=("user_id", "device_id", "display_name"), + desc="get_devices_by_user", + ), ) - return {d["device_id"]: d for d in devices} + return { + d[1]: {"user_id": d[0], "device_id": d[1], "display_name": d[2]} + for d in devices + } async def get_devices_by_auth_provider_session_id( self, auth_provider_id: str, auth_provider_session_id: str - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, str]]: """Retrieve the list of devices associated with a SSO IdP session ID. Args: @@ -313,14 +304,17 @@ async def get_devices_by_auth_provider_session_id( Returns: A list of dicts containing the device_id and the user_id of each device """ - return await self.db_pool.simple_select_list( - table="device_auth_providers", - keyvalues={ - "auth_provider_id": auth_provider_id, - "auth_provider_session_id": auth_provider_session_id, - }, - retcols=("user_id", "device_id"), - desc="get_devices_by_auth_provider_session_id", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="device_auth_providers", + keyvalues={ + "auth_provider_id": auth_provider_id, + "auth_provider_session_id": auth_provider_session_id, + }, + retcols=("user_id", "device_id"), + desc="get_devices_by_auth_provider_session_id", + ), ) @trace @@ -692,7 +686,7 @@ def _mark_as_sent_devices_by_remote_txn( key_names=("destination", "user_id"), key_values=[(destination, user_id) for user_id, _ in rows], value_names=("stream_id",), - value_values=((stream_id,) for _, stream_id in rows), + value_values=[(stream_id,) for _, stream_id in rows], ) # Delete all sent outbound pokes @@ -821,15 +815,16 @@ async def _get_cached_user_device( async def get_cached_devices_for_user( self, user_id: str ) -> Mapping[str, JsonMapping]: - devices = await self.db_pool.simple_select_list( - table="device_lists_remote_cache", - keyvalues={"user_id": user_id}, - retcols=("device_id", "content"), - desc="get_cached_devices_for_user", + devices = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="device_lists_remote_cache", + keyvalues={"user_id": user_id}, + retcols=("device_id", "content"), + desc="get_cached_devices_for_user", + ), ) - return { - device["device_id"]: db_to_json(device["content"]) for device in devices - } + return {device[0]: db_to_json(device[1]) for device in devices} def get_cached_device_list_changes( self, @@ -882,7 +877,6 @@ async def get_all_devices_changed( rows = await self.db_pool.execute( "get_all_devices_changed", - None, sql, from_key, to_key, @@ -966,7 +960,7 @@ async def get_users_whose_signatures_changed( WHERE from_user_id = ? AND stream_id > ? """ rows = await self.db_pool.execute( - "get_users_whose_signatures_changed", None, sql, user_id, from_key + "get_users_whose_signatures_changed", sql, user_id, from_key ) return {user for row in rows for user in db_to_json(row[0])} else: @@ -1080,7 +1074,7 @@ async def get_user_ids_requiring_device_list_resync( The IDs of users whose device lists need resync. 
""" if user_ids: - row_tuples = cast( + rows = cast( List[Tuple[str]], await self.db_pool.simple_select_many_batch( table="device_lists_remote_resync", @@ -1090,11 +1084,9 @@ async def get_user_ids_requiring_device_list_resync( desc="get_user_ids_requiring_device_list_resync_with_iterable", ), ) - - return {row[0] for row in row_tuples} else: rows = cast( - List[Dict[str, str]], + List[Tuple[str]], await self.db_pool.simple_select_list( table="device_lists_remote_resync", keyvalues=None, @@ -1103,7 +1095,7 @@ async def get_user_ids_requiring_device_list_resync( ), ) - return {row["user_id"] for row in rows} + return {row[0] for row in rows} async def mark_remote_users_device_caches_as_stale( self, user_ids: StrCollection @@ -1212,9 +1204,7 @@ async def get_dehydrated_device( retcols=["device_id", "device_data"], allow_none=True, ) - return ( - (row["device_id"], json_decoder.decode(row["device_data"])) if row else None - ) + return (row[0], json_decoder.decode(row[1])) if row else None def _store_dehydrated_device_txn( self, @@ -1611,7 +1601,6 @@ async def _remove_duplicate_outbound_pokes( # # For each duplicate, we delete all the existing rows and put one back. - KEY_COLS = ["stream_id", "destination", "user_id", "device_id"] last_row = progress.get( "last_row", {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""}, @@ -1619,44 +1608,62 @@ async def _remove_duplicate_outbound_pokes( def _txn(txn: LoggingTransaction) -> int: clause, args = make_tuple_comparison_clause( - [(x, last_row[x]) for x in KEY_COLS] + [ + ("stream_id", last_row["stream_id"]), + ("destination", last_row["destination"]), + ("user_id", last_row["user_id"]), + ("device_id", last_row["device_id"]), + ] ) - sql = """ + sql = f""" SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts FROM device_lists_outbound_pokes - WHERE %s - GROUP BY %s + WHERE {clause} + GROUP BY stream_id, destination, user_id, device_id HAVING count(*) > 1 - ORDER BY %s + ORDER BY stream_id, destination, user_id, device_id LIMIT ? - """ % ( - clause, # WHERE - ",".join(KEY_COLS), # GROUP BY - ",".join(KEY_COLS), # ORDER BY - ) + """ txn.execute(sql, args + [batch_size]) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() - row = None - for row in rows: + stream_id, destination, user_id, device_id = None, None, None, None + for stream_id, destination, user_id, device_id, _ in rows: self.db_pool.simple_delete_txn( txn, "device_lists_outbound_pokes", - {x: row[x] for x in KEY_COLS}, + { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + }, ) - row["sent"] = False self.db_pool.simple_insert_txn( txn, "device_lists_outbound_pokes", - row, + { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + "sent": False, + }, ) - if row: + if rows: self.db_pool.updates._background_update_progress_txn( txn, BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, - {"last_row": row}, + { + "last_row": { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + } + }, ) return len(rows) @@ -2300,13 +2307,15 @@ async def get_device_change_last_converted_pos(self) -> Tuple[int, str]: `FALSE` have not been converted. 
""" - row = await self.db_pool.simple_select_one( - table="device_lists_changes_converted_stream_position", - keyvalues={}, - retcols=["stream_id", "room_id"], - desc="get_device_change_last_converted_pos", + return cast( + Tuple[int, str], + await self.db_pool.simple_select_one( + table="device_lists_changes_converted_stream_position", + keyvalues={}, + retcols=["stream_id", "room_id"], + desc="get_device_change_last_converted_pos", + ), ) - return row["stream_id"], row["room_id"] async def set_device_change_last_converted_pos( self, diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index aac4cfb054f6..fae23c340798 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple, cast from typing_extensions import Literal, TypedDict @@ -274,32 +274,41 @@ async def get_e2e_room_keys( if session_id: keyvalues["session_id"] = session_id - rows = await self.db_pool.simple_select_list( - table="e2e_room_keys", - keyvalues=keyvalues, - retcols=( - "user_id", - "room_id", - "session_id", - "first_message_index", - "forwarded_count", - "is_verified", - "session_data", + rows = cast( + List[Tuple[str, str, int, int, int, str]], + await self.db_pool.simple_select_list( + table="e2e_room_keys", + keyvalues=keyvalues, + retcols=( + "room_id", + "session_id", + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_keys", ), - desc="get_e2e_room_keys", ) sessions: Dict[ Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]] ] = {"rooms": {}} - for row in rows: - room_entry = sessions["rooms"].setdefault(row["room_id"], {"sessions": {}}) - room_entry["sessions"][row["session_id"]] = { - "first_message_index": row["first_message_index"], - "forwarded_count": row["forwarded_count"], + for ( + room_id, + session_id, + first_message_index, + forwarded_count, + is_verified, + session_data, + ) in rows: + room_entry = sessions["rooms"].setdefault(room_id, {"sessions": {}}) + room_entry["sessions"][session_id] = { + "first_message_index": first_message_index, + "forwarded_count": forwarded_count, # is_verified must be returned to the client as a boolean - "is_verified": bool(row["is_verified"]), - "session_data": db_to_json(row["session_data"]), + "is_verified": bool(is_verified), + "session_data": db_to_json(session_data), } return sessions @@ -497,19 +506,26 @@ def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict: # it isn't there. 
raise StoreError(404, "No backup with that version exists") - result = self.db_pool.simple_select_one_txn( - txn, - table="e2e_room_keys_versions", - keyvalues={"user_id": user_id, "version": this_version, "deleted": 0}, - retcols=("version", "algorithm", "auth_data", "etag"), - allow_none=False, + row = cast( + Tuple[int, str, str, Optional[int]], + self.db_pool.simple_select_one_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": this_version, + "deleted": 0, + }, + retcols=("version", "algorithm", "auth_data", "etag"), + allow_none=False, + ), ) - assert result is not None # see comment on `simple_select_one_txn` - result["auth_data"] = db_to_json(result["auth_data"]) - result["version"] = str(result["version"]) - if result["etag"] is None: - result["etag"] = 0 - return result + return { + "auth_data": db_to_json(row[2]), + "version": str(row[0]), + "algorithm": row[1], + "etag": 0 if row[3] is None else row[3], + } return await self.db_pool.runInteraction( "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index f13d776b0d1a..9e98729330a3 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -24,6 +24,7 @@ Mapping, Optional, Sequence, + Set, Tuple, Union, cast, @@ -155,7 +156,6 @@ async def get_e2e_device_keys_for_federation_query( """ rows = await self.db_pool.execute( "get_e2e_device_keys_for_federation_query_check", - None, sql, now_stream_id, user_id, @@ -1111,7 +1111,7 @@ def get_device_stream_token(self) -> int: ... async def claim_e2e_one_time_keys( - self, query_list: Iterable[Tuple[str, str, str, int]] + self, query_list: Collection[Tuple[str, str, str, int]] ) -> Tuple[ Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] ]: @@ -1121,131 +1121,63 @@ async def claim_e2e_one_time_keys( query_list: An iterable of tuples of (user ID, device ID, algorithm). Returns: - A tuple pf: + A tuple (results, missing) of: A map of user ID -> a map device ID -> a map of key ID -> JSON. - A copy of the input which has not been fulfilled. + A copy of the input which has not been fulfilled. The returned counts + may be less than the input counts. In this case, the returned counts + are the number of claims that were not fulfilled. """ - - @trace - def _claim_e2e_one_time_key_simple( - txn: LoggingTransaction, - user_id: str, - device_id: str, - algorithm: str, - count: int, - ) -> List[Tuple[str, str]]: - """Claim OTK for device for DBs that don't support RETURNING. - - Returns: - A tuple of key name (algorithm + key ID) and key JSON, if an - OTK was found. - """ - - sql = """ - SELECT key_id, key_json FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT ? 
- """ - - txn.execute(sql, (user_id, device_id, algorithm, count)) - otk_rows = list(txn) - if not otk_rows: - return [] - - self.db_pool.simple_delete_many_txn( - txn, - table="e2e_one_time_keys_json", - column="key_id", - values=[otk_row[0] for otk_row in otk_rows], - keyvalues={ - "user_id": user_id, - "device_id": device_id, - "algorithm": algorithm, - }, - ) - self._invalidate_cache_and_stream( - txn, self.count_e2e_one_time_keys, (user_id, device_id) - ) - - return [ - (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows - ] - - @trace - def _claim_e2e_one_time_key_returning( - txn: LoggingTransaction, - user_id: str, - device_id: str, - algorithm: str, - count: int, - ) -> List[Tuple[str, str]]: - """Claim OTK for device for DBs that support RETURNING. - - Returns: - A tuple of key name (algorithm + key ID) and key JSON, if an - OTK was found. - """ - - # We can use RETURNING to do the fetch and DELETE in once step. - sql = """ - DELETE FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - AND key_id IN ( - SELECT key_id FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT ? - ) - RETURNING key_id, key_json - """ - - txn.execute( - sql, - (user_id, device_id, algorithm, user_id, device_id, algorithm, count), - ) - otk_rows = list(txn) - if not otk_rows: - return [] - - self._invalidate_cache_and_stream( - txn, self.count_e2e_one_time_keys, (user_id, device_id) - ) - - return [ - (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows - ] - results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} missing: List[Tuple[str, str, str, int]] = [] - for user_id, device_id, algorithm, count in query_list: - if self.database_engine.supports_returning: - # If we support RETURNING clause we can use a single query that - # allows us to use autocommit mode. - _claim_e2e_one_time_key = _claim_e2e_one_time_key_returning - db_autocommit = True - else: - _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple - db_autocommit = False - - claim_rows = await self.db_pool.runInteraction( + if isinstance(self.database_engine, PostgresEngine): + # If we can use execute_values we can use a single batch query + # in autocommit mode. + unfulfilled_claim_counts: Dict[Tuple[str, str, str], int] = {} + for user_id, device_id, algorithm, count in query_list: + unfulfilled_claim_counts[user_id, device_id, algorithm] = count + + bulk_claims = await self.db_pool.runInteraction( "claim_e2e_one_time_keys", - _claim_e2e_one_time_key, - user_id, - device_id, - algorithm, - count, - db_autocommit=db_autocommit, + self._claim_e2e_one_time_keys_bulk, + query_list, + db_autocommit=True, ) - if claim_rows: + + for user_id, device_id, algorithm, key_id, key_json in bulk_claims: device_results = results.setdefault(user_id, {}).setdefault( device_id, {} ) - for claim_row in claim_rows: - device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) + device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) + unfulfilled_claim_counts[(user_id, device_id, algorithm)] -= 1 + # Did we get enough OTKs? 
- count -= len(claim_rows) - if count: - missing.append((user_id, device_id, algorithm, count)) + missing = [ + (user, device, alg, count) + for (user, device, alg), count in unfulfilled_claim_counts.items() + if count > 0 + ] + else: + for user_id, device_id, algorithm, count in query_list: + claim_rows = await self.db_pool.runInteraction( + "claim_e2e_one_time_keys", + self._claim_e2e_one_time_key_simple, + user_id, + device_id, + algorithm, + count, + db_autocommit=False, + ) + if claim_rows: + device_results = results.setdefault(user_id, {}).setdefault( + device_id, {} + ) + for claim_row in claim_rows: + device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) + # Did we get enough OTKs? + count -= len(claim_rows) + if count: + missing.append((user_id, device_id, algorithm, count)) return results, missing @@ -1261,6 +1193,63 @@ async def claim_e2e_fallback_keys( Returns: A map of user ID -> a map device ID -> a map of key ID -> JSON. """ + if isinstance(self.database_engine, PostgresEngine): + return await self.db_pool.runInteraction( + "_claim_e2e_fallback_keys_bulk", + self._claim_e2e_fallback_keys_bulk_txn, + query_list, + db_autocommit=True, + ) + # Use an UPDATE FROM... RETURNING combined with a VALUES block to do + # everything in one query. Note: this is also supported in SQLite 3.33.0, + # (see https://www.sqlite.org/lang_update.html#update_from), but we do not + # have an equivalent of psycopg2's execute_values to do this in one query. + else: + return await self._claim_e2e_fallback_keys_simple(query_list) + + def _claim_e2e_fallback_keys_bulk_txn( + self, + txn: LoggingTransaction, + query_list: Iterable[Tuple[str, str, str, bool]], + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Efficient implementation of claim_e2e_fallback_keys for Postgres. + + Safe to autocommit: this is a single query. + """ + results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + + sql = """ + WITH claims(user_id, device_id, algorithm, mark_as_used) AS ( + VALUES ? + ) + UPDATE e2e_fallback_keys_json k + SET used = used OR mark_as_used + FROM claims + WHERE (k.user_id, k.device_id, k.algorithm) = (claims.user_id, claims.device_id, claims.algorithm) + RETURNING k.user_id, k.device_id, k.algorithm, k.key_id, k.key_json; + """ + claimed_keys = cast( + List[Tuple[str, str, str, str, str]], + txn.execute_values(sql, query_list), + ) + + seen_user_device: Set[Tuple[str, str]] = set() + for user_id, device_id, algorithm, key_id, key_json in claimed_keys: + device_results = results.setdefault(user_id, {}).setdefault(device_id, {}) + device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) + seen_user_device.add((user_id, device_id)) + + self._invalidate_cache_and_stream_bulk( + txn, self.get_e2e_unused_fallback_key_types, seen_user_device + ) + + return results + + async def _claim_e2e_fallback_keys_simple( + self, + query_list: Iterable[Tuple[str, str, str, bool]], + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Naive, inefficient implementation of claim_e2e_fallback_keys for SQLite.""" results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} for user_id, device_id, algorithm, mark_as_used in query_list: row = await self.db_pool.simple_select_one( @@ -1277,9 +1266,7 @@ async def claim_e2e_fallback_keys( if row is None: continue - key_id = row["key_id"] - key_json = row["key_json"] - used = row["used"] + key_id, key_json, used = row # Mark fallback key as used if not already. 
            if not used and mark_as_used:
@@ -1303,6 +1290,144 @@ async def claim_e2e_fallback_keys(
 
         return results
 
+    @trace
+    def _claim_e2e_one_time_key_simple(
+        self,
+        txn: LoggingTransaction,
+        user_id: str,
+        device_id: str,
+        algorithm: str,
+        count: int,
+    ) -> List[Tuple[str, str]]:
+        """Claim OTK for device for DBs that don't support RETURNING.
+
+        Returns:
+            A list of tuples of key name (algorithm + key ID) and key JSON,
+            one for each OTK claimed.
+        """
+
+        sql = """
+            SELECT key_id, key_json FROM e2e_one_time_keys_json
+            WHERE user_id = ? AND device_id = ? AND algorithm = ?
+            LIMIT ?
+        """
+
+        txn.execute(sql, (user_id, device_id, algorithm, count))
+        otk_rows = list(txn)
+        if not otk_rows:
+            return []
+
+        self.db_pool.simple_delete_many_txn(
+            txn,
+            table="e2e_one_time_keys_json",
+            column="key_id",
+            values=[otk_row[0] for otk_row in otk_rows],
+            keyvalues={
+                "user_id": user_id,
+                "device_id": device_id,
+                "algorithm": algorithm,
+            },
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.count_e2e_one_time_keys, (user_id, device_id)
+        )
+
+        return [(f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows]
+
+    @trace
+    def _claim_e2e_one_time_keys_bulk(
+        self,
+        txn: LoggingTransaction,
+        query_list: Iterable[Tuple[str, str, str, int]],
+    ) -> List[Tuple[str, str, str, str, str]]:
+        """Bulk claim OTKs, for DBs that support DELETE FROM... RETURNING.
+
+        Args:
+            query_list: Collection of tuples (user_id, device_id, algorithm, count)
+                as passed to claim_e2e_one_time_keys.
+
+        Returns:
+            A list of tuples (user_id, device_id, algorithm, key_id, key_json)
+            for each OTK claimed.
+        """
+        sql = """
+            WITH claims(user_id, device_id, algorithm, claim_count) AS (
+                VALUES ?
+            ), ranked_keys AS (
+                SELECT
+                    user_id, device_id, algorithm, key_id, claim_count,
+                    ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r
+                FROM e2e_one_time_keys_json
+                    JOIN claims USING (user_id, device_id, algorithm)
+            )
+            DELETE FROM e2e_one_time_keys_json k
+            WHERE (user_id, device_id, algorithm, key_id) IN (
+                SELECT user_id, device_id, algorithm, key_id
+                FROM ranked_keys
+                WHERE r <= claim_count
+            )
+            RETURNING user_id, device_id, algorithm, key_id, key_json;
+        """
+        otk_rows = cast(
+            List[Tuple[str, str, str, str, str]], txn.execute_values(sql, query_list)
+        )
+
+        seen_user_device = {
+            (user_id, device_id) for user_id, device_id, _, _, _ in otk_rows
+        }
+        self._invalidate_cache_and_stream_bulk(
+            txn,
+            self.count_e2e_one_time_keys,
+            seen_user_device,
+        )
+
+        return otk_rows
+
+    async def get_master_cross_signing_key_updatable_before(
+        self, user_id: str
+    ) -> Tuple[bool, Optional[int]]:
+        """Get time before which a master cross-signing key may be replaced without UIA.
+
+        (UIA means "User-Interactive Auth".)
+
+        There are three cases to distinguish:
+         (1) No master cross-signing key.
+         (2) The key exists, but there is no replace-without-UI timestamp in the DB.
+         (3) The key exists, and has such a timestamp recorded.
+
+        Returns: a 2-tuple of:
+          - a boolean: is there a master cross-signing key already?
+          - an optional timestamp, directly taken from the DB.
+
+        In terms of the cases above, these are:
+          (1) (False, None).
+          (2) (True, None).
+          (3) (True, <timestamp in ms>).
+
+        """
+
+        def impl(txn: LoggingTransaction) -> Tuple[bool, Optional[int]]:
+            # We want to distinguish between three cases:
+            txn.execute(
+                """
+                SELECT updatable_without_uia_before_ms
+                    FROM e2e_cross_signing_keys
+                    WHERE user_id = ?
AND keytype = 'master' + ORDER BY stream_id DESC + LIMIT 1 + """, + (user_id,), + ) + row = cast(Optional[Tuple[Optional[int]]], txn.fetchone()) + if row is None: + return False, None + return True, row[0] + + return await self.db_pool.runInteraction( + "e2e_cross_signing_keys", + impl, + ) + class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def __init__( @@ -1550,3 +1675,42 @@ async def store_e2e_cross_signing_signatures( ], desc="add_e2e_signing_key", ) + + async def allow_master_cross_signing_key_replacement_without_uia( + self, user_id: str, duration_ms: int + ) -> Optional[int]: + """Mark this user's latest master key as being replaceable without UIA. + + Said replacement will only be permitted for a short time after calling this + function. That time period is controlled by the duration argument. + + Returns: + None, if there is no such key. + Otherwise, the timestamp before which replacement is allowed without UIA. + """ + timestamp = self._clock.time_msec() + duration_ms + + def impl(txn: LoggingTransaction) -> Optional[int]: + txn.execute( + """ + UPDATE e2e_cross_signing_keys + SET updatable_without_uia_before_ms = ? + WHERE stream_id = ( + SELECT stream_id + FROM e2e_cross_signing_keys + WHERE user_id = ? AND keytype = 'master' + ORDER BY stream_id DESC + LIMIT 1 + ) + """, + (timestamp, user_id), + ) + if txn.rowcount == 0: + return None + + return timestamp + + return await self.db_pool.runInteraction( + "allow_master_cross_signing_key_replacement_without_uia", + impl, + ) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 4f80ce75ccb4..d5fb6ecc73bc 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -193,7 +193,8 @@ async def get_auth_chain_ids( # Check if we have indexed the room so we can use the chain cover # algorithm. room = await self.get_room(room_id) # type: ignore[attr-defined] - if room["has_auth_chain_index"]: + # If the room has an auth chain index. + if room[1]: try: return await self.db_pool.runInteraction( "get_auth_chain_ids_chains", @@ -300,6 +301,11 @@ def _get_auth_chain_ids_using_cover_index_txn( # Add the initial set of chains, excluding the sequence corresponding to # initial event. for chain_id, seq_no in event_chains.items(): + # Check if the initial event is the first item in the chain. If so, then + # there is nothing new to add from this chain. + if seq_no == 1: + continue + chains[chain_id] = max(seq_no - 1, chains.get(chain_id, 0)) # Now for each chain we figure out the maximum sequence number reachable @@ -411,7 +417,8 @@ async def get_auth_chain_difference( # Check if we have indexed the room so we can use the chain cover # algorithm. room = await self.get_room(room_id) # type: ignore[attr-defined] - if room["has_auth_chain_index"]: + # If the room has an auth chain index. 
+ if room[1]: try: return await self.db_pool.runInteraction( "get_auth_chain_difference_chains", @@ -1437,24 +1444,18 @@ def _get_backfill_events( ) if event_lookup_result is not None: + event_type, depth, stream_ordering = event_lookup_result logger.debug( "_get_backfill_events(room_id=%s): seed_event_id=%s depth=%s stream_ordering=%s type=%s", room_id, seed_event_id, - event_lookup_result["depth"], - event_lookup_result["stream_ordering"], - event_lookup_result["type"], + depth, + stream_ordering, + event_type, ) - if event_lookup_result["depth"]: - queue.put( - ( - -event_lookup_result["depth"], - -event_lookup_result["stream_ordering"], - seed_event_id, - event_lookup_result["type"], - ) - ) + if depth: + queue.put((-depth, -stream_ordering, seed_event_id, event_type)) while not queue.empty() and len(event_id_results) < limit: try: @@ -1898,21 +1899,23 @@ async def prune_staged_events_in_room( # keeping only the forward extremities (i.e. the events not referenced # by other events in the queue). We do this so that we can always # backpaginate in all the events we have dropped. - rows = await self.db_pool.simple_select_list( - table="federation_inbound_events_staging", - keyvalues={"room_id": room_id}, - retcols=("event_id", "event_json"), - desc="prune_staged_events_in_room_fetch", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="federation_inbound_events_staging", + keyvalues={"room_id": room_id}, + retcols=("event_id", "event_json"), + desc="prune_staged_events_in_room_fetch", + ), ) # Find the set of events referenced by those in the queue, as well as # collecting all the event IDs in the queue. referenced_events: Set[str] = set() seen_events: Set[str] = set() - for row in rows: - event_id = row["event_id"] + for event_id, event_json in rows: seen_events.add(event_id) - event_d = db_to_json(row["event_json"]) + event_d = db_to_json(event_json) # We don't bother parsing the dicts into full blown event objects, # as that is needlessly expensive. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 39556481ffc9..dd8957680ad2 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -311,6 +311,14 @@ def __init__( self._background_drop_null_thread_id_indexes, ) + # Add a room ID index to speed up room deletion + self.db_pool.updates.register_background_index_update( + "event_push_summary_index_room_id", + index_name="event_push_summary_index_room_id", + table="event_push_summary", + columns=["room_id"], + ) + async def _background_drop_null_thread_id_indexes( self, progress: JsonDict, batch_size: int ) -> int: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 3c1492e3ad28..5207cc0f4e80 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -79,7 +79,7 @@ class DeltaState: Attributes: to_delete: List of type/state_keys to delete from current state to_insert: Map of state to upsert into current state - no_longer_in_room: The server is not longer in the room, so the room + no_longer_in_room: The server is no longer in the room, so the room should e.g. be removed from `current_state_events` table. 
""" @@ -131,22 +131,25 @@ def __init__( @trace async def _persist_events_and_state_updates( self, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], *, - state_delta_for_room: Dict[str, DeltaState], - new_forward_extremities: Dict[str, Set[str]], + state_delta_for_room: Optional[DeltaState], + new_forward_extremities: Optional[Set[str]], use_negative_stream_ordering: bool = False, inhibit_local_membership_updates: bool = False, ) -> None: """Persist a set of events alongside updates to the current state and - forward extremities tables. + forward extremities tables. + + Assumes that we are only persisting events for one room at a time. Args: + room_id: events_and_contexts: - state_delta_for_room: Map from room_id to the delta to apply to - room state - new_forward_extremities: Map from room_id to set of event IDs - that are the new forward extremities of the room. + state_delta_for_room: The delta to apply to the room state + new_forward_extremities: A set of event IDs that are the new forward + extremities of the room. use_negative_stream_ordering: Whether to start stream_ordering on the negative side and decrement. This should be set as True for backfilled events because backfilled events get a negative @@ -196,6 +199,7 @@ async def _persist_events_and_state_updates( await self.db_pool.runInteraction( "persist_events", self._persist_events_txn, + room_id=room_id, events_and_contexts=events_and_contexts, inhibit_local_membership_updates=inhibit_local_membership_updates, state_delta_for_room=state_delta_for_room, @@ -221,9 +225,9 @@ async def _persist_events_and_state_updates( event_counter.labels(event.type, origin_type, origin_entity).inc() - for room_id, latest_event_ids in new_forward_extremities.items(): + if new_forward_extremities: self.store.get_latest_event_ids_in_room.prefill( - (room_id,), frozenset(latest_event_ids) + (room_id,), frozenset(new_forward_extremities) ) async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]: @@ -336,10 +340,11 @@ def _persist_events_txn( self, txn: LoggingTransaction, *, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], inhibit_local_membership_updates: bool, - state_delta_for_room: Dict[str, DeltaState], - new_forward_extremities: Dict[str, Set[str]], + state_delta_for_room: Optional[DeltaState], + new_forward_extremities: Optional[Set[str]], ) -> None: """Insert some number of room events into the necessary database tables. @@ -347,8 +352,11 @@ def _persist_events_txn( and the rejections table. Things reading from those table will need to check whether the event was rejected. + Assumes that we are only persisting events for one room at a time. + Args: txn + room_id: The room the events are from events_and_contexts: events to persist inhibit_local_membership_updates: Stop the local_current_membership from being updated by these events. This should be set to True @@ -357,10 +365,9 @@ def _persist_events_txn( delete_existing True to purge existing table rows for the events from the database. This is useful when retrying due to IntegrityError. - state_delta_for_room: The current-state delta for each room. - new_forward_extremities: The new forward extremities for each room. - For each room, a list of the event ids which are the forward - extremities. + state_delta_for_room: The current-state delta for the room. + new_forward_extremities: The new forward extremities for the room: + a set of the event ids which are the forward extremities. 
Raises: PartialStateConflictError: if attempting to persist a partial state event in @@ -376,14 +383,13 @@ def _persist_events_txn( # # Annoyingly SQLite doesn't support row level locking. if isinstance(self.database_engine, PostgresEngine): - for room_id in {e.room_id for e, _ in events_and_contexts}: - txn.execute( - "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE", - (room_id,), - ) - row = txn.fetchone() - if row is None: - raise Exception(f"Room does not exist {room_id}") + txn.execute( + "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE", + (room_id,), + ) + row = txn.fetchone() + if row is None: + raise Exception(f"Room does not exist {room_id}") # stream orderings should have been assigned by now assert min_stream_order @@ -419,7 +425,9 @@ def _persist_events_txn( events_and_contexts ) - self._update_room_depths_txn(txn, events_and_contexts=events_and_contexts) + self._update_room_depths_txn( + txn, room_id, events_and_contexts=events_and_contexts + ) # _update_outliers_txn filters out any events which have already been # persisted, and returns the filtered list. @@ -432,11 +440,13 @@ def _persist_events_txn( self._store_event_txn(txn, events_and_contexts=events_and_contexts) - self._update_forward_extremities_txn( - txn, - new_forward_extremities=new_forward_extremities, - max_stream_order=max_stream_order, - ) + if new_forward_extremities: + self._update_forward_extremities_txn( + txn, + room_id, + new_forward_extremities=new_forward_extremities, + max_stream_order=max_stream_order, + ) self._persist_transaction_ids_txn(txn, events_and_contexts) @@ -464,7 +474,10 @@ def _persist_events_txn( # We call this last as it assumes we've inserted the events into # room_memberships, where applicable. # NB: This function invalidates all state related caches - self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) + if state_delta_for_room: + self._update_current_state_txn( + txn, room_id, state_delta_for_room, min_stream_order + ) def _persist_event_auth_chain_txn( self, @@ -1026,74 +1039,75 @@ async def update_current_state( await self.db_pool.runInteraction( "update_current_state", self._update_current_state_txn, - state_delta_by_room={room_id: state_delta}, + room_id, + delta_state=state_delta, stream_id=stream_ordering, ) def _update_current_state_txn( self, txn: LoggingTransaction, - state_delta_by_room: Dict[str, DeltaState], + room_id: str, + delta_state: DeltaState, stream_id: int, ) -> None: - for room_id, delta_state in state_delta_by_room.items(): - to_delete = delta_state.to_delete - to_insert = delta_state.to_insert - - # Figure out the changes of membership to invalidate the - # `get_rooms_for_user` cache. - # We find out which membership events we may have deleted - # and which we have added, then we invalidate the caches for all - # those users. - members_changed = { - state_key - for ev_type, state_key in itertools.chain(to_delete, to_insert) - if ev_type == EventTypes.Member - } + to_delete = delta_state.to_delete + to_insert = delta_state.to_insert + + # Figure out the changes of membership to invalidate the + # `get_rooms_for_user` cache. + # We find out which membership events we may have deleted + # and which we have added, then we invalidate the caches for all + # those users. 
+ members_changed = { + state_key + for ev_type, state_key in itertools.chain(to_delete, to_insert) + if ev_type == EventTypes.Member + } - if delta_state.no_longer_in_room: - # Server is no longer in the room so we delete the room from - # current_state_events, being careful we've already updated the - # rooms.room_version column (which gets populated in a - # background task). - self._upsert_room_version_txn(txn, room_id) + if delta_state.no_longer_in_room: + # Server is no longer in the room so we delete the room from + # current_state_events, being careful we've already updated the + # rooms.room_version column (which gets populated in a + # background task). + self._upsert_room_version_txn(txn, room_id) - # Before deleting we populate the current_state_delta_stream - # so that async background tasks get told what happened. - sql = """ + # Before deleting we populate the current_state_delta_stream + # so that async background tasks get told what happened. + sql = """ INSERT INTO current_state_delta_stream (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) SELECT ?, ?, room_id, type, state_key, null, event_id FROM current_state_events WHERE room_id = ? """ - txn.execute(sql, (stream_id, self._instance_name, room_id)) + txn.execute(sql, (stream_id, self._instance_name, room_id)) - # We also want to invalidate the membership caches for users - # that were in the room. - users_in_room = self.store.get_users_in_room_txn(txn, room_id) - members_changed.update(users_in_room) + # We also want to invalidate the membership caches for users + # that were in the room. + users_in_room = self.store.get_users_in_room_txn(txn, room_id) + members_changed.update(users_in_room) - self.db_pool.simple_delete_txn( - txn, - table="current_state_events", - keyvalues={"room_id": room_id}, - ) - else: - # We're still in the room, so we update the current state as normal. + self.db_pool.simple_delete_txn( + txn, + table="current_state_events", + keyvalues={"room_id": room_id}, + ) + else: + # We're still in the room, so we update the current state as normal. - # First we add entries to the current_state_delta_stream. We - # do this before updating the current_state_events table so - # that we can use it to calculate the `prev_event_id`. (This - # allows us to not have to pull out the existing state - # unnecessarily). - # - # The stream_id for the update is chosen to be the minimum of the stream_ids - # for the batch of the events that we are persisting; that means we do not - # end up in a situation where workers see events before the - # current_state_delta updates. - # - sql = """ + # First we add entries to the current_state_delta_stream. We + # do this before updating the current_state_events table so + # that we can use it to calculate the `prev_event_id`. (This + # allows us to not have to pull out the existing state + # unnecessarily). + # + # The stream_id for the update is chosen to be the minimum of the stream_ids + # for the batch of the events that we are persisting; that means we do not + # end up in a situation where workers see events before the + # current_state_delta updates. + # + sql = """ INSERT INTO current_state_delta_stream (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) SELECT ?, ?, ?, ?, ?, ?, ( @@ -1101,39 +1115,39 @@ def _update_current_state_txn( WHERE room_id = ? AND type = ? AND state_key = ? 
) """ - txn.execute_batch( - sql, + txn.execute_batch( + sql, + ( ( - ( - stream_id, - self._instance_name, - room_id, - etype, - state_key, - to_insert.get((etype, state_key)), - room_id, - etype, - state_key, - ) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), - ) - # Now we actually update the current_state_events table + stream_id, + self._instance_name, + room_id, + etype, + state_key, + to_insert.get((etype, state_key)), + room_id, + etype, + state_key, + ) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) + # Now we actually update the current_state_events table - txn.execute_batch( - "DELETE FROM current_state_events" - " WHERE room_id = ? AND type = ? AND state_key = ?", - ( - (room_id, etype, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), - ) + txn.execute_batch( + "DELETE FROM current_state_events" + " WHERE room_id = ? AND type = ? AND state_key = ?", + ( + (room_id, etype, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) - # We include the membership in the current state table, hence we do - # a lookup when we insert. This assumes that all events have already - # been inserted into room_memberships. - txn.execute_batch( - """INSERT INTO current_state_events + # We include the membership in the current state table, hence we do + # a lookup when we insert. This assumes that all events have already + # been inserted into room_memberships. + txn.execute_batch( + """INSERT INTO current_state_events (room_id, type, state_key, event_id, membership, event_stream_ordering) VALUES ( ?, ?, ?, ?, @@ -1141,34 +1155,34 @@ def _update_current_state_txn( (SELECT stream_ordering FROM events WHERE event_id = ?) ) """, - [ - (room_id, key[0], key[1], ev_id, ev_id, ev_id) - for key, ev_id in to_insert.items() - ], - ) + [ + (room_id, key[0], key[1], ev_id, ev_id, ev_id) + for key, ev_id in to_insert.items() + ], + ) - # We now update `local_current_membership`. We do this regardless - # of whether we're still in the room or not to handle the case where - # e.g. we just got banned (where we need to record that fact here). - - # Note: Do we really want to delete rows here (that we do not - # subsequently reinsert below)? While technically correct it means - # we have no record of the fact the user *was* a member of the - # room but got, say, state reset out of it. - if to_delete or to_insert: - txn.execute_batch( - "DELETE FROM local_current_membership" - " WHERE room_id = ? AND user_id = ?", - ( - (room_id, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - if etype == EventTypes.Member and self.is_mine_id(state_key) - ), - ) + # We now update `local_current_membership`. We do this regardless + # of whether we're still in the room or not to handle the case where + # e.g. we just got banned (where we need to record that fact here). + + # Note: Do we really want to delete rows here (that we do not + # subsequently reinsert below)? While technically correct it means + # we have no record of the fact the user *was* a member of the + # room but got, say, state reset out of it. + if to_delete or to_insert: + txn.execute_batch( + "DELETE FROM local_current_membership" + " WHERE room_id = ? 
AND user_id = ?", + ( + (room_id, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + if etype == EventTypes.Member and self.is_mine_id(state_key) + ), + ) - if to_insert: - txn.execute_batch( - """INSERT INTO local_current_membership + if to_insert: + txn.execute_batch( + """INSERT INTO local_current_membership (room_id, user_id, event_id, membership, event_stream_ordering) VALUES ( ?, ?, ?, @@ -1176,29 +1190,27 @@ def _update_current_state_txn( (SELECT stream_ordering FROM events WHERE event_id = ?) ) """, - [ - (room_id, key[1], ev_id, ev_id, ev_id) - for key, ev_id in to_insert.items() - if key[0] == EventTypes.Member and self.is_mine_id(key[1]) - ], - ) - - txn.call_after( - self.store._curr_state_delta_stream_cache.entity_has_changed, - room_id, - stream_id, + [ + (room_id, key[1], ev_id, ev_id, ev_id) + for key, ev_id in to_insert.items() + if key[0] == EventTypes.Member and self.is_mine_id(key[1]) + ], ) - # Invalidate the various caches - self.store._invalidate_state_caches_and_stream( - txn, room_id, members_changed - ) + txn.call_after( + self.store._curr_state_delta_stream_cache.entity_has_changed, + room_id, + stream_id, + ) - # Check if any of the remote membership changes requires us to - # unsubscribe from their device lists. - self.store.handle_potentially_left_users_txn( - txn, {m for m in members_changed if not self.hs.is_mine_id(m)} - ) + # Invalidate the various caches + self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed) + + # Check if any of the remote membership changes requires us to + # unsubscribe from their device lists. + self.store.handle_potentially_left_users_txn( + txn, {m for m in members_changed if not self.hs.is_mine_id(m)} + ) def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None: """Update the room version in the database based off current state @@ -1232,23 +1244,19 @@ def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> Non def _update_forward_extremities_txn( self, txn: LoggingTransaction, - new_forward_extremities: Dict[str, Set[str]], + room_id: str, + new_forward_extremities: Set[str], max_stream_order: int, ) -> None: - for room_id in new_forward_extremities.keys(): - self.db_pool.simple_delete_txn( - txn, table="event_forward_extremities", keyvalues={"room_id": room_id} - ) + self.db_pool.simple_delete_txn( + txn, table="event_forward_extremities", keyvalues={"room_id": room_id} + ) self.db_pool.simple_insert_many_txn( txn, table="event_forward_extremities", keys=("event_id", "room_id"), - values=[ - (ev_id, room_id) - for room_id, new_extrem in new_forward_extremities.items() - for ev_id in new_extrem - ], + values=[(ev_id, room_id) for ev_id in new_forward_extremities], ) # We now insert into stream_ordering_to_exterm a mapping from room_id, # new stream_ordering to new forward extremeties in the room. 
@@ -1260,8 +1268,7 @@ def _update_forward_extremities_txn( keys=("room_id", "event_id", "stream_ordering"), values=[ (room_id, event_id, max_stream_order) - for room_id, new_extrem in new_forward_extremities.items() - for event_id in new_extrem + for event_id in new_forward_extremities ], ) @@ -1298,36 +1305,45 @@ def _filter_events_and_contexts_for_duplicates( def _update_room_depths_txn( self, txn: LoggingTransaction, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], ) -> None: """Update min_depth for each room Args: txn: db connection + room_id: The room ID events_and_contexts: events we are persisting """ - depth_updates: Dict[str, int] = {} + stream_ordering: Optional[int] = None + depth_update = 0 for event, context in events_and_contexts: - # Then update the `stream_ordering` position to mark the latest - # event as the front of the room. This should not be done for - # backfilled events because backfilled events have negative - # stream_ordering and happened in the past so we know that we don't - # need to update the stream_ordering tip/front for the room. + # Don't update the stream ordering for backfilled events because + # backfilled events have negative stream_ordering and happened in the + # past, so we know that we don't need to update the stream_ordering + # tip/front for the room. assert event.internal_metadata.stream_ordering is not None if event.internal_metadata.stream_ordering >= 0: - txn.call_after( - self.store._events_stream_cache.entity_has_changed, - event.room_id, - event.internal_metadata.stream_ordering, - ) + if stream_ordering is None: + stream_ordering = event.internal_metadata.stream_ordering + else: + stream_ordering = max( + stream_ordering, event.internal_metadata.stream_ordering + ) if not event.internal_metadata.is_outlier() and not context.rejected: - depth_updates[event.room_id] = max( - event.depth, depth_updates.get(event.room_id, event.depth) - ) + depth_update = max(event.depth, depth_update) - for room_id, depth in depth_updates.items(): - self._update_min_depth_for_room_txn(txn, room_id, depth) + # Then update the `stream_ordering` position to mark the latest event as + # the front of the room. + if stream_ordering is not None: + txn.call_after( + self.store._events_stream_cache.entity_has_changed, + room_id, + stream_ordering, + ) + + self._update_min_depth_for_room_txn(txn, room_id, depth_update) def _update_outliers_txn( self, @@ -1350,13 +1366,19 @@ def _update_outliers_txn( PartialStateConflictError: if attempting to persist a partial state event in a room that has been un-partial stated. 
""" - txn.execute( - "SELECT event_id, outlier FROM events WHERE event_id in (%s)" - % (",".join(["?"] * len(events_and_contexts)),), - [event.event_id for event, _ in events_and_contexts], + rows = cast( + List[Tuple[str, bool]], + self.db_pool.simple_select_many_txn( + txn, + "events", + "event_id", + [event.event_id for event, _ in events_and_contexts], + keyvalues={}, + retcols=("event_id", "outlier"), + ), ) - have_persisted = dict(cast(Iterable[Tuple[str, bool]], txn)) + have_persisted = dict(rows) logger.debug( "_update_outliers_txn: events=%s have_persisted=%s", @@ -1454,7 +1476,7 @@ def event_dict(event: EventBase) -> JsonDict: txn, table="event_json", keys=("event_id", "room_id", "internal_metadata", "json", "format_version"), - values=( + values=[ ( event.event_id, event.room_id, @@ -1463,7 +1485,7 @@ def event_dict(event: EventBase) -> JsonDict: event.format_version, ) for event, _ in events_and_contexts - ), + ], ) self.db_pool.simple_insert_many_txn( @@ -1486,7 +1508,7 @@ def event_dict(event: EventBase) -> JsonDict: "state_key", "rejection_reason", ), - values=( + values=[ ( self._instance_name, event.internal_metadata.stream_ordering, @@ -1505,7 +1527,7 @@ def event_dict(event: EventBase) -> JsonDict: context.rejected, ) for event, context in events_and_contexts - ), + ], ) # If we're persisting an unredacted event we go and ensure @@ -1528,11 +1550,11 @@ def event_dict(event: EventBase) -> JsonDict: txn, table="state_events", keys=("event_id", "room_id", "type", "state_key"), - values=( + values=[ (event.event_id, event.room_id, event.type, event.state_key) for event, _ in events_and_contexts if event.is_state() - ), + ], ) def _store_rejected_events_txn( @@ -1912,8 +1934,7 @@ def _handle_redact_relations( if row is None: return - redacted_relates_to = row["relates_to_id"] - rel_type = row["relation_type"] + redacted_relates_to, rel_type = row self.db_pool.simple_delete_txn( txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index c5fce1c82b12..0c91f19c8e0d 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -425,7 +425,7 @@ async def _cleanup_extremities_bg_update( """Background update to clean out extremities that should have been deleted previously. - Mainly used to deal with the aftermath of #5269. + Mainly used to deal with the aftermath of https://github.com/matrix-org/synapse/issues/5269. """ # This works by first copying all existing forward extremities into the @@ -558,7 +558,7 @@ def _cleanup_extremities_bg_update_txn(txn: LoggingTransaction) -> int: ) logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", + "Deleted %d forward extremities of %d checked, to clean up matrix-org/synapse#5269", deleted, len(original_set), ) @@ -1222,14 +1222,13 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: ) # Iterate the parent IDs and invalidate caches. 
- for parent_id in {r[1] for r in relations_to_insert}: - cache_tuple = (parent_id,) - self._invalidate_cache_and_stream( # type: ignore[attr-defined] - txn, self.get_relations_for_event, cache_tuple # type: ignore[attr-defined] - ) - self._invalidate_cache_and_stream( # type: ignore[attr-defined] - txn, self.get_thread_summary, cache_tuple # type: ignore[attr-defined] - ) + cache_tuples = {(r[1],) for r in relations_to_insert} + self._invalidate_cache_and_stream_bulk( # type: ignore[attr-defined] + txn, self.get_relations_for_event, cache_tuples # type: ignore[attr-defined] + ) + self._invalidate_cache_and_stream_bulk( # type: ignore[attr-defined] + txn, self.get_thread_summary, cache_tuples # type: ignore[attr-defined] + ) if results: latest_event_id = results[-1][0] @@ -1310,12 +1309,9 @@ def process(txn: Cursor) -> None: # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the # indexes on it. - # We need to pass execute a dummy function to handle the txn's result otherwise - # it tries to call fetchall() on it and fails because there's no result to fetch. - await self.db_pool.execute( + await self.db_pool.runInteraction( "background_analyze_new_stream_ordering_column", - lambda txn: None, - "ANALYZE events(stream_ordering2)", + lambda txn: txn.execute("ANALYZE events(stream_ordering2)"), ) await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index f851bff60451..0ba84b146975 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import Any, Dict, List +from typing import List, Optional, Tuple, cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -91,12 +91,17 @@ def delete_forward_extremities_for_room_txn(txn: LoggingTransaction) -> int: async def get_forward_extremities_for_room( self, room_id: str - ) -> List[Dict[str, Any]]: - """Get list of forward extremities for a room.""" + ) -> List[Tuple[str, int, int, Optional[int]]]: + """ + Get list of forward extremities for a room. + + Returns: + A list of tuples of event_id, state_group, depth, and received_ts. + """ def get_forward_extremities_for_room_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, int, int, Optional[int]]]: sql = """ SELECT event_id, state_group, depth, received_ts FROM event_forward_extremities @@ -106,7 +111,7 @@ def get_forward_extremities_for_room_txn( """ txn.execute(sql, (room_id,)) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, int, int, Optional[int]]], txn.fetchall()) return await self.db_pool.runInteraction( "get_forward_extremities_for_room", diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 8af638d60f3c..41250590612d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1312,7 +1312,8 @@ async def _fetch_event_ids_and_get_outstanding_redactions( room_version: Optional[RoomVersion] if not room_version_id: # this should only happen for out-of-band membership events which - # arrived before #6983 landed. For all other events, we should have + # arrived before https://github.com/matrix-org/synapse/issues/6983 + # landed. 
For all other events, we should have # an entry in the 'rooms' table. # # However, the 'out_of_band_membership' flag is unreliable for older @@ -1323,7 +1324,8 @@ async def _fetch_event_ids_and_get_outstanding_redactions( "Room %s for event %s is unknown" % (d["room_id"], event_id) ) - # so, assuming this is an out-of-band-invite that arrived before #6983 + # so, assuming this is an out-of-band-invite that arrived before + # https://github.com/matrix-org/synapse/issues/6983 # landed, we know that the room version must be v5 or earlier (because # v6 hadn't been invented at that point, so invites from such rooms # would have been rejected.) @@ -1998,7 +2000,7 @@ async def get_event_ordering(self, event_id: str) -> Tuple[int, int]: if not res: raise SynapseError(404, "Could not find event %s" % (event_id,)) - return int(res["topological_ordering"]), int(res["stream_ordering"]) + return int(res[0]), int(res[1]) async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]: """Retrieve the entry with the lowest expiry timestamp in the event_expiry @@ -2095,12 +2097,6 @@ async def _cleanup_old_transaction_ids(self) -> None: def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None: one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 - sql = """ - DELETE FROM event_txn_id - WHERE inserted_ts < ? - """ - txn.execute(sql, (one_day_ago,)) - sql = """ DELETE FROM event_txn_id_device_id WHERE inserted_ts < ? diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py index 654f924019a3..60621edeefca 100644 --- a/synapse/storage/databases/main/experimental_features.py +++ b/synapse/storage/databases/main/experimental_features.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, FrozenSet +from typing import TYPE_CHECKING, Dict, FrozenSet, List, Tuple, cast from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main import CacheInvalidationWorkerStore @@ -42,13 +42,16 @@ async def list_enabled_features(self, user_id: str) -> FrozenSet[str]: Returns: the features currently enabled for the user """ - enabled = await self.db_pool.simple_select_list( - "per_user_experimental_features", - {"user_id": user_id, "enabled": True}, - ["feature"], + enabled = cast( + List[Tuple[str]], + await self.db_pool.simple_select_list( + table="per_user_experimental_features", + keyvalues={"user_id": user_id, "enabled": True}, + retcols=("feature",), + ), ) - return frozenset(feature["feature"] for feature in enabled) + return frozenset(feature[0] for feature in enabled) async def set_features_for_user( self, diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index ea797864b9fd..c700872fdc59 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -107,13 +107,16 @@ def store_server_keys_response_txn(txn: LoggingTransaction) -> None: # invalidate takes a tuple corresponding to the params of # _get_server_keys_json. _get_server_keys_json only takes one # param, which is itself the 2-tuple (server_name, key_id). 
- for key_id in verify_keys: - self._invalidate_cache_and_stream( - txn, self._get_server_keys_json, ((server_name, key_id),) - ) - self._invalidate_cache_and_stream( - txn, self.get_server_key_json_for_remote, (server_name, key_id) - ) + self._invalidate_cache_and_stream_bulk( + txn, + self._get_server_keys_json, + [((server_name, key_id),) for key_id in verify_keys], + ) + self._invalidate_cache_and_stream_bulk( + txn, + self.get_server_key_json_for_remote, + [(server_name, key_id) for key_id in verify_keys], + ) await self.db_pool.runInteraction( "store_server_keys_response", store_server_keys_response_txn @@ -248,17 +251,20 @@ async def get_all_server_keys_json_for_remote( If we have multiple entries for a given key ID, returns the most recent. """ - rows = await self.db_pool.simple_select_list( - table="server_keys_json", - keyvalues={"server_name": server_name}, - retcols=( - "key_id", - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "key_json", + rows = cast( + List[Tuple[str, str, int, int, Union[bytes, memoryview]]], + await self.db_pool.simple_select_list( + table="server_keys_json", + keyvalues={"server_name": server_name}, + retcols=( + "key_id", + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + desc="get_server_keys_json_for_remote", ), - desc="get_server_keys_json_for_remote", ) if not rows: @@ -266,14 +272,14 @@ async def get_all_server_keys_json_for_remote( # We sort the rows by ts_added_ms so that the most recently added entry # will stomp over older entries in the dictionary. - rows.sort(key=lambda r: r["ts_added_ms"]) + rows.sort(key=lambda r: r[2]) return { - row["key_id"]: FetchKeyResultForRemote( + key_id: FetchKeyResultForRemote( # Cast to bytes since postgresql returns a memoryview. - key_json=bytes(row["key_json"]), - valid_until_ts=row["ts_valid_until_ms"], - added_ts=row["ts_added_ms"], + key_json=bytes(key_json), + valid_until_ts=ts_valid_until_ms, + added_ts=ts_added_ms, ) - for row in rows + for key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json in rows } diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 2e6b176bd2c2..149135b8b5fe 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -15,9 +15,7 @@ from enum import Enum from typing import ( TYPE_CHECKING, - Any, Collection, - Dict, Iterable, List, Optional, @@ -26,6 +24,8 @@ cast, ) +import attr + from synapse.api.constants import Direction from synapse.logging.opentracing import trace from synapse.media._base import ThumbnailInfo @@ -45,6 +45,40 @@ ) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class LocalMedia: + media_id: str + media_type: str + media_length: Optional[int] + upload_name: str + created_ts: int + url_cache: Optional[str] + last_access_ts: int + quarantined_by: Optional[str] + safe_from_quarantine: bool + user_id: Optional[str] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RemoteMedia: + media_origin: str + media_id: str + media_type: str + media_length: int + upload_name: Optional[str] + filesystem_id: str + created_ts: int + last_access_ts: int + quarantined_by: Optional[str] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class UrlCache: + response_code: int + expires_ts: int + og: Union[str, bytes] + + class MediaSortOrder(Enum): """ Enum to define the sorting method used when returning media with @@ -116,6 +150,13 @@ def __init__( 
self._drop_media_index_without_method, ) + if hs.config.media.can_load_media_repo: + self.unused_expiration_time: Optional[ + int + ] = hs.config.media.unused_expiration_time + else: + self.unused_expiration_time = None + async def _drop_media_index_without_method( self, progress: JsonDict, batch_size: int ) -> int: @@ -151,13 +192,13 @@ def __init__( super().__init__(database, db_conn, hs) self.server_name: str = hs.hostname - async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: + async def get_local_media(self, media_id: str) -> Optional[LocalMedia]: """Get the metadata for a local piece of media Returns: None if the media_id doesn't exist. """ - return await self.db_pool.simple_select_one( + row = await self.db_pool.simple_select_one( "local_media_repository", {"media_id": media_id}, ( @@ -167,11 +208,27 @@ async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: "created_ts", "quarantined_by", "url_cache", + "last_access_ts", "safe_from_quarantine", + "user_id", ), allow_none=True, desc="get_local_media", ) + if row is None: + return None + return LocalMedia( + media_id=media_id, + media_type=row[0], + media_length=row[1], + upload_name=row[2], + created_ts=row[3], + quarantined_by=row[4], + url_cache=row[5], + last_access_ts=row[6], + safe_from_quarantine=row[7], + user_id=row[8], + ) async def get_local_media_by_user_paginate( self, @@ -180,7 +237,7 @@ async def get_local_media_by_user_paginate( user_id: str, order_by: str = MediaSortOrder.CREATED_TS.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> Tuple[List[LocalMedia], int]: """Get a paginated list of metadata for a local piece of media which an user_id has uploaded @@ -197,7 +254,7 @@ async def get_local_media_by_user_paginate( def get_local_media_by_user_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> Tuple[List[LocalMedia], int]: # Set ordering order_by_column = MediaSortOrder(order_by).value @@ -217,14 +274,16 @@ def get_local_media_by_user_paginate_txn( sql = """ SELECT - "media_id", - "media_type", - "media_length", - "upload_name", - "created_ts", - "last_access_ts", - "quarantined_by", - "safe_from_quarantine" + media_id, + media_type, + media_length, + upload_name, + created_ts, + url_cache, + last_access_ts, + quarantined_by, + safe_from_quarantine, + user_id FROM local_media_repository WHERE user_id = ? 
ORDER BY {order_by_column} {order}, media_id ASC @@ -236,7 +295,21 @@ def get_local_media_by_user_paginate_txn( args += [limit, start] txn.execute(sql, args) - media = self.db_pool.cursor_to_dict(txn) + media = [ + LocalMedia( + media_id=row[0], + media_type=row[1], + media_length=row[2], + upload_name=row[3], + created_ts=row[4], + url_cache=row[5], + last_access_ts=row[6], + quarantined_by=row[7], + safe_from_quarantine=bool(row[8]), + user_id=row[9], + ) + for row in txn + ] return media, count return await self.db_pool.runInteraction( @@ -330,6 +403,23 @@ def _get_local_media_ids_txn(txn: LoggingTransaction) -> List[str]: "get_local_media_ids", _get_local_media_ids_txn ) + @trace + async def store_local_media_id( + self, + media_id: str, + time_now_ms: int, + user_id: UserID, + ) -> None: + await self.db_pool.simple_insert( + "local_media_repository", + { + "media_id": media_id, + "created_ts": time_now_ms, + "user_id": user_id.to_string(), + }, + desc="store_local_media_id", + ) + @trace async def store_local_media( self, @@ -355,6 +445,30 @@ async def store_local_media( desc="store_local_media", ) + async def update_local_media( + self, + media_id: str, + media_type: str, + upload_name: Optional[str], + media_length: int, + user_id: UserID, + url_cache: Optional[str] = None, + ) -> None: + await self.db_pool.simple_update_one( + "local_media_repository", + keyvalues={ + "user_id": user_id.to_string(), + "media_id": media_id, + }, + updatevalues={ + "media_type": media_type, + "upload_name": upload_name, + "media_length": media_length, + "url_cache": url_cache, + }, + desc="update_local_media", + ) + async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None: """Mark a local media as safe or unsafe from quarantining.""" await self.db_pool.simple_update_one( @@ -364,51 +478,72 @@ async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> No desc="mark_local_media_as_safe", ) - async def get_url_cache(self, url: str, ts: int) -> Optional[Dict[str, Any]]: + async def count_pending_media(self, user_id: UserID) -> Tuple[int, int]: + """Count the number of pending media for a user. + + Returns: + A tuple of two integers: the total pending media requests and the earliest + expiration timestamp. + """ + + def get_pending_media_txn(txn: LoggingTransaction) -> Tuple[int, int]: + sql = """ + SELECT COUNT(*), MIN(created_ts) + FROM local_media_repository + WHERE user_id = ? + AND created_ts > ? + AND media_length IS NULL + """ + assert self.unused_expiration_time is not None + txn.execute( + sql, + ( + user_id.to_string(), + self._clock.time_msec() - self.unused_expiration_time, + ), + ) + row = txn.fetchone() + if not row: + return 0, 0 + return row[0], (row[1] + self.unused_expiration_time if row[1] else 0) + + return await self.db_pool.runInteraction( + "get_pending_media", get_pending_media_txn + ) + + async def get_url_cache(self, url: str, ts: int) -> Optional[UrlCache]: """Get the media_id and ts for a cached URL as of the given timestamp Returns: None if the URL isn't cached. """ - def get_url_cache_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: + def get_url_cache_txn(txn: LoggingTransaction) -> Optional[UrlCache]: # get the most recently cached result (relative to the given ts) - sql = ( - "SELECT response_code, etag, expires_ts, og, media_id, download_ts" - " FROM local_media_repository_url_cache" - " WHERE url = ? AND download_ts <= ?" 
- " ORDER BY download_ts DESC LIMIT 1" - ) + sql = """ + SELECT response_code, expires_ts, og + FROM local_media_repository_url_cache + WHERE url = ? AND download_ts <= ? + ORDER BY download_ts DESC LIMIT 1 + """ txn.execute(sql, (url, ts)) row = txn.fetchone() if not row: # ...or if we've requested a timestamp older than the oldest # copy in the cache, return the oldest copy (if any) - sql = ( - "SELECT response_code, etag, expires_ts, og, media_id, download_ts" - " FROM local_media_repository_url_cache" - " WHERE url = ? AND download_ts > ?" - " ORDER BY download_ts ASC LIMIT 1" - ) + sql = """ + SELECT response_code, expires_ts, og + FROM local_media_repository_url_cache + WHERE url = ? AND download_ts > ? + ORDER BY download_ts ASC LIMIT 1 + """ txn.execute(sql, (url, ts)) row = txn.fetchone() if not row: return None - return dict( - zip( - ( - "response_code", - "etag", - "expires_ts", - "og", - "media_id", - "download_ts", - ), - row, - ) - ) + return UrlCache(response_code=row[0], expires_ts=row[1], og=row[2]) return await self.db_pool.runInteraction("get_url_cache", get_url_cache_txn) @@ -418,7 +553,7 @@ async def store_url_cache( response_code: int, etag: Optional[str], expires_ts: int, - og: Optional[str], + og: str, media_id: str, download_ts: int, ) -> None: @@ -437,25 +572,24 @@ async def store_url_cache( ) async def get_local_media_thumbnails(self, media_id: str) -> List[ThumbnailInfo]: - rows = await self.db_pool.simple_select_list( - "local_media_repository_thumbnails", - {"media_id": media_id}, - ( - "thumbnail_width", - "thumbnail_height", - "thumbnail_method", - "thumbnail_type", - "thumbnail_length", + rows = cast( + List[Tuple[int, int, str, str, int]], + await self.db_pool.simple_select_list( + "local_media_repository_thumbnails", + {"media_id": media_id}, + ( + "thumbnail_width", + "thumbnail_height", + "thumbnail_method", + "thumbnail_type", + "thumbnail_length", + ), + desc="get_local_media_thumbnails", ), - desc="get_local_media_thumbnails", ) return [ ThumbnailInfo( - width=row["thumbnail_width"], - height=row["thumbnail_height"], - method=row["thumbnail_method"], - type=row["thumbnail_type"], - length=row["thumbnail_length"], + width=row[0], height=row[1], method=row[2], type=row[3], length=row[4] ) for row in rows ] @@ -485,8 +619,8 @@ async def store_local_thumbnail( async def get_cached_remote_media( self, origin: str, media_id: str - ) -> Optional[Dict[str, Any]]: - return await self.db_pool.simple_select_one( + ) -> Optional[RemoteMedia]: + row = await self.db_pool.simple_select_one( "remote_media_cache", {"media_origin": origin, "media_id": media_id}, ( @@ -495,11 +629,25 @@ async def get_cached_remote_media( "upload_name", "created_ts", "filesystem_id", + "last_access_ts", "quarantined_by", ), allow_none=True, desc="get_cached_remote_media", ) + if row is None: + return row + return RemoteMedia( + media_origin=origin, + media_id=media_id, + media_type=row[0], + media_length=row[1], + upload_name=row[2], + created_ts=row[3], + filesystem_id=row[4], + last_access_ts=row[5], + quarantined_by=row[6], + ) async def store_cached_remote_media( self, @@ -568,25 +716,24 @@ def update_cache_txn(txn: LoggingTransaction) -> None: async def get_remote_media_thumbnails( self, origin: str, media_id: str ) -> List[ThumbnailInfo]: - rows = await self.db_pool.simple_select_list( - "remote_media_cache_thumbnails", - {"media_origin": origin, "media_id": media_id}, - ( - "thumbnail_width", - "thumbnail_height", - "thumbnail_method", - "thumbnail_type", - "thumbnail_length", + 
rows = cast( + List[Tuple[int, int, str, str, int]], + await self.db_pool.simple_select_list( + "remote_media_cache_thumbnails", + {"media_origin": origin, "media_id": media_id}, + ( + "thumbnail_width", + "thumbnail_height", + "thumbnail_method", + "thumbnail_type", + "thumbnail_length", + ), + desc="get_remote_media_thumbnails", ), - desc="get_remote_media_thumbnails", ) return [ ThumbnailInfo( - width=row["thumbnail_width"], - height=row["thumbnail_height"], - method=row["thumbnail_method"], - type=row["thumbnail_type"], - length=row["thumbnail_length"], + width=row[0], height=row[1], method=row[2], type=row[3], length=row[4] ) for row in rows ] @@ -599,10 +746,10 @@ async def get_remote_media_thumbnail( t_width: int, t_height: int, t_type: str, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[ThumbnailInfo]: """Fetch the thumbnail info of given width, height and type.""" - return await self.db_pool.simple_select_one( + row = await self.db_pool.simple_select_one( table="remote_media_cache_thumbnails", keyvalues={ "media_origin": origin, @@ -617,11 +764,15 @@ async def get_remote_media_thumbnail( "thumbnail_method", "thumbnail_type", "thumbnail_length", - "filesystem_id", ), allow_none=True, desc="get_remote_media_thumbnail", ) + if row is None: + return None + return ThumbnailInfo( + width=row[0], height=row[1], method=row[2], type=row[3], length=row[4] + ) @trace async def store_remote_media_thumbnail( @@ -652,7 +803,7 @@ async def store_remote_media_thumbnail( async def get_remote_media_ids( self, before_ts: int, include_quarantined_media: bool - ) -> List[Dict[str, str]]: + ) -> List[Tuple[str, str, str]]: """ Retrieve a list of server name, media ID tuples from the remote media cache. @@ -666,12 +817,14 @@ async def get_remote_media_ids( A list of tuples containing: * The server name of homeserver where the media originates from, * The ID of the media. + * The filesystem ID. + """ + + sql = """ + SELECT media_origin, media_id, filesystem_id + FROM remote_media_cache + WHERE last_access_ts < ? """ - sql = ( - "SELECT media_origin, media_id, filesystem_id" - " FROM remote_media_cache" - " WHERE last_access_ts < ?" 
- ) if include_quarantined_media is False: # Only include media that has not been quarantined @@ -679,8 +832,9 @@ async def get_remote_media_ids( AND quarantined_by IS NULL """ - return await self.db_pool.execute( - "get_remote_media_ids", self.db_pool.cursor_to_dict, sql, before_ts + return cast( + List[Tuple[str, str, str]], + await self.db_pool.execute("get_remote_media_ids", sql, before_ts), ) async def delete_remote_media(self, media_origin: str, media_id: str) -> None: diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 4b1061e6d7d0..2911e53310f8 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -317,7 +317,7 @@ def _initialise_reserved_users( if user_id: is_support = self.is_support_user_txn(txn, user_id) if not is_support: - # We do this manually here to avoid hitting #6791 + # We do this manually here to avoid hitting https://github.com/matrix-org/synapse/issues/6791 self.db_pool.simple_upsert_txn( txn, table="monthly_active_users", diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 3b444d2d07f1..0198bb09d230 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -363,10 +363,11 @@ def _add_users_to_send_full_presence_to(txn: LoggingTransaction) -> None: # for their user ID. value_values=[(presence_stream_id,) for _ in user_ids], ) - for user_id in user_ids: - self._invalidate_cache_and_stream( - txn, self._get_full_presence_stream_token_for_user, (user_id,) - ) + self._invalidate_cache_and_stream_bulk( + txn, + self._get_full_presence_stream_token_for_user, + [(user_id,) for user_id in user_ids], + ) return await self.db_pool.runInteraction( "add_users_to_send_full_presence_to", _add_users_to_send_full_presence_to diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 3ba9cc88537a..7ed111f63295 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -13,7 +13,6 @@ # limitations under the License. 
from typing import TYPE_CHECKING, Optional -from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -138,23 +137,18 @@ def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None: return 50 async def get_profileinfo(self, user_id: UserID) -> ProfileInfo: - try: - profile = await self.db_pool.simple_select_one( - table="profiles", - keyvalues={"full_user_id": user_id.to_string()}, - retcols=("displayname", "avatar_url"), - desc="get_profileinfo", - ) - except StoreError as e: - if e.code == 404: - # no match - return ProfileInfo(None, None) - else: - raise - - return ProfileInfo( - avatar_url=profile["avatar_url"], display_name=profile["displayname"] + profile = await self.db_pool.simple_select_one( + table="profiles", + keyvalues={"full_user_id": user_id.to_string()}, + retcols=("displayname", "avatar_url"), + desc="get_profileinfo", + allow_none=True, ) + if profile is None: + # no match + return ProfileInfo(None, None) + + return ProfileInfo(avatar_url=profile[1], display_name=profile[0]) async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: return await self.db_pool.simple_select_one_onecol( diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 1e11bf2706dd..1a5b5731bbeb 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -295,19 +295,28 @@ def _purge_history_txn( # so make sure to keep this actually last. txn.execute("DROP TABLE events_to_purge") - for event_id, should_delete in event_rows: - self._invalidate_cache_and_stream( - txn, self._get_state_group_for_event, (event_id,) - ) + self._invalidate_cache_and_stream_bulk( + txn, + self._get_state_group_for_event, + [(event_id,) for event_id, _ in event_rows], + ) - # XXX: This is racy, since have_seen_events could be called between the - # transaction completing and the invalidation running. On the other hand, - # that's no different to calling `have_seen_events` just before the - # event is deleted from the database. + # XXX: This is racy, since have_seen_events could be called between the + # transaction completing and the invalidation running. On the other hand, + # that's no different to calling `have_seen_events` just before the + # event is deleted from the database. + self._invalidate_cache_and_stream_bulk( + txn, + self.have_seen_event, + [ + (room_id, event_id) + for event_id, should_delete in event_rows + if should_delete + ], + ) + + for event_id, should_delete in event_rows: if should_delete: - self._invalidate_cache_and_stream( - txn, self.have_seen_event, (room_id, event_id) - ) self.invalidate_get_event_cache_after_txn(txn, event_id) logger.info("[purge] done") @@ -485,7 +494,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # - room_tags_revisions # The problem with these is that they are largeish and there is no room_id # index on them. 
In any case we should be clearing out 'stream' tables - # periodically anyway (#5888) + # periodically anyway (https://github.com/matrix-org/synapse/issues/5888) self._invalidate_caches_for_room_and_stream(txn, room_id) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index f5356e7f8062..cf622e195cb6 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -28,8 +28,11 @@ cast, ) +from twisted.internet import defer + from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -51,7 +54,8 @@ ) from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules from synapse.types import JsonDict -from synapse.util import json_encoder +from synapse.util import json_encoder, unwrapFirstError +from synapse.util.async_helpers import gather_results from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -179,46 +183,44 @@ def process_replication_position( @cached(max_entries=5000) async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules: - rows = await self.db_pool.simple_select_list( - table="push_rules", - keyvalues={"user_name": user_id}, - retcols=( - "user_name", - "rule_id", - "priority_class", - "priority", - "conditions", - "actions", + rows = cast( + List[Tuple[str, int, int, str, str]], + await self.db_pool.simple_select_list( + table="push_rules", + keyvalues={"user_name": user_id}, + retcols=( + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ), + desc="get_push_rules_for_user", ), - desc="get_push_rules_for_user", ) - rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) + # Sort by highest priority_class, then highest priority. 
+ rows.sort(key=lambda row: (-int(row[1]), -int(row[2]))) enabled_map = await self.get_push_rules_enabled_for_user(user_id) return _load_rules( - [ - ( - row["rule_id"], - row["priority_class"], - row["conditions"], - row["actions"], - ) - for row in rows - ], + [(row[0], row[1], row[3], row[4]) for row in rows], enabled_map, self.hs.config.experimental, ) async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: - results = await self.db_pool.simple_select_list( - table="push_rules_enable", - keyvalues={"user_name": user_id}, - retcols=("rule_id", "enabled"), - desc="get_push_rules_enabled_for_user", + results = cast( + List[Tuple[str, Optional[Union[int, bool]]]], + await self.db_pool.simple_select_list( + table="push_rules_enable", + keyvalues={"user_name": user_id}, + retcols=("rule_id", "enabled"), + desc="get_push_rules_enabled_for_user", + ), ) - return {r["rule_id"]: bool(r["enabled"]) for r in results} + return {r[0]: bool(r[1]) for r in results} async def have_push_rules_changed_for_user( self, user_id: str, last_id: int @@ -251,23 +253,33 @@ async def bulk_get_push_rules( user_id: [] for user_id in user_ids } - rows = cast( - List[Tuple[str, str, int, int, str, str]], - await self.db_pool.simple_select_many_batch( - table="push_rules", - column="user_name", - iterable=user_ids, - retcols=( - "user_name", - "rule_id", - "priority_class", - "priority", - "conditions", - "actions", + # gatherResults loses all type information. + rows, enabled_map_by_user = await make_deferred_yieldable( + gather_results( + ( + cast( + "defer.Deferred[List[Tuple[str, str, int, int, str, str]]]", + run_in_background( + self.db_pool.simple_select_many_batch, + table="push_rules", + column="user_name", + iterable=user_ids, + retcols=( + "user_name", + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ), + desc="bulk_get_push_rules", + batch_size=1000, + ), + ), + run_in_background(self.bulk_get_push_rules_enabled, user_ids), ), - desc="bulk_get_push_rules", - batch_size=1000, - ), + consumeErrors=True, + ).addErrback(unwrapFirstError) ) # Sort by highest priority_class, then highest priority. @@ -278,8 +290,6 @@ async def bulk_get_push_rules( (rule_id, priority_class, conditions, actions) ) - enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) - results: Dict[str, FilteredPushRules] = {} for user_id, rules in raw_rules.items(): @@ -439,27 +449,28 @@ def _add_push_rule_relative_txn( before: str, after: str, ) -> None: - # Lock the table since otherwise we'll have annoying races between the - # SELECT here and the UPSERT below. - self.database_engine.lock_table(txn, "push_rules") - relative_to_rule = before or after - res = self.db_pool.simple_select_one_txn( - txn, - table="push_rules", - keyvalues={"user_name": user_id, "rule_id": relative_to_rule}, - retcols=["priority_class", "priority"], - allow_none=True, - ) + sql = """ + SELECT priority, priority_class FROM push_rules + WHERE user_name = ? AND rule_id = ? 
+ """ + + if isinstance(self.database_engine, PostgresEngine): + sql += " FOR UPDATE" + else: + # Annoyingly SQLite doesn't support row level locking, so lock the whole table + self.database_engine.lock_table(txn, "push_rules") - if not res: + txn.execute(sql, (user_id, relative_to_rule)) + row = txn.fetchone() + + if row is None: raise RuleNotFoundException( "before/after rule not found: %s" % (relative_to_rule,) ) - base_priority_class = res["priority_class"] - base_rule_priority = res["priority"] + base_rule_priority, base_priority_class = row if base_priority_class != priority_class: raise InconsistentRuleException( @@ -507,9 +518,18 @@ def _add_push_rule_highest_priority_txn( conditions_json: str, actions_json: str, ) -> None: - # Lock the table since otherwise we'll have annoying races between the - # SELECT here and the UPSERT below. - self.database_engine.lock_table(txn, "push_rules") + if isinstance(self.database_engine, PostgresEngine): + # Postgres doesn't do FOR UPDATE on aggregate functions, so select the rows first + # then re-select the count/max below. + sql = """ + SELECT * FROM push_rules + WHERE user_name = ? and priority_class = ? + FOR UPDATE + """ + txn.execute(sql, (user_id, priority_class)) + else: + # Annoyingly SQLite doesn't support row level locking, so lock the whole table + self.database_engine.lock_table(txn, "push_rules") # find the highest priority rule in that class sql = ( diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c7eb7fc4789e..8f36cfce1245 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -371,18 +371,20 @@ async def update_pusher_failing_since( async def get_throttle_params_by_room( self, pusher_id: int ) -> Dict[str, ThrottleParams]: - res = await self.db_pool.simple_select_list( - "pusher_throttle", - {"pusher": pusher_id}, - ["room_id", "last_sent_ts", "throttle_ms"], - desc="get_throttle_params_by_room", + res = cast( + List[Tuple[str, Optional[int], Optional[int]]], + await self.db_pool.simple_select_list( + "pusher_throttle", + {"pusher": pusher_id}, + ["room_id", "last_sent_ts", "throttle_ms"], + desc="get_throttle_params_by_room", + ), ) params_by_room = {} - for row in res: - params_by_room[row["room_id"]] = ThrottleParams( - row["last_sent_ts"], - row["throttle_ms"], + for room_id, last_sent_ts, throttle_ms in res: + params_by_room[room_id] = ThrottleParams( + last_sent_ts or 0, throttle_ms or 0 ) return params_by_room @@ -599,7 +601,7 @@ def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int: (last_pusher_id, batch_size), ) - rows = txn.fetchall() + rows = cast(List[Tuple[int, Optional[str], Optional[str]]], txn.fetchall()) if len(rows) == 0: return 0 @@ -615,7 +617,7 @@ def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int: txn=txn, table="pushers", key_names=("id",), - key_values=[row[0] for row in rows], + key_values=[(row[0],) for row in rows], value_names=("device_id", "access_token"), # If there was already a device_id on the pusher, we only want to clear # the access_token column, so we keep the existing device_id. 
Otherwise, diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index b2645ab43c5b..3484ce9ef937 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -28,6 +28,8 @@ cast, ) +from immutabledict import immutabledict + from synapse.api.constants import EduTypes from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -43,7 +45,12 @@ MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict, JsonMapping +from synapse.types import ( + JsonDict, + JsonMapping, + MultiWriterStreamToken, + PersistedPosition, +) from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -105,7 +112,7 @@ def __init__( "receipts_linearized", entity_column="room_id", stream_column="stream_id", - max_value=max_receipts_stream_id, + max_value=max_receipts_stream_id.stream, limit=10000, ) self._receipts_stream_cache = StreamChangeCache( @@ -114,9 +121,31 @@ def __init__( prefilled_cache=receipts_stream_prefill, ) - def get_max_receipt_stream_id(self) -> int: + def get_max_receipt_stream_id(self) -> MultiWriterStreamToken: """Get the current max stream ID for receipts stream""" - return self._receipts_id_gen.get_current_token() + + min_pos = self._receipts_id_gen.get_current_token() + + positions = {} + if isinstance(self._receipts_id_gen, MultiWriterIdGenerator): + # The `min_pos` is the minimum position that we know all instances + # have finished persisting to, so we only care about instances whose + # positions are ahead of that. (Instance positions can be behind the + # min position as there are times we can work out that the minimum + # position is ahead of the naive minimum across all current + # positions. See MultiWriterIdGenerator for details) + positions = { + i: p + for i, p in self._receipts_id_gen.get_positions().items() + if p > min_pos + } + + return MultiWriterStreamToken( + stream=min_pos, instance_map=immutabledict(positions) + ) + + def get_receipt_stream_id_for_instance(self, instance_name: str) -> int: + return self._receipts_id_gen.get_current_token_for_writer(instance_name) def get_last_unthreaded_receipt_for_user_txn( self, @@ -257,7 +286,10 @@ def f(txn: LoggingTransaction) -> List[Tuple[str, str, int, int]]: } async def get_linearized_receipts_for_rooms( - self, room_ids: Iterable[str], to_key: int, from_key: Optional[int] = None + self, + room_ids: Iterable[str], + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> List[JsonMapping]: """Get receipts for multiple rooms for sending to clients. 
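The receipts store now hands back a multi-writer token instead of a single integer: the token records the minimum stream position that every writer instance is known to have persisted up to, plus an instance map containing only the writers whose own position is ahead of that minimum. A simplified standalone model of that shape (an illustration of the idea only, not Synapse's actual `MultiWriterStreamToken`):

from dataclasses import dataclass, field
from typing import Mapping

@dataclass(frozen=True)
class SimpleMultiWriterToken:
    # Minimum position that all writers are known to have reached.
    stream: int
    # Positions of only those writers that are ahead of the minimum.
    instance_map: Mapping[str, int] = field(default_factory=dict)

    def position_for(self, instance_name: str) -> int:
        # A writer that is not in the map is at (or behind) the shared minimum.
        return self.instance_map.get(instance_name, self.stream)

# Built the way get_max_receipt_stream_id does it: keep only the writers whose
# position is strictly ahead of the minimum.
min_pos = 10
positions = {"writer1": 10, "writer2": 13, "writer3": 12}
token = SimpleMultiWriterToken(
    stream=min_pos,
    instance_map={i: p for i, p in positions.items() if p > min_pos},
)

assert token.position_for("writer1") == 10  # folded into the minimum
assert token.position_for("writer2") == 13

The later receipts hunks use the per-instance position in just this way, checking each row's (instance_name, stream_id) against the token bounds instead of comparing a single global stream id.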
@@ -276,7 +308,7 @@ async def get_linearized_receipts_for_rooms( # Only ask the database about rooms where there have been new # receipts added since `from_key` room_ids = self._receipts_stream_cache.get_entities_changed( - room_ids, from_key + room_ids, from_key.stream ) results = await self._get_linearized_receipts_for_rooms( @@ -286,7 +318,10 @@ async def get_linearized_receipts_for_rooms( return [ev for res in results.values() for ev in res] async def get_linearized_receipts_for_room( - self, room_id: str, to_key: int, from_key: Optional[int] = None + self, + room_id: str, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Sequence[JsonMapping]: """Get receipts for a single room for sending to clients. @@ -302,36 +337,49 @@ async def get_linearized_receipts_for_room( if from_key is not None: # Check the cache first to see if any new receipts have been added # since`from_key`. If not we can no-op. - if not self._receipts_stream_cache.has_entity_changed(room_id, from_key): + if not self._receipts_stream_cache.has_entity_changed( + room_id, from_key.stream + ): return [] return await self._get_linearized_receipts_for_room(room_id, to_key, from_key) @cached(tree=True) async def _get_linearized_receipts_for_room( - self, room_id: str, to_key: int, from_key: Optional[int] = None + self, + room_id: str, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: if from_key: - sql = ( - "SELECT receipt_type, user_id, event_id, data" - " FROM receipts_linearized WHERE" - " room_id = ? AND stream_id > ? AND stream_id <= ?" - ) + sql = """ + SELECT stream_id, instance_name, receipt_type, user_id, event_id, data + FROM receipts_linearized + WHERE room_id = ? AND stream_id > ? AND stream_id <= ? + """ - txn.execute(sql, (room_id, from_key, to_key)) - else: - sql = ( - "SELECT receipt_type, user_id, event_id, data" - " FROM receipts_linearized WHERE" - " room_id = ? AND stream_id <= ?" + txn.execute( + sql, (room_id, from_key.stream, to_key.get_max_stream_pos()) ) + else: + sql = """ + SELECT stream_id, instance_name, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE + room_id = ? AND stream_id <= ? 
+ """ - txn.execute(sql, (room_id, to_key)) + txn.execute(sql, (room_id, to_key.get_max_stream_pos())) - return cast(List[Tuple[str, str, str, str]], txn.fetchall()) + return [ + (receipt_type, user_id, event_id, data) + for stream_id, instance_name, receipt_type, user_id, event_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id + ) + ] rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f) @@ -352,7 +400,10 @@ def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: num_args=3, ) async def _get_linearized_receipts_for_rooms( - self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None + self, + room_ids: Collection[str], + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Mapping[str, Sequence[JsonMapping]]: if not room_ids: return {} @@ -362,7 +413,8 @@ def f( ) -> List[Tuple[str, str, str, str, Optional[str], str]]: if from_key: sql = """ - SELECT room_id, receipt_type, user_id, event_id, thread_id, data + SELECT stream_id, instance_name, room_id, receipt_type, + user_id, event_id, thread_id, data FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? AND """ @@ -370,10 +422,14 @@ def f( self.database_engine, "room_id", room_ids ) - txn.execute(sql + clause, [from_key, to_key] + list(args)) + txn.execute( + sql + clause, + [from_key.stream, to_key.get_max_stream_pos()] + list(args), + ) else: sql = """ - SELECT room_id, receipt_type, user_id, event_id, thread_id, data + SELECT stream_id, instance_name, room_id, receipt_type, + user_id, event_id, thread_id, data FROM receipts_linearized WHERE stream_id <= ? AND """ @@ -382,11 +438,15 @@ def f( self.database_engine, "room_id", room_ids ) - txn.execute(sql + clause, [to_key] + list(args)) + txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args)) - return cast( - List[Tuple[str, str, str, str, Optional[str], str]], txn.fetchall() - ) + return [ + (room_id, receipt_type, user_id, event_id, thread_id, data) + for stream_id, instance_name, room_id, receipt_type, user_id, event_id, thread_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id + ) + ] txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f @@ -420,7 +480,9 @@ def f( num_args=2, ) async def get_linearized_receipts_for_all_rooms( - self, to_key: int, from_key: Optional[int] = None + self, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Mapping[str, JsonMapping]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. @@ -437,25 +499,31 @@ async def get_linearized_receipts_for_all_rooms( def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str, str]]: if from_key: sql = """ - SELECT room_id, receipt_type, user_id, event_id, data + SELECT stream_id, instance_name, room_id, receipt_type, user_id, event_id, data FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? ORDER BY stream_id DESC LIMIT 100 """ - txn.execute(sql, [from_key, to_key]) + txn.execute(sql, [from_key.stream, to_key.get_max_stream_pos()]) else: sql = """ - SELECT room_id, receipt_type, user_id, event_id, data + SELECT stream_id, instance_name, room_id, receipt_type, user_id, event_id, data FROM receipts_linearized WHERE stream_id <= ? 
ORDER BY stream_id DESC LIMIT 100 """ - txn.execute(sql, [to_key]) + txn.execute(sql, [to_key.get_max_stream_pos()]) - return cast(List[Tuple[str, str, str, str, str]], txn.fetchall()) + return [ + (room_id, receipt_type, user_id, event_id, data) + for stream_id, instance_name, room_id, receipt_type, user_id, event_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id + ) + ] txn_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_all_rooms", f @@ -545,10 +613,11 @@ def get_all_updated_receipts_txn( SELECT stream_id, room_id, receipt_type, user_id, event_id, thread_id, data FROM receipts_linearized WHERE ? < stream_id AND stream_id <= ? + AND instance_name = ? ORDER BY stream_id ASC LIMIT ? """ - txn.execute(sql, (last_id, current_id, limit)) + txn.execute(sql, (last_id, current_id, instance_name, limit)) updates = cast( List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], @@ -632,8 +701,8 @@ def _insert_linearized_receipt_txn( allow_none=True, ) - stream_ordering = int(res["stream_ordering"]) if res else None - rx_ts = res["received_ts"] if res else 0 + stream_ordering = int(res[0]) if res else None + rx_ts = res[1] if res else 0 # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts @@ -695,6 +764,7 @@ def _insert_linearized_receipt_txn( keyvalues=keyvalues, values={ "stream_id": stream_id, + "instance_name": self._instance_name, "event_id": event_id, "event_stream_ordering": stream_ordering, "data": json_encoder.encode(data), @@ -750,7 +820,7 @@ async def insert_receipt( event_ids: List[str], thread_id: Optional[str], data: dict, - ) -> Optional[int]: + ) -> Optional[PersistedPosition]: """Insert a receipt, either from local client or remote server. Automatically does conversion between linearized and graph @@ -812,7 +882,7 @@ async def insert_receipt( data, ) - return stream_id + return PersistedPosition(self._instance_name, stream_id) async def _insert_graph_receipt( self, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 9e8643ae4d87..2c3f30e2eba2 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -151,6 +151,22 @@ class ThreepidResult: added_at: int +@attr.s(frozen=True, slots=True, auto_attribs=True) +class ThreepidValidationSession: + address: str + """address of the 3pid""" + medium: str + """medium of the 3pid""" + client_secret: str + """a secret provided by the client for this validation session""" + session_id: str + """ID of the validation session""" + last_send_attempt: int + """a number serving to dedupe send attempts for this session""" + validated_at: Optional[int] + """timestamp of when this session was validated if so""" + + class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__( self, @@ -409,17 +425,14 @@ async def get_user_from_renewal_token( account timestamp as milliseconds since the epoch. None if the account has not been renewed using the current token yet. 
""" - ret_dict = await self.db_pool.simple_select_one( - table="account_validity", - keyvalues={"renewal_token": renewal_token}, - retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"], - desc="get_user_from_renewal_token", - ) - - return ( - ret_dict["user_id"], - ret_dict["expiration_ts_ms"], - ret_dict["token_used_ts_ms"], + return cast( + Tuple[str, int, Optional[int]], + await self.db_pool.simple_select_one( + table="account_validity", + keyvalues={"renewal_token": renewal_token}, + retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"], + desc="get_user_from_renewal_token", + ), ) async def get_renewal_token_for_user(self, user_id: str) -> str: @@ -548,16 +561,15 @@ def set_shadow_banned_txn(txn: LoggingTransaction) -> None: updatevalues={"shadow_banned": shadow_banned}, ) # In order for this to apply immediately, clear the cache for this user. - tokens = self.db_pool.simple_select_onecol_txn( + tokens = self.db_pool.simple_select_list_txn( txn, table="access_tokens", keyvalues={"user_id": user_id}, - retcol="token", + retcols=("token",), + ) + self._invalidate_cache_and_stream_bulk( + txn, self.get_user_by_access_token, tokens ) - for token in tokens: - self._invalidate_cache_and_stream( - txn, self.get_user_by_access_token, (token,) - ) self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) await self.db_pool.runInteraction("set_shadow_banned", set_shadow_banned_txn) @@ -855,13 +867,15 @@ async def get_external_ids_by_user(self, mxid: str) -> List[Tuple[str, str]]: Returns: Tuples of (auth_provider, external_id) """ - res = await self.db_pool.simple_select_list( - table="user_external_ids", - keyvalues={"user_id": mxid}, - retcols=("auth_provider", "external_id"), - desc="get_external_ids_by_user", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="user_external_ids", + keyvalues={"user_id": mxid}, + retcols=("auth_provider", "external_id"), + desc="get_external_ids_by_user", + ), ) - return [(r["auth_provider"], r["external_id"]) for r in res] async def count_all_users(self) -> int: """Counts all users registered on the homeserver.""" @@ -971,16 +985,13 @@ def get_user_id_by_threepid_txn( Returns: user id, or None if no user id/threepid mapping exists """ - ret = self.db_pool.simple_select_one_txn( + return self.db_pool.simple_select_one_onecol_txn( txn, "user_threepids", {"medium": medium, "address": address}, - ["user_id"], + "user_id", True, ) - if ret: - return ret["user_id"] - return None async def user_add_threepid( self, @@ -997,13 +1008,24 @@ async def user_add_threepid( ) async def user_get_threepids(self, user_id: str) -> List[ThreepidResult]: - results = await self.db_pool.simple_select_list( - "user_threepids", - keyvalues={"user_id": user_id}, - retcols=["medium", "address", "validated_at", "added_at"], - desc="user_get_threepids", + results = cast( + List[Tuple[str, str, int, int]], + await self.db_pool.simple_select_list( + "user_threepids", + keyvalues={"user_id": user_id}, + retcols=["medium", "address", "validated_at", "added_at"], + desc="user_get_threepids", + ), ) - return [ThreepidResult(**r) for r in results] + return [ + ThreepidResult( + medium=r[0], + address=r[1], + validated_at=r[2], + added_at=r[3], + ) + for r in results + ] async def user_delete_threepid( self, user_id: str, medium: str, address: str @@ -1042,7 +1064,7 @@ async def add_user_bound_threepid( desc="add_user_bound_threepid", ) - async def user_get_bound_threepids(self, user_id: str) -> List[Dict[str, Any]]: + async def 
user_get_bound_threepids(self, user_id: str) -> List[Tuple[str, str]]: """Get the threepids that a user has bound to an identity server through the homeserver The homeserver remembers where binds to an identity server occurred. Using this method can retrieve those threepids. @@ -1051,15 +1073,18 @@ async def user_get_bound_threepids(self, user_id: str) -> List[Dict[str, Any]]: user_id: The ID of the user to retrieve threepids for Returns: - List of dictionaries containing the following keys: - medium (str): The medium of the threepid (e.g "email") - address (str): The address of the threepid (e.g "bob@example.com") - """ - return await self.db_pool.simple_select_list( - table="user_threepid_id_server", - keyvalues={"user_id": user_id}, - retcols=["medium", "address"], - desc="user_get_bound_threepids", + List of tuples of two strings: + medium: The medium of the threepid (e.g "email") + address: The address of the threepid (e.g "bob@example.com") + """ + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="user_threepid_id_server", + keyvalues={"user_id": user_id}, + retcols=["medium", "address"], + desc="user_get_bound_threepids", + ), ) async def remove_user_bound_threepid( @@ -1156,7 +1181,7 @@ async def get_threepid_validation_session( address: Optional[str] = None, sid: Optional[str] = None, validated: Optional[bool] = True, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[ThreepidValidationSession]: """Gets a session_id and last_send_attempt (if available) for a combination of validation metadata @@ -1171,15 +1196,7 @@ async def get_threepid_validation_session( perform no filtering Returns: - A dict containing the following: - * address - address of the 3pid - * medium - medium of the 3pid - * client_secret - a secret provided by the client for this validation session - * session_id - ID of the validation session - * send_attempt - a number serving to dedupe send attempts for this session - * validated_at - timestamp of when this session was validated if so - - Otherwise None if a validation session is not found + A ThreepidValidationSession or None if a validation session is not found """ if not client_secret: raise SynapseError( @@ -1198,7 +1215,7 @@ async def get_threepid_validation_session( def get_threepid_validation_session_txn( txn: LoggingTransaction, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[ThreepidValidationSession]: sql = """ SELECT address, session_id, medium, client_secret, last_send_attempt, validated_at @@ -1213,11 +1230,18 @@ def get_threepid_validation_session_txn( sql += " LIMIT 1" txn.execute(sql, list(keyvalues.values())) - rows = self.db_pool.cursor_to_dict(txn) - if not rows: + row = txn.fetchone() + if not row: return None - return rows[0] + return ThreepidValidationSession( + address=row[0], + session_id=row[1], + medium=row[2], + client_secret=row[3], + last_send_attempt=row[4], + validated_at=row[5], + ) return await self.db_pool.runInteraction( "get_threepid_validation_session", get_threepid_validation_session_txn @@ -1404,16 +1428,15 @@ async def registration_token_is_valid(self, token: str) -> bool: if res is None: return False + uses_allowed, pending, completed, expiry_time = res + # Check if the token has expired now = self._clock.time_msec() - if res["expiry_time"] and res["expiry_time"] < now: + if expiry_time and expiry_time < now: return False # Check if the token has been used up - if ( - res["uses_allowed"] - and res["pending"] + res["completed"] >= res["uses_allowed"] - ): + if uses_allowed and pending + 
completed >= uses_allowed: return False # Otherwise, the token is valid @@ -1459,8 +1482,8 @@ def _use_registration_token_txn(txn: LoggingTransaction) -> None: # Override type because the return type is only optional if # allow_none is True, and we don't want mypy throwing errors # about None not being indexable. - res = cast( - Dict[str, Any], + pending, completed = cast( + Tuple[int, int], self.db_pool.simple_select_one_txn( txn, "registration_tokens", @@ -1475,8 +1498,8 @@ def _use_registration_token_txn(txn: LoggingTransaction) -> None: "registration_tokens", keyvalues={"token": token}, updatevalues={ - "completed": res["completed"] + 1, - "pending": res["pending"] - 1, + "completed": completed + 1, + "pending": pending - 1, }, ) @@ -1486,7 +1509,7 @@ def _use_registration_token_txn(txn: LoggingTransaction) -> None: async def get_registration_tokens( self, valid: Optional[bool] = None - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: """List all registration tokens. Used by the admin API. Args: @@ -1495,34 +1518,48 @@ async def get_registration_tokens( Default is None: return all tokens regardless of validity. Returns: - A list of dicts, each containing details of a token. + A list of tuples containing: + * The token + * The number of users allowed (or None) + * Whether it is pending + * Whether it has been completed + * An expiry time (or None if no expiry) """ def select_registration_tokens_txn( txn: LoggingTransaction, now: int, valid: Optional[bool] - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: if valid is None: # Return all tokens regardless of validity - txn.execute("SELECT * FROM registration_tokens") + txn.execute( + """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + """ + ) elif valid: # Select valid tokens only - sql = ( - "SELECT * FROM registration_tokens WHERE " - "(uses_allowed > pending + completed OR uses_allowed IS NULL) " - "AND (expiry_time > ? OR expiry_time IS NULL)" - ) + sql = """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + WHERE (uses_allowed > pending + completed OR uses_allowed IS NULL) + AND (expiry_time > ? OR expiry_time IS NULL) + """ txn.execute(sql, [now]) else: # Select invalid tokens only - sql = ( - "SELECT * FROM registration_tokens WHERE " - "uses_allowed <= pending + completed OR expiry_time <= ?" - ) + sql = """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + WHERE uses_allowed <= pending + completed OR expiry_time <= ? + """ txn.execute(sql, [now]) - return self.db_pool.cursor_to_dict(txn) + return cast( + List[Tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall() + ) return await self.db_pool.runInteraction( "select_registration_tokens", @@ -1540,13 +1577,22 @@ async def get_one_registration_token(self, token: str) -> Optional[Dict[str, Any Returns: A dict, or None if token doesn't exist. 
""" - return await self.db_pool.simple_select_one( + row = await self.db_pool.simple_select_one( "registration_tokens", keyvalues={"token": token}, retcols=["token", "uses_allowed", "pending", "completed", "expiry_time"], allow_none=True, desc="get_one_registration_token", ) + if row is None: + return None + return { + "token": row[0], + "uses_allowed": row[1], + "pending": row[2], + "completed": row[3], + "expiry_time": row[4], + } async def generate_registration_token( self, length: int, chars: str @@ -1669,7 +1715,7 @@ def _update_registration_token_txn( return None # Get all info about the token so it can be sent in the response - return self.db_pool.simple_select_one_txn( + result = self.db_pool.simple_select_one_txn( txn, "registration_tokens", keyvalues={"token": token}, @@ -1683,6 +1729,17 @@ def _update_registration_token_txn( allow_none=True, ) + if result is None: + return result + + return { + "token": result[0], + "uses_allowed": result[1], + "pending": result[2], + "completed": result[3], + "expiry_time": result[4], + } + return await self.db_pool.runInteraction( "update_registration_token", _update_registration_token_txn ) @@ -1894,11 +1951,13 @@ def _consume_login_token( keyvalues={"token": token}, updatevalues={"used_ts": ts}, ) - user_id = values["user_id"] - expiry_ts = values["expiry_ts"] - used_ts = values["used_ts"] - auth_provider_id = values["auth_provider_id"] - auth_provider_session_id = values["auth_provider_session_id"] + ( + user_id, + expiry_ts, + used_ts, + auth_provider_id, + auth_provider_session_id, + ) = values # Token was already used if used_ts is not None: @@ -2623,10 +2682,11 @@ def f(txn: LoggingTransaction) -> List[Tuple[str, int, Optional[str]]]: ) tokens_and_devices = [(r[0], r[1], r[2]) for r in txn] - for token, _, _ in tokens_and_devices: - self._invalidate_cache_and_stream( - txn, self.get_user_by_access_token, (token,) - ) + self._invalidate_cache_and_stream_bulk( + txn, + self.get_user_by_access_token, + [(token,) for token, _, _ in tokens_and_devices], + ) txn.execute("DELETE FROM access_tokens WHERE %s" % where_clause, values) @@ -2711,12 +2771,11 @@ def validate_threepid_session_txn(txn: LoggingTransaction) -> Optional[str]: # reason, the next check is on the client secret, which is NOT NULL, # so we don't have to worry about the client secret matching by # accident. 
- row = {"client_secret": None, "validated_at": None} + row = None, None else: raise ThreepidValidationError("Unknown session_id") - retrieved_client_secret = row["client_secret"] - validated_at = row["validated_at"] + retrieved_client_secret, validated_at = row row = self.db_pool.simple_select_one_txn( txn, @@ -2730,8 +2789,7 @@ def validate_threepid_session_txn(txn: LoggingTransaction) -> Optional[str]: raise ThreepidValidationError( "Validation token not found or has expired" ) - expires = row["expires"] - next_link = row["next_link"] + expires, next_link = row if retrieved_client_secret != client_secret: raise ThreepidValidationError( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 7f40e2c446a2..419b2c7a22c0 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -47,7 +47,7 @@ generate_pagination_where_clause, ) from synapse.storage.engines import PostgresEngine -from synapse.types import JsonDict, StreamKeyType, StreamToken +from synapse.types import JsonDict, MultiWriterStreamToken, StreamKeyType, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: @@ -314,7 +314,7 @@ def _get_recent_references_for_event_txn( room_key=next_key, presence_key=0, typing_key=0, - receipt_key=0, + receipt_key=MultiWriterStreamToken(stream=0), account_data_key=0, push_rules_key=0, to_device_key=0, @@ -384,14 +384,17 @@ async def get_all_relations_for_event( def get_all_relation_ids_for_event_txn( txn: LoggingTransaction, ) -> List[str]: - rows = self.db_pool.simple_select_list_txn( - txn=txn, - table="event_relations", - keyvalues={"relates_to_id": event_id}, - retcols=["event_id"], + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_list_txn( + txn=txn, + table="event_relations", + keyvalues={"relates_to_id": event_id}, + retcols=["event_id"], + ), ) - return [row["event_id"] for row in rows] + return [row[0] for row in rows] return await self.db_pool.runInteraction( desc="get_all_relation_ids_for_event", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 9d24d2c347fc..ef26d5d9d37e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -78,6 +78,31 @@ class RatelimitOverride: burst_count: int +@attr.s(slots=True, frozen=True, auto_attribs=True) +class LargestRoomStats: + room_id: str + name: Optional[str] + canonical_alias: Optional[str] + joined_members: int + join_rules: Optional[str] + guest_access: Optional[str] + history_visibility: Optional[str] + state_events: int + avatar: Optional[str] + topic: Optional[str] + room_type: Optional[str] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomStats(LargestRoomStats): + joined_local_members: int + version: Optional[str] + creator: Optional[str] + encryption: Optional[str] + federatable: bool + public: bool + + class RoomSortOrder(Enum): """ Enum to define the sorting method used when returning rooms with get_rooms_paginate @@ -188,23 +213,33 @@ async def store_room( logger.error("store_room with room_id=%s failed: %s", room_id, e) raise StoreError(500, "Problem creating room.") - async def get_room(self, room_id: str) -> Optional[Dict[str, Any]]: + async def get_room(self, room_id: str) -> Optional[Tuple[bool, bool]]: """Retrieve a room. Args: room_id: The ID of the room to retrieve. Returns: - A dict containing the room information, or None if the room is unknown. 
+ A tuple containing the room information: + * True if the room is public + * True if the room has an auth chain index + + or None if the room is unknown. """ - return await self.db_pool.simple_select_one( - table="rooms", - keyvalues={"room_id": room_id}, - retcols=("room_id", "is_public", "creator", "has_auth_chain_index"), - desc="get_room", - allow_none=True, + row = cast( + Optional[Tuple[Optional[Union[int, bool]], Optional[Union[int, bool]]]], + await self.db_pool.simple_select_one( + table="rooms", + keyvalues={"room_id": room_id}, + retcols=("is_public", "has_auth_chain_index"), + desc="get_room", + allow_none=True, + ), ) + if row is None: + return row + return bool(row[0]), bool(row[1]) - async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]: + async def get_room_with_stats(self, room_id: str) -> Optional[RoomStats]: """Retrieve room with statistics. Args: @@ -215,7 +250,7 @@ async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]: def get_room_with_stats_txn( txn: LoggingTransaction, room_id: str - ) -> Optional[Dict[str, Any]]: + ) -> Optional[RoomStats]: sql = """ SELECT room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room AS joined_local_members, rooms.room_version AS version, @@ -229,15 +264,28 @@ def get_room_with_stats_txn( WHERE room_id = ? """ txn.execute(sql, [room_id]) - # Catch error if sql returns empty result to return "None" instead of an error - try: - res = self.db_pool.cursor_to_dict(txn)[0] - except IndexError: + row = txn.fetchone() + if not row: return None - - res["federatable"] = bool(res["federatable"]) - res["public"] = bool(res["public"]) - return res + return RoomStats( + room_id=row[0], + name=row[1], + canonical_alias=row[2], + joined_members=row[3], + joined_local_members=row[4], + version=row[5], + creator=row[6], + encryption=row[7], + federatable=bool(row[8]), + public=bool(row[9]), + join_rules=row[10], + guest_access=row[11], + history_visibility=row[12], + state_events=row[13], + avatar=row[14], + topic=row[15], + room_type=row[16], + ) return await self.db_pool.runInteraction( "get_room_with_stats", get_room_with_stats_txn, room_id @@ -368,7 +416,7 @@ async def get_largest_public_rooms( bounds: Optional[Tuple[int, str]], forwards: bool, ignore_non_federatable: bool = False, - ) -> List[Dict[str, Any]]: + ) -> List[LargestRoomStats]: """Gets the largest public rooms (where largest is in terms of joined members, as tracked in the statistics table). 
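get_room_with_stats (and get_largest_public_rooms further down) now returns a frozen attrs object built field-by-field from the raw row rather than a cursor_to_dict mapping, which gives callers attribute access and static typing but means the SELECT column order and the positional indices must stay in step. A cut-down sketch of the same construction, with an invented single-table query standing in for the real statistics join:

import sqlite3
from typing import Optional

import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class MiniRoomStats:
    room_id: str
    name: Optional[str]
    joined_members: int

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE room_stats (room_id TEXT, name TEXT, joined_members INTEGER)"
)
conn.execute("INSERT INTO room_stats VALUES ('!r:example.org', 'Lounge', 3)")

row = conn.execute(
    "SELECT room_id, name, joined_members FROM room_stats WHERE room_id = ?",
    ("!r:example.org",),
).fetchone()

# Positional construction: indices 0..2 must match the SELECT column order above.
stats = (
    None
    if row is None
    else MiniRoomStats(room_id=row[0], name=row[1], joined_members=row[2])
)
assert stats is not None and stats.joined_members == 3

Returning None for an unknown room mirrors the allow_none/empty-row handling in the hunk above.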
@@ -505,20 +553,34 @@ async def get_largest_public_rooms( def _get_largest_public_rooms_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Any]]: + ) -> List[LargestRoomStats]: txn.execute(sql, query_args) - results = self.db_pool.cursor_to_dict(txn) + results = [ + LargestRoomStats( + room_id=r[0], + name=r[1], + canonical_alias=r[3], + joined_members=r[4], + join_rules=r[8], + guest_access=r[7], + history_visibility=r[6], + state_events=0, + avatar=r[5], + topic=r[2], + room_type=r[9], + ) + for r in txn + ] if not forwards: results.reverse() return results - ret_val = await self.db_pool.runInteraction( + return await self.db_pool.runInteraction( "get_largest_public_rooms", _get_largest_public_rooms_txn ) - return ret_val @cached(max_entries=10000) async def is_room_blocked(self, room_id: str) -> Optional[bool]: @@ -742,10 +804,7 @@ async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverri ) if row: - return RatelimitOverride( - messages_per_second=row["messages_per_second"], - burst_count=row["burst_count"], - ) + return RatelimitOverride(messages_per_second=row[0], burst_count=row[1]) else: return None @@ -1232,28 +1291,30 @@ async def get_partial_state_room_resync_info( """ room_servers: Dict[str, PartialStateResyncInfo] = {} - rows = await self.db_pool.simple_select_list( - table="partial_state_rooms", - keyvalues={}, - retcols=("room_id", "joined_via"), - desc="get_server_which_served_partial_join", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="partial_state_rooms", + keyvalues={}, + retcols=("room_id", "joined_via"), + desc="get_server_which_served_partial_join", + ), ) - for row in rows: - room_id = row["room_id"] - joined_via = row["joined_via"] + for room_id, joined_via in rows: room_servers[room_id] = PartialStateResyncInfo(joined_via=joined_via) - rows = await self.db_pool.simple_select_list( - "partial_state_rooms_servers", - keyvalues=None, - retcols=("room_id", "server_name"), - desc="get_partial_state_rooms", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + "partial_state_rooms_servers", + keyvalues=None, + retcols=("room_id", "server_name"), + desc="get_partial_state_rooms", + ), ) - for row in rows: - room_id = row["room_id"] - server_name = row["server_name"] + for room_id, server_name in rows: entry = room_servers.get(room_id) if entry is None: # There is a foreign key constraint which enforces that every room_id in @@ -1317,13 +1378,15 @@ async def get_join_event_id_and_device_lists_stream_id_for_partial_state( join. 
""" - result = await self.db_pool.simple_select_one( - table="partial_state_rooms", - keyvalues={"room_id": room_id}, - retcols=("join_event_id", "device_lists_stream_id"), - desc="get_join_event_id_for_partial_state", + return cast( + Tuple[str, int], + await self.db_pool.simple_select_one( + table="partial_state_rooms", + keyvalues={"room_id": room_id}, + retcols=("join_event_id", "device_lists_stream_id"), + desc="get_join_event_id_for_partial_state", + ), ) - return result["join_event_id"], result["device_lists_stream_id"] def get_un_partial_stated_rooms_token(self, instance_name: str) -> int: return self._un_partial_stated_rooms_stream_id_gen.get_current_token_for_writer( @@ -2214,7 +2277,7 @@ def _store_partial_state_room_txn( txn, table="partial_state_rooms_servers", keys=("room_id", "server_name"), - values=((room_id, s) for s in servers), + values=[(room_id, s) for s in servers], ) self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) self._invalidate_cache_and_stream( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 3a87eba430d8..60d4a9ef301f 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -482,6 +482,22 @@ async def get_local_users_in_room(self, room_id: str) -> Sequence[str]: desc="get_local_users_in_room", ) + async def get_local_users_related_to_room( + self, room_id: str + ) -> List[Tuple[str, str]]: + """ + Retrieves a list of the current roommembers who are local to the server and their membership status. + """ + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="local_current_membership", + keyvalues={"room_id": room_id}, + retcols=("user_id", "membership"), + desc="get_local_users_in_room", + ), + ) + async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool: """ Check whether a given local user is currently joined to the given room. @@ -543,17 +559,20 @@ async def get_local_current_membership_for_user_in_room( "non-local user %s" % (user_id,), ) - results_dict = await self.db_pool.simple_select_one( - "local_current_membership", - {"room_id": room_id, "user_id": user_id}, - ("membership", "event_id"), - allow_none=True, - desc="get_local_current_membership_for_user_in_room", + results = cast( + Optional[Tuple[str, str]], + await self.db_pool.simple_select_one( + "local_current_membership", + {"room_id": room_id, "user_id": user_id}, + ("membership", "event_id"), + allow_none=True, + desc="get_local_current_membership_for_user_in_room", + ), ) - if not results_dict: + if not results: return None, None - return results_dict.get("membership"), results_dict.get("event_id") + return results @cached(max_entries=500000, iterable=True) async def get_rooms_for_user_with_stream_ordering( @@ -940,7 +959,7 @@ async def _check_host_room_membership( like_clause = "%:" + host rows = await self.db_pool.execute( - "is_host_joined", None, sql, membership, room_id, like_clause + "is_host_joined", sql, membership, room_id, like_clause ) if not rows: @@ -1070,13 +1089,16 @@ async def _get_approximate_current_memberships_in_room( for fully-joined rooms. 
""" - rows = await self.db_pool.simple_select_list( - "current_state_events", - keyvalues={"room_id": room_id}, - retcols=("event_id", "membership"), - desc="has_completed_background_updates", + rows = cast( + List[Tuple[str, Optional[str]]], + await self.db_pool.simple_select_list( + "current_state_events", + keyvalues={"room_id": room_id}, + retcols=("event_id", "membership"), + desc="has_completed_background_updates", + ), ) - return {row["event_id"]: row["membership"] for row in rows} + return dict(rows) # TODO This returns a mutable object, which is generally confusing when using a cache. @cached(max_entries=10000) # type: ignore[synapse-@cached-mutable] @@ -1165,7 +1187,7 @@ async def is_locally_forgotten_room(self, room_id: str) -> bool: AND forgotten = 0; """ - rows = await self.db_pool.execute("is_forgotten_room", None, sql, room_id) + rows = await self.db_pool.execute("is_forgotten_room", sql, room_id) # `count(*)` returns always an integer # If any rows still exist it means someone has not forgotten this room yet diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 1d69c4a5f006..e25d86818b37 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -26,6 +26,7 @@ Set, Tuple, Union, + cast, ) import attr @@ -105,7 +106,7 @@ def store_search_entries_txn( txn, table="event_search", keys=("event_id", "room_id", "key", "value"), - values=( + values=[ ( entry.event_id, entry.room_id, @@ -113,7 +114,7 @@ def store_search_entries_txn( _clean_value_for_search(entry.value), ) for entry in entries - ), + ], ) else: @@ -274,7 +275,7 @@ def create_index(conn: LoggingDatabaseConnection) -> None: # we have to set autocommit, because postgres refuses to # CREATE INDEX CONCURRENTLY without it. - conn.set_session(autocommit=True) + conn.engine.attempt_to_set_autocommit(conn.conn, True) try: c = conn.cursor() @@ -300,7 +301,7 @@ def create_index(conn: LoggingDatabaseConnection) -> None: # we should now be able to delete the GIST index. c.execute("DROP INDEX IF EXISTS event_search_fts_idx_gist") finally: - conn.set_session(autocommit=False) + conn.engine.attempt_to_set_autocommit(conn.conn, False) if isinstance(self.database_engine, PostgresEngine): await self.db_pool.runWithConnection(create_index) @@ -322,7 +323,7 @@ async def _background_reindex_search_order( def create_index(conn: LoggingDatabaseConnection) -> None: conn.rollback() - conn.set_session(autocommit=True) + conn.engine.attempt_to_set_autocommit(conn.conn, True) c = conn.cursor() # We create with NULLS FIRST so that when we search *backwards* @@ -339,7 +340,7 @@ def create_index(conn: LoggingDatabaseConnection) -> None: ON event_search(origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST) """ ) - conn.set_session(autocommit=False) + conn.engine.attempt_to_set_autocommit(conn.conn, False) await self.db_pool.runWithConnection(create_index) @@ -506,16 +507,18 @@ async def search_msgs( # entire table from the database. sql += " ORDER BY rank DESC LIMIT 500" - results = await self.db_pool.execute( - "search_msgs", self.db_pool.cursor_to_dict, sql, *args + # List of tuples of (rank, room_id, event_id). 
+ results = cast( + List[Tuple[Union[int, float], str, str]], + await self.db_pool.execute("search_msgs", sql, *args), ) - results = list(filter(lambda row: row["room_id"] in room_ids, results)) + results = list(filter(lambda row: row[1] in room_ids, results)) # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] - [r["event_id"] for r in results], + [r[2] for r in results], redact_behaviour=EventRedactBehaviour.block, ) @@ -527,16 +530,18 @@ async def search_msgs( count_sql += " GROUP BY room_id" - count_results = await self.db_pool.execute( - "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args + # List of tuples of (room_id, count). + count_results = cast( + List[Tuple[str, int]], + await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) - count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) + count = sum(row[1] for row in count_results if row[0] in room_ids) return { "results": [ - {"event": event_map[r["event_id"]], "rank": r["rank"]} + {"event": event_map[r[2]], "rank": r[0]} for r in results - if r["event_id"] in event_map + if r[2] in event_map ], "highlights": highlights, "count": count, @@ -604,7 +609,7 @@ async def search_rooms( search_query = search_term sql = """ SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) as rank, - origin_server_ts, stream_ordering, room_id, event_id + room_id, event_id, origin_server_ts, stream_ordering FROM event_search WHERE vector @@ websearch_to_tsquery('english', ?) AND """ @@ -665,16 +670,18 @@ async def search_rooms( # mypy expects to append only a `str`, not an `int` args.append(limit) - results = await self.db_pool.execute( - "search_rooms", self.db_pool.cursor_to_dict, sql, *args + # List of tuples of (rank, room_id, event_id, origin_server_ts, stream_ordering). + results = cast( + List[Tuple[Union[int, float], str, str, int, int]], + await self.db_pool.execute("search_rooms", sql, *args), ) - results = list(filter(lambda row: row["room_id"] in room_ids, results)) + results = list(filter(lambda row: row[1] in room_ids, results)) # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] - [r["event_id"] for r in results], + [r[2] for r in results], redact_behaviour=EventRedactBehaviour.block, ) @@ -686,22 +693,23 @@ async def search_rooms( count_sql += " GROUP BY room_id" - count_results = await self.db_pool.execute( - "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args + # List of tuples of (room_id, count). 
+ count_results = cast( + List[Tuple[str, int]], + await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) - count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) + count = sum(row[1] for row in count_results if row[0] in room_ids) return { "results": [ { - "event": event_map[r["event_id"]], - "rank": r["rank"], - "pagination_token": "%s,%s" - % (r["origin_server_ts"], r["stream_ordering"]), + "event": event_map[r[2]], + "rank": r[0], + "pagination_token": "%s,%s" % (r[3], r[4]), } for r in results - if r["event_id"] in event_map + if r[2] in event_map ], "highlights": highlights, "count": count, diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 5b2d0ba8707b..e96c9b0486a1 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -679,7 +679,7 @@ async def get_users_media_usage_paginate( order_by: Optional[str] = UserSortOrder.USER_ID.value, direction: Direction = Direction.FORWARDS, search_term: Optional[str] = None, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: """Function to retrieve a paginated list of users and their uploaded local media (size and number). This will return a json list of users and the total number of users matching the filter criteria. @@ -692,14 +692,19 @@ async def get_users_media_usage_paginate( order_by: the sort order of the returned list direction: sort ascending or descending search_term: a string to filter user names by + Returns: - A list of user dicts and an integer representing the total number of - users that exist given this query + A tuple of: + A list of tuples of user information (the user ID, displayname, + total number of media, total length of media) and + + An integer representing the total number of users that exist + given this query """ def get_users_media_usage_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: filters = [] args: list = [] @@ -773,7 +778,7 @@ def get_users_media_usage_paginate_txn( args += [limit, start] txn.execute(sql, args) - users = self.db_pool.cursor_to_dict(txn) + users = cast(List[Tuple[str, Optional[str], int, int]], txn.fetchall()) return users, count diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 872df6bda12c..563c275a2cec 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1014,9 +1014,7 @@ async def get_position_for_event(self, event_id: str) -> PersistedEventPosition: desc="get_position_for_event", ) - return PersistedEventPosition( - row["instance_name"] or "master", row["stream_ordering"] - ) + return PersistedEventPosition(row[1] or "master", row[0]) async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToken: """The stream token for an event @@ -1033,9 +1031,7 @@ async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToke retcols=("stream_ordering", "topological_ordering"), desc="get_topological_token_for_event", ) - return RoomStreamToken( - topological=row["topological_ordering"], stream=row["stream_ordering"] - ) + return RoomStreamToken(topological=row[1], stream=row[0]) async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: """Gets the topological token in a room after or at the given stream @@ -1078,7 +1074,7 @@ async def 
get_current_topological_token(self, room_id: str, stream_key: int) -> """ row = await self.db_pool.execute( - "get_current_topological_token", None, sql, room_id, room_id, stream_key + "get_current_topological_token", sql, room_id, room_id, stream_key ) return row[0][0] if row else 0 @@ -1180,26 +1176,24 @@ def _get_events_around_txn( dict """ - results = self.db_pool.simple_select_one_txn( - txn, - "events", - keyvalues={"event_id": event_id, "room_id": room_id}, - retcols=["stream_ordering", "topological_ordering"], + stream_ordering, topological_ordering = cast( + Tuple[int, int], + self.db_pool.simple_select_one_txn( + txn, + "events", + keyvalues={"event_id": event_id, "room_id": room_id}, + retcols=["stream_ordering", "topological_ordering"], + ), ) - # This cannot happen as `allow_none=False`. - assert results is not None - # Paginating backwards includes the event at the token, but paginating # forward doesn't. before_token = RoomStreamToken( - topological=results["topological_ordering"] - 1, - stream=results["stream_ordering"], + topological=topological_ordering - 1, stream=stream_ordering ) after_token = RoomStreamToken( - topological=results["topological_ordering"], - stream=results["stream_ordering"], + topological=topological_ordering, stream=stream_ordering ) rows, start_token = self._paginate_room_events_txn( @@ -1636,7 +1630,6 @@ async def get_timeline_gaps( rows = await self.db_pool.execute( "get_timeline_gaps", - None, sql, room_id, from_token.stream if from_token else 0, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 61403a98cf95..7deda7790e1e 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -45,14 +45,17 @@ async def get_tags_for_user( tag content. """ - rows = await self.db_pool.simple_select_list( - "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"] + rows = cast( + List[Tuple[str, str, str]], + await self.db_pool.simple_select_list( + "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"] + ), ) tags_by_room: Dict[str, Dict[str, JsonDict]] = {} - for row in rows: - room_tags = tags_by_room.setdefault(row["room_id"], {}) - room_tags[row["tag"]] = db_to_json(row["content"]) + for room_id, tag, content in rows: + room_tags = tags_by_room.setdefault(room_id, {}) + room_tags[tag] = db_to_json(content) return tags_by_room async def get_all_updated_tags( @@ -161,13 +164,16 @@ async def get_tags_for_room( Returns: A mapping of tags to tag content. 
""" - rows = await self.db_pool.simple_select_list( - table="room_tags", - keyvalues={"user_id": user_id, "room_id": room_id}, - retcols=("tag", "content"), - desc="get_tags_for_room", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="room_tags", + keyvalues={"user_id": user_id, "room_id": room_id}, + retcols=("tag", "content"), + desc="get_tags_for_room", + ), ) - return {row["tag"]: db_to_json(row["content"]) for row in rows} + return {tag: db_to_json(content) for tag, content in rows} async def add_tag_to_room( self, user_id: str, room_id: str, tag: str, content: JsonDict diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 5555b53575b2..64543b4d614f 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -183,39 +183,27 @@ async def get_scheduled_task(self, id: str) -> Optional[ScheduledTask]: Returns: the task if available, `None` otherwise """ - row = await self.db_pool.simple_select_one( - table="scheduled_tasks", - keyvalues={"id": id}, - retcols=( - "id", - "action", - "status", - "timestamp", - "resource_id", - "params", - "result", - "error", + row = cast( + Optional[ScheduledTaskRow], + await self.db_pool.simple_select_one( + table="scheduled_tasks", + keyvalues={"id": id}, + retcols=( + "id", + "action", + "status", + "timestamp", + "resource_id", + "params", + "result", + "error", + ), + allow_none=True, + desc="get_scheduled_task", ), - allow_none=True, - desc="get_scheduled_task", ) - return ( - TaskSchedulerWorkerStore._convert_row_to_task( - ( - row["id"], - row["action"], - row["status"], - row["timestamp"], - row["resource_id"], - row["params"], - row["result"], - row["error"], - ) - ) - if row - else None - ) + return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None async def delete_scheduled_task(self, id: str) -> None: """Delete a specific task from its id. diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index c4a647506013..2d341affaa48 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -118,19 +118,13 @@ def _get_received_txn_response( txn, table="received_transactions", keyvalues={"transaction_id": transaction_id, "origin": origin}, - retcols=( - "transaction_id", - "origin", - "ts", - "response_code", - "response_json", - "has_been_referenced", - ), + retcols=("response_code", "response_json"), allow_none=True, ) - if result and result["response_code"]: - return result["response_code"], db_to_json(result["response_json"]) + # If the result exists and the response code is non-0. 
+ if result and result[0]: + return result[0], db_to_json(result[1]) else: return None @@ -200,8 +194,10 @@ def _get_destination_retry_timings( # check we have a row and retry_last_ts is not null or zero # (retry_last_ts can't be negative) - if result and result["retry_last_ts"]: - return DestinationRetryTimings(**result) + if result and result[1]: + return DestinationRetryTimings( + failure_ts=result[0], retry_last_ts=result[1], retry_interval=result[2] + ) else: return None @@ -478,7 +474,10 @@ async def get_destinations_paginate( destination: Optional[str] = None, order_by: str = DestinationSortOrder.DESTINATION.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[ + List[Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]]], + int, + ]: """Function to retrieve a paginated list of destinations. This will return a json list of destinations and the total number of destinations matching the filter criteria. @@ -490,13 +489,23 @@ async def get_destinations_paginate( order_by: the sort order of the returned list direction: sort ascending or descending Returns: - A tuple of a list of mappings from destination to information + A tuple of a list of tuples of destination information: + * destination + * retry_last_ts + * retry_interval + * failure_ts + * last_successful_stream_ordering and a count of total destinations. """ def get_destinations_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[ + List[ + Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]] + ], + int, + ]: order_by_column = DestinationSortOrder(order_by).value if direction == Direction.BACKWARDS: @@ -523,7 +532,14 @@ def get_destinations_paginate_txn( LIMIT ? OFFSET ? """ txn.execute(sql, args + [limit, start]) - destinations = self.db_pool.cursor_to_dict(txn) + destinations = cast( + List[ + Tuple[ + str, Optional[int], Optional[int], Optional[int], Optional[int] + ] + ], + txn.fetchall(), + ) return destinations, count return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 919c66f5530a..5b164fed8e30 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -122,9 +122,13 @@ async def get_ui_auth_session(self, session_id: str) -> UIAuthSessionData: desc="get_ui_auth_session", ) - result["clientdict"] = db_to_json(result["clientdict"]) - - return UIAuthSessionData(session_id, **result) + return UIAuthSessionData( + session_id, + clientdict=db_to_json(result[0]), + uri=result[1], + method=result[2], + description=result[3], + ) async def mark_ui_auth_stage_complete( self, @@ -169,13 +173,17 @@ async def get_completed_ui_auth_stages( that auth-type. 
""" results = {} - for row in await self.db_pool.simple_select_list( - table="ui_auth_sessions_credentials", - keyvalues={"session_id": session_id}, - retcols=("stage_type", "result"), - desc="get_completed_ui_auth_stages", - ): - results[row["stage_type"]] = db_to_json(row["result"]) + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="ui_auth_sessions_credentials", + keyvalues={"session_id": session_id}, + retcols=("stage_type", "result"), + desc="get_completed_ui_auth_stages", + ), + ) + for stage_type, result in rows: + results[stage_type] = db_to_json(result) return results @@ -227,18 +235,15 @@ def _set_ui_auth_session_data_txn( self, txn: LoggingTransaction, session_id: str, key: str, value: Any ) -> None: # Get the current value. - result = cast( - Dict[str, Any], - self.db_pool.simple_select_one_txn( - txn, - table="ui_auth_sessions", - keyvalues={"session_id": session_id}, - retcols=("serverdict",), - ), + result = self.db_pool.simple_select_one_onecol_txn( + txn, + table="ui_auth_sessions", + keyvalues={"session_id": session_id}, + retcol="serverdict", ) # Update it and add it back to the database. - serverdict = db_to_json(result["serverdict"]) + serverdict = db_to_json(result) serverdict[key] = value self.db_pool.simple_update_one_txn( @@ -261,14 +266,14 @@ async def get_ui_auth_session_data( Raises: StoreError if the session cannot be found. """ - result = await self.db_pool.simple_select_one( + result = await self.db_pool.simple_select_one_onecol( table="ui_auth_sessions", keyvalues={"session_id": session_id}, - retcols=("serverdict",), + retcol="serverdict", desc="get_ui_auth_session_data", ) - serverdict = db_to_json(result["serverdict"]) + serverdict = db_to_json(result) return serverdict.get(key, default) @@ -295,13 +300,15 @@ async def get_user_agents_ips_to_ui_auth_session( Returns: List of user_agent/ip pairs """ - rows = await self.db_pool.simple_select_list( - table="ui_auth_sessions_ips", - keyvalues={"session_id": session_id}, - retcols=("user_agent", "ip"), - desc="get_user_agents_ips_to_ui_auth_session", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="ui_auth_sessions_ips", + keyvalues={"session_id": session_id}, + retcols=("user_agent", "ip"), + desc="get_user_agents_ips_to_ui_auth_session", + ), ) - return [(row["user_agent"], row["ip"]) for row in rows] async def delete_old_ui_auth_sessions(self, expiration_time: int) -> None: """ diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 23eb92c5149e..1a38f3d785d6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -20,7 +20,6 @@ Collection, Iterable, List, - Mapping, Optional, Sequence, Set, @@ -833,13 +832,25 @@ def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None: "delete_all_from_user_dir", _delete_all_from_user_dir_txn ) - async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]: - return await self.db_pool.simple_select_one( - table="user_directory", - keyvalues={"user_id": user_id}, - retcols=("display_name", "avatar_url"), - allow_none=True, - desc="get_user_in_directory", + async def _get_user_in_directory( + self, user_id: str + ) -> Optional[Tuple[Optional[str], Optional[str]]]: + """ + Fetch the user information in the user directory. 
+ + Returns: + None if the user is unknown, otherwise a tuple of display name and + avatar URL (both of which may be None). + """ + return cast( + Optional[Tuple[Optional[str], Optional[str]]], + await self.db_pool.simple_select_one( + table="user_directory", + keyvalues={"user_id": user_id}, + retcols=("display_name", "avatar_url"), + allow_none=True, + desc="get_user_in_directory", + ), ) async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None: @@ -1145,15 +1156,19 @@ async def search_user_dir( raise Exception("Unrecognized database engine") results = cast( - List[UserProfile], - await self.db_pool.execute( - "search_user_dir", self.db_pool.cursor_to_dict, sql, *args - ), + List[Tuple[str, Optional[str], Optional[str]]], + await self.db_pool.execute("search_user_dir", sql, *args), ) limited = len(results) > limit - return {"limited": limited, "results": results[0:limit]} + return { + "limited": limited, + "results": [ + {"user_id": r[0], "display_name": r[1], "avatar_url": r[2]} + for r in results[0:limit] + ], + } def _filter_text_for_index(text: str) -> str: diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 6ff533a129bd..2c3151526db8 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -359,7 +359,6 @@ async def _background_deduplicate_state( if max_group is None: rows = await self.db_pool.execute( "_background_deduplicate_state", - None, "SELECT coalesce(max(id), 0) FROM state_groups", ) max_group = rows[0][0] @@ -493,7 +492,7 @@ def reindex_txn(conn: LoggingDatabaseConnection) -> None: conn.rollback() if isinstance(self.database_engine, PostgresEngine): # postgres insists on autocommit for the index - conn.set_session(autocommit=True) + conn.engine.attempt_to_set_autocommit(conn.conn, True) try: txn = conn.cursor() txn.execute( @@ -502,7 +501,7 @@ def reindex_txn(conn: LoggingDatabaseConnection) -> None: ) txn.execute("DROP INDEX IF EXISTS state_groups_state_id") finally: - conn.set_session(autocommit=False) + conn.engine.attempt_to_set_autocommit(conn.conn, False) else: txn = conn.cursor() txn.execute( diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 09d2a8c5b3f0..182e429174f9 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -154,16 +154,22 @@ def _get_state_group_delta_txn(txn: LoggingTransaction) -> _GetStateGroupDelta: if not prev_group: return _GetStateGroupDelta(None, None) - delta_ids = self.db_pool.simple_select_list_txn( - txn, - table="state_groups_state", - keyvalues={"state_group": state_group}, - retcols=("type", "state_key", "event_id"), + delta_ids = cast( + List[Tuple[str, str, str]], + self.db_pool.simple_select_list_txn( + txn, + table="state_groups_state", + keyvalues={"state_group": state_group}, + retcols=("type", "state_key", "event_id"), + ), ) return _GetStateGroupDelta( prev_group, - {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids}, + { + (event_type, state_key): event_id + for event_type, state_key, event_id in delta_ids + }, ) return await self.db_pool.runInteraction( diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 6309363217a8..ec4c4041b700 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -38,7 +38,8 @@ def __init__(self, database_config: Mapping[str, Any]): super().__init__(psycopg2, 
database_config) psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) - # Disables passing `bytes` to txn.execute, c.f. #6186. If you do + # Disables passing `bytes` to txn.execute, c.f. + # https://github.com/matrix-org/synapse/issues/6186. If you do # actually want to use bytes than wrap it in `bytearray`. def _disable_bytes_adapter(_: bytes) -> NoReturn: raise Exception("Passing bytes to DB is disabled.") diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5b50bd66bcb3..03e5a0f55d3b 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 82 # remember to update the list below when updating +SCHEMA_VERSION = 83 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -109,7 +109,8 @@ Changes in SCHEMA_VERSION = 79 - Add tables to handle in DB read-write locks. - - Add some mitigations for a painful race between foreground and background updates, cf #15677. + - Add some mitigations for a painful race between foreground and background updates, cf + https://github.com/matrix-org/synapse/issues/15677. Changes in SCHEMA_VERSION = 80 - The event_txn_id_device_id is always written to for new events. @@ -121,6 +122,9 @@ Changes in SCHEMA_VERSION = 82 - The insertion_events, insertion_event_extremities, insertion_event_edges, and batch_events tables are no longer purged in preparation for their removal. + +Changes in SCHEMA_VERSION = 83 + - The event_txn_id is no longer used. """ diff --git a/synapse/storage/schema/common/full_schemas/54/full.sql b/synapse/storage/schema/common/full_schemas/54/full.sql deleted file mode 100644 index 10058804667f..000000000000 --- a/synapse/storage/schema/common/full_schemas/54/full.sql +++ /dev/null @@ -1,8 +0,0 @@ - - -CREATE TABLE background_updates ( - update_name text NOT NULL, - progress_json text NOT NULL, - depends_on text, - CONSTRAINT background_updates_uniqueness UNIQUE (update_name) -); diff --git a/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql index b062ec840ce9..f713e42aa022 100644 --- a/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql @@ -14,7 +14,7 @@ */ -- Start a background job to cleanup extremities that were incorrectly added --- by bug #5269. +-- by bug https://github.com/matrix-org/synapse/issues/5269. INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); diff --git a/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql index aeb17813d3fa..246c3359f7a2 100644 --- a/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql +++ b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql @@ -13,6 +13,7 @@ * limitations under the License. */ --- Now that #6232 is a thing, we can remove old rooms from the directory. +-- Now that https://github.com/matrix-org/synapse/pull/6232 is a thing, we can +-- remove old rooms from the directory. 
INSERT INTO background_updates (update_name, progress_json) VALUES ('remove_tombstoned_rooms_from_directory', '{}'); diff --git a/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql b/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql index aed79635b2a2..31a61defa710 100644 --- a/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql +++ b/synapse/storage/schema/main/delta/70/01clean_table_purged_rooms.sql @@ -13,7 +13,8 @@ * limitations under the License. */ --- Clean up left over rows from bug #11833, which was fixed in #12770. +-- Clean up left over rows from bug https://github.com/matrix-org/synapse/issues/11833, +-- which was fixed in https://github.com/matrix-org/synapse/pull/12770. DELETE FROM federation_inbound_events_staging WHERE room_id not in ( SELECT room_id FROM rooms ); diff --git a/synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres b/synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite similarity index 77% rename from synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres rename to synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite index fcd926c9fbbe..6c7ad0fd378e 100644 --- a/synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres +++ b/synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite @@ -1,4 +1,4 @@ -/* Copyright 2019 The Matrix.org Foundation C.I.C +/* Copyright 2023 The Matrix.org Foundation C.I.C * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,9 +13,5 @@ * limitations under the License. */ -CREATE SEQUENCE state_group_id_seq - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; +-- This already exists on Postgres. +ALTER TABLE receipts_linearized ADD COLUMN instance_name TEXT; diff --git a/synapse/storage/schema/main/full_schemas/16/profiles.sql b/synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql similarity index 76% rename from synapse/storage/schema/main/full_schemas/16/profiles.sql rename to synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql index c04f4747d94a..b74bdd71fab9 100644 --- a/synapse/storage/schema/main/full_schemas/16/profiles.sql +++ b/synapse/storage/schema/main/delta/83/05_cross_signing_key_update_grant.sql @@ -1,4 +1,4 @@ -/* Copyright 2014-2016 OpenMarket Ltd +/* Copyright 2023 The Matrix.org Foundation C.I.C * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -12,9 +12,4 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -CREATE TABLE IF NOT EXISTS profiles( - user_id TEXT NOT NULL, - displayname TEXT, - avatar_url TEXT, - UNIQUE(user_id) -); +ALTER TABLE e2e_cross_signing_keys ADD COLUMN updatable_without_uia_before_ms bigint DEFAULT NULL; \ No newline at end of file diff --git a/synapse/storage/schema/main/full_schemas/16/redactions.sql b/synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql similarity index 61% rename from synapse/storage/schema/main/full_schemas/16/redactions.sql rename to synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql index 318f0d9aa582..1aae1b7557d4 100644 --- a/synapse/storage/schema/main/full_schemas/16/redactions.sql +++ b/synapse/storage/schema/main/delta/83/06_event_push_summary_room.sql @@ -1,10 +1,10 @@ -/* Copyright 2014-2016 OpenMarket Ltd +/* Copyright 2023 The Matrix.org Foundation C.I.C * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -12,11 +12,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -CREATE TABLE IF NOT EXISTS redactions ( - event_id TEXT NOT NULL, - redacts TEXT NOT NULL, - UNIQUE (event_id) -); -CREATE INDEX redactions_event_id ON redactions (event_id); -CREATE INDEX redactions_redacts ON redactions (redacts); +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8306, 'event_push_summary_index_room_id', '{}'); diff --git a/synapse/storage/schema/main/full_schemas/16/application_services.sql b/synapse/storage/schema/main/full_schemas/16/application_services.sql deleted file mode 100644 index 883fcd10b21d..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/application_services.sql +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2015, 2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* We used to create tables called application_services and - * application_services_regex, but these are no longer used and are removed in - * delta 54. 
- */ - - -CREATE TABLE IF NOT EXISTS application_services_state( - as_id TEXT PRIMARY KEY, - state VARCHAR(5), - last_txn INTEGER -); - -CREATE TABLE IF NOT EXISTS application_services_txns( - as_id TEXT NOT NULL, - txn_id INTEGER NOT NULL, - event_ids TEXT NOT NULL, - UNIQUE(as_id, txn_id) -); - -CREATE INDEX application_services_txns_id ON application_services_txns ( - as_id -); diff --git a/synapse/storage/schema/main/full_schemas/16/event_edges.sql b/synapse/storage/schema/main/full_schemas/16/event_edges.sql deleted file mode 100644 index 10ce2aa7a04a..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/event_edges.sql +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* We used to create tables called event_destinations and - * state_forward_extremities, but these are no longer used and are removed in - * delta 54. - */ - -CREATE TABLE IF NOT EXISTS event_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id); -CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_backward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id); -CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_edges( - event_id TEXT NOT NULL, - prev_event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - is_state BOOL NOT NULL, -- true if this is a prev_state edge rather than a regular - -- event dag edge. - UNIQUE (event_id, prev_event_id, room_id, is_state) -); - -CREATE INDEX ev_edges_id ON event_edges(event_id); -CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id); - - -CREATE TABLE IF NOT EXISTS room_depth( - room_id TEXT NOT NULL, - min_depth INTEGER NOT NULL, - UNIQUE (room_id) -); - -CREATE INDEX room_depth_room ON room_depth(room_id); - -CREATE TABLE IF NOT EXISTS event_auth( - event_id TEXT NOT NULL, - auth_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, auth_id, room_id) -); - -CREATE INDEX evauth_edges_id ON event_auth(event_id); -CREATE INDEX evauth_edges_auth_id ON event_auth(auth_id); diff --git a/synapse/storage/schema/main/full_schemas/16/event_signatures.sql b/synapse/storage/schema/main/full_schemas/16/event_signatures.sql deleted file mode 100644 index 95826da43168..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/event_signatures.sql +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - /* We used to create tables called event_content_hashes and event_edge_hashes, - * but these are no longer used and are removed in delta 54. - */ - -CREATE TABLE IF NOT EXISTS event_reference_hashes ( - event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, algorithm) -); - -CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id); - - -CREATE TABLE IF NOT EXISTS event_signatures ( - event_id TEXT, - signature_name TEXT, - key_id TEXT, - signature bytea, - UNIQUE (event_id, signature_name, key_id) -); - -CREATE INDEX event_signatures_id ON event_signatures(event_id); diff --git a/synapse/storage/schema/main/full_schemas/16/im.sql b/synapse/storage/schema/main/full_schemas/16/im.sql deleted file mode 100644 index a1a2aa8e5b5f..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/im.sql +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* We used to create tables called room_hosts and feedback, - * but these are no longer used and are removed in delta 54. - */ - -CREATE TABLE IF NOT EXISTS events( - stream_ordering INTEGER PRIMARY KEY, - topological_ordering BIGINT NOT NULL, - event_id TEXT NOT NULL, - type TEXT NOT NULL, - room_id TEXT NOT NULL, - - -- 'content' used to be created NULLable, but as of delta 50 we drop that constraint. - -- the hack we use to drop the constraint doesn't work for an in-memory sqlite - -- database, which breaks the sytests. Hence, we no longer make it nullable. 
- content TEXT, - - unrecognized_keys TEXT, - processed BOOL NOT NULL, - outlier BOOL NOT NULL, - depth BIGINT DEFAULT 0 NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX events_stream_ordering ON events (stream_ordering); -CREATE INDEX events_topological_ordering ON events (topological_ordering); -CREATE INDEX events_order ON events (topological_ordering, stream_ordering); -CREATE INDEX events_room_id ON events (room_id); -CREATE INDEX events_order_room ON events ( - room_id, topological_ordering, stream_ordering -); - - -CREATE TABLE IF NOT EXISTS event_json( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - internal_metadata TEXT NOT NULL, - json TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX event_json_room_id ON event_json(room_id); - - -CREATE TABLE IF NOT EXISTS state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - prev_state TEXT, - UNIQUE (event_id) -); - -CREATE INDEX state_events_room_id ON state_events (room_id); -CREATE INDEX state_events_type ON state_events (type); -CREATE INDEX state_events_state_key ON state_events (state_key); - - -CREATE TABLE IF NOT EXISTS current_state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - UNIQUE (event_id), - UNIQUE (room_id, type, state_key) -); - -CREATE INDEX current_state_events_room_id ON current_state_events (room_id); -CREATE INDEX current_state_events_type ON current_state_events (type); -CREATE INDEX current_state_events_state_key ON current_state_events (state_key); - -CREATE TABLE IF NOT EXISTS room_memberships( - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - sender TEXT NOT NULL, - room_id TEXT NOT NULL, - membership TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX room_memberships_user_id ON room_memberships (user_id); - -CREATE TABLE IF NOT EXISTS topics( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - topic TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX topics_room_id ON topics(room_id); - -CREATE TABLE IF NOT EXISTS room_names( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - name TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX room_names_room_id ON room_names(room_id); - -CREATE TABLE IF NOT EXISTS rooms( - room_id TEXT PRIMARY KEY NOT NULL, - is_public BOOL, - creator TEXT -); diff --git a/synapse/storage/schema/main/full_schemas/16/keys.sql b/synapse/storage/schema/main/full_schemas/16/keys.sql deleted file mode 100644 index 11cdffdbb3ae..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/keys.sql +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- we used to create a table called server_tls_certificates, but this is no --- longer used, and is removed in delta 54. - -CREATE TABLE IF NOT EXISTS server_signature_keys( - server_name TEXT, -- Server name. - key_id TEXT, -- Key version. 
- from_server TEXT, -- Which key server the key was fetched form. - ts_added_ms BIGINT, -- When the key was added. - verify_key bytea, -- NACL verification key. - UNIQUE (server_name, key_id) -); diff --git a/synapse/storage/schema/main/full_schemas/16/media_repository.sql b/synapse/storage/schema/main/full_schemas/16/media_repository.sql deleted file mode 100644 index 8f3759bb2a6f..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/media_repository.sql +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS local_media_repository ( - media_id TEXT, -- The id used to refer to the media. - media_type TEXT, -- The MIME-type of the media. - media_length INTEGER, -- Length of the media in bytes. - created_ts BIGINT, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - user_id TEXT, -- The user who uploaded the file. - UNIQUE (media_id) -); - -CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - UNIQUE ( - media_id, thumbnail_width, thumbnail_height, thumbnail_type - ) -); - -CREATE INDEX local_media_repository_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); - -CREATE TABLE IF NOT EXISTS remote_media_cache ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media on that server. - media_type TEXT, -- The MIME-type of the media. - created_ts BIGINT, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - media_length INTEGER, -- Length of the media in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - UNIQUE (media_origin, media_id) -); - -CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_method TEXT, -- The method used to make the thumbnail - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. 
- UNIQUE ( - media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type - ) -); - -CREATE INDEX remote_media_cache_thumbnails_media_id - ON remote_media_cache_thumbnails (media_id); diff --git a/synapse/storage/schema/main/full_schemas/16/presence.sql b/synapse/storage/schema/main/full_schemas/16/presence.sql deleted file mode 100644 index 01d2d8f833c4..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/presence.sql +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS presence( - user_id TEXT NOT NULL, - state VARCHAR(20), - status_msg TEXT, - mtime BIGINT, -- miliseconds since last state change - UNIQUE (user_id) -); - --- For each of /my/ users which possibly-remote users are allowed to see their --- presence state -CREATE TABLE IF NOT EXISTS presence_allow_inbound( - observed_user_id TEXT NOT NULL, - observer_user_id TEXT NOT NULL, -- a UserID, - UNIQUE (observed_user_id, observer_user_id) -); - --- We used to create a table called presence_list, but this is no longer used --- and is removed in delta 54. \ No newline at end of file diff --git a/synapse/storage/schema/main/full_schemas/16/push.sql b/synapse/storage/schema/main/full_schemas/16/push.sql deleted file mode 100644 index e44465cf45bd..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/push.sql +++ /dev/null @@ -1,74 +0,0 @@ -/* Copyright 2015, 2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -CREATE TABLE IF NOT EXISTS rejections( - event_id TEXT NOT NULL, - reason TEXT NOT NULL, - last_check TEXT NOT NULL, - UNIQUE (event_id) -); - --- Push notification endpoints that users have configured -CREATE TABLE IF NOT EXISTS pushers ( - id BIGINT PRIMARY KEY, - user_name TEXT NOT NULL, - access_token BIGINT DEFAULT NULL, - profile_tag VARCHAR(32) NOT NULL, - kind VARCHAR(8) NOT NULL, - app_id VARCHAR(64) NOT NULL, - app_display_name VARCHAR(64) NOT NULL, - device_display_name VARCHAR(128) NOT NULL, - pushkey bytea NOT NULL, - ts BIGINT NOT NULL, - lang VARCHAR(8), - data bytea, - last_token TEXT, - last_success BIGINT, - failing_since BIGINT, - UNIQUE (app_id, pushkey) -); - -CREATE TABLE IF NOT EXISTS push_rules ( - id BIGINT PRIMARY KEY, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - priority_class SMALLINT NOT NULL, - priority INTEGER NOT NULL DEFAULT 0, - conditions TEXT NOT NULL, - actions TEXT NOT NULL, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX push_rules_user_name on push_rules (user_name); - -CREATE TABLE IF NOT EXISTS user_filters( - user_id TEXT, - filter_id BIGINT, - filter_json bytea -); - -CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( - user_id, filter_id -); - -CREATE TABLE IF NOT EXISTS push_rules_enable ( - id BIGINT PRIMARY KEY, - user_name TEXT NOT NULL, - rule_id TEXT NOT NULL, - enabled SMALLINT, - UNIQUE(user_name, rule_id) -); - -CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name); diff --git a/synapse/storage/schema/main/full_schemas/16/room_aliases.sql b/synapse/storage/schema/main/full_schemas/16/room_aliases.sql deleted file mode 100644 index d47da3b12fc1..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/room_aliases.sql +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS room_aliases( - room_alias TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (room_alias) -); - -CREATE INDEX room_aliases_id ON room_aliases(room_id); - -CREATE TABLE IF NOT EXISTS room_alias_servers( - room_alias TEXT NOT NULL, - server TEXT NOT NULL -); - -CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias); diff --git a/synapse/storage/schema/main/full_schemas/16/state.sql b/synapse/storage/schema/main/full_schemas/16/state.sql deleted file mode 100644 index 96391a8f0e1c..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/state.sql +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS state_groups( - id BIGINT PRIMARY KEY, - room_id TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS state_groups_state( - state_group BIGINT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS event_to_state_groups( - event_id TEXT NOT NULL, - state_group BIGINT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX state_groups_id ON state_groups(id); - -CREATE INDEX state_groups_state_id ON state_groups_state(state_group); -CREATE INDEX state_groups_state_tuple ON state_groups_state(room_id, type, state_key); -CREATE INDEX event_to_state_groups_id ON event_to_state_groups(event_id); diff --git a/synapse/storage/schema/main/full_schemas/16/transactions.sql b/synapse/storage/schema/main/full_schemas/16/transactions.sql deleted file mode 100644 index 17e67bedacb0..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/transactions.sql +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Stores what transaction ids we have received and what our response was -CREATE TABLE IF NOT EXISTS received_transactions( - transaction_id TEXT, - origin TEXT, - ts BIGINT, - response_code INTEGER, - response_json bytea, - has_been_referenced smallint default 0, -- Whether thishas been referenced by a prev_tx - UNIQUE (transaction_id, origin) -); - -CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0; - --- For sent transactions only. -CREATE TABLE IF NOT EXISTS transaction_id_to_pdu( - transaction_id INTEGER, - destination TEXT, - pdu_id TEXT, - pdu_origin TEXT, - UNIQUE (transaction_id, destination) -); - -CREATE INDEX transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination); - --- To track destination health -CREATE TABLE IF NOT EXISTS destinations( - destination TEXT PRIMARY KEY, - retry_last_ts BIGINT, - retry_interval INTEGER -); diff --git a/synapse/storage/schema/main/full_schemas/16/users.sql b/synapse/storage/schema/main/full_schemas/16/users.sql deleted file mode 100644 index f013aa8b18eb..000000000000 --- a/synapse/storage/schema/main/full_schemas/16/users.sql +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS users( - name TEXT, - password_hash TEXT, - creation_ts BIGINT, - admin SMALLINT DEFAULT 0 NOT NULL, - UNIQUE(name) -); - -CREATE TABLE IF NOT EXISTS access_tokens( - id BIGINT PRIMARY KEY, - user_id TEXT NOT NULL, - device_id TEXT, - token TEXT NOT NULL, - last_used BIGINT, - UNIQUE(token) -); - -CREATE TABLE IF NOT EXISTS user_ips ( - user_id TEXT NOT NULL, - access_token TEXT NOT NULL, - device_id TEXT, - ip TEXT NOT NULL, - user_agent TEXT NOT NULL, - last_seen BIGINT NOT NULL -); - -CREATE INDEX user_ips_user ON user_ips(user_id); -CREATE INDEX user_ips_user_ip ON user_ips(user_id, access_token, ip); diff --git a/synapse/storage/schema/main/full_schemas/54/full.sql.postgres b/synapse/storage/schema/main/full_schemas/54/full.sql.postgres deleted file mode 100644 index 889a9a0ce4c4..000000000000 --- a/synapse/storage/schema/main/full_schemas/54/full.sql.postgres +++ /dev/null @@ -1,1983 +0,0 @@ - - - - - -CREATE TABLE access_tokens ( - id bigint NOT NULL, - user_id text NOT NULL, - device_id text, - token text NOT NULL, - last_used bigint -); - - - -CREATE TABLE account_data ( - user_id text NOT NULL, - account_data_type text NOT NULL, - stream_id bigint NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE account_data_max_stream_id ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - stream_id bigint NOT NULL, - CONSTRAINT private_user_data_max_stream_id_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - -CREATE TABLE account_validity ( - user_id text NOT NULL, - expiration_ts_ms bigint NOT NULL, - email_sent boolean NOT NULL, - renewal_token text -); - - - -CREATE TABLE application_services_state ( - as_id text NOT NULL, - state character varying(5), - last_txn integer -); - - - -CREATE TABLE application_services_txns ( - as_id text NOT NULL, - txn_id integer NOT NULL, - event_ids text NOT NULL -); - - - -CREATE TABLE appservice_room_list ( - appservice_id text NOT NULL, - network_id text NOT NULL, - room_id text NOT NULL -); - - - -CREATE TABLE appservice_stream_position ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - stream_ordering bigint, - CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar)) -); - - -CREATE TABLE blocked_rooms ( - room_id text NOT NULL, - user_id text NOT NULL -); - - - -CREATE TABLE cache_invalidation_stream ( - stream_id bigint, - cache_func text, - keys text[], - invalidation_ts bigint -); - - - -CREATE TABLE current_state_delta_stream ( - stream_id bigint NOT NULL, - room_id text NOT NULL, - type text NOT NULL, - state_key text NOT NULL, - event_id text, - prev_event_id text -); - - - -CREATE TABLE current_state_events ( - event_id text NOT NULL, - room_id text NOT NULL, - type text NOT NULL, - state_key text NOT NULL -); - - - -CREATE TABLE deleted_pushers ( - stream_id bigint NOT NULL, - app_id text NOT NULL, - pushkey text NOT NULL, - user_id text NOT NULL -); - - - -CREATE TABLE destinations ( - destination text NOT NULL, - retry_last_ts bigint, - retry_interval integer -); - - - -CREATE TABLE device_federation_inbox ( - origin text NOT NULL, - message_id text NOT NULL, - received_ts bigint NOT NULL -); - - - -CREATE TABLE device_federation_outbox ( - destination text NOT NULL, - stream_id bigint NOT NULL, - queued_ts bigint NOT NULL, - messages_json text NOT NULL -); - - - -CREATE TABLE device_inbox ( - user_id text NOT NULL, - device_id text NOT NULL, - stream_id bigint NOT NULL, - message_json text NOT NULL -); - - - -CREATE TABLE device_lists_outbound_last_success ( - destination 
text NOT NULL, - user_id text NOT NULL, - stream_id bigint NOT NULL -); - - - -CREATE TABLE device_lists_outbound_pokes ( - destination text NOT NULL, - stream_id bigint NOT NULL, - user_id text NOT NULL, - device_id text NOT NULL, - sent boolean NOT NULL, - ts bigint NOT NULL -); - - - -CREATE TABLE device_lists_remote_cache ( - user_id text NOT NULL, - device_id text NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE device_lists_remote_extremeties ( - user_id text NOT NULL, - stream_id text NOT NULL -); - - - -CREATE TABLE device_lists_stream ( - stream_id bigint NOT NULL, - user_id text NOT NULL, - device_id text NOT NULL -); - - - -CREATE TABLE device_max_stream_id ( - stream_id bigint NOT NULL -); - - - -CREATE TABLE devices ( - user_id text NOT NULL, - device_id text NOT NULL, - display_name text -); - - - -CREATE TABLE e2e_device_keys_json ( - user_id text NOT NULL, - device_id text NOT NULL, - ts_added_ms bigint NOT NULL, - key_json text NOT NULL -); - - - -CREATE TABLE e2e_one_time_keys_json ( - user_id text NOT NULL, - device_id text NOT NULL, - algorithm text NOT NULL, - key_id text NOT NULL, - ts_added_ms bigint NOT NULL, - key_json text NOT NULL -); - - - -CREATE TABLE e2e_room_keys ( - user_id text NOT NULL, - room_id text NOT NULL, - session_id text NOT NULL, - version bigint NOT NULL, - first_message_index integer, - forwarded_count integer, - is_verified boolean, - session_data text NOT NULL -); - - - -CREATE TABLE e2e_room_keys_versions ( - user_id text NOT NULL, - version bigint NOT NULL, - algorithm text NOT NULL, - auth_data text NOT NULL, - deleted smallint DEFAULT 0 NOT NULL -); - - - -CREATE TABLE erased_users ( - user_id text NOT NULL -); - - - -CREATE TABLE event_auth ( - event_id text NOT NULL, - auth_id text NOT NULL, - room_id text NOT NULL -); - - - -CREATE TABLE event_backward_extremities ( - event_id text NOT NULL, - room_id text NOT NULL -); - - - -CREATE TABLE event_edges ( - event_id text NOT NULL, - prev_event_id text NOT NULL, - room_id text NOT NULL, - is_state boolean NOT NULL -); - - - -CREATE TABLE event_forward_extremities ( - event_id text NOT NULL, - room_id text NOT NULL -); - - - -CREATE TABLE event_json ( - event_id text NOT NULL, - room_id text NOT NULL, - internal_metadata text NOT NULL, - json text NOT NULL, - format_version integer -); - - - -CREATE TABLE event_push_actions ( - room_id text NOT NULL, - event_id text NOT NULL, - user_id text NOT NULL, - profile_tag character varying(32), - actions text NOT NULL, - topological_ordering bigint, - stream_ordering bigint, - notif smallint, - highlight smallint -); - - - -CREATE TABLE event_push_actions_staging ( - event_id text NOT NULL, - user_id text NOT NULL, - actions text NOT NULL, - notif smallint NOT NULL, - highlight smallint NOT NULL -); - - - -CREATE TABLE event_push_summary ( - user_id text NOT NULL, - room_id text NOT NULL, - notif_count bigint NOT NULL, - stream_ordering bigint NOT NULL -); - - - -CREATE TABLE event_push_summary_stream_ordering ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - stream_ordering bigint NOT NULL, - CONSTRAINT event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - -CREATE TABLE event_reference_hashes ( - event_id text, - algorithm text, - hash bytea -); - - - -CREATE TABLE event_relations ( - event_id text NOT NULL, - relates_to_id text NOT NULL, - relation_type text NOT NULL, - aggregation_key text -); - - - -CREATE TABLE event_reports ( - id bigint NOT NULL, - received_ts bigint NOT NULL, - room_id text NOT 
NULL, - event_id text NOT NULL, - user_id text NOT NULL, - reason text, - content text -); - - - -CREATE TABLE event_search ( - event_id text, - room_id text, - sender text, - key text, - vector tsvector, - origin_server_ts bigint, - stream_ordering bigint -); - - - -CREATE TABLE event_to_state_groups ( - event_id text NOT NULL, - state_group bigint NOT NULL -); - - - -CREATE TABLE events ( - stream_ordering integer NOT NULL, - topological_ordering bigint NOT NULL, - event_id text NOT NULL, - type text NOT NULL, - room_id text NOT NULL, - content text, - unrecognized_keys text, - processed boolean NOT NULL, - outlier boolean NOT NULL, - depth bigint DEFAULT 0 NOT NULL, - origin_server_ts bigint, - received_ts bigint, - sender text, - contains_url boolean -); - - - -CREATE TABLE ex_outlier_stream ( - event_stream_ordering bigint NOT NULL, - event_id text NOT NULL, - state_group bigint NOT NULL -); - - - -CREATE TABLE federation_stream_position ( - type text NOT NULL, - stream_id integer NOT NULL -); - - - -CREATE TABLE group_attestations_remote ( - group_id text NOT NULL, - user_id text NOT NULL, - valid_until_ms bigint NOT NULL, - attestation_json text NOT NULL -); - - - -CREATE TABLE group_attestations_renewals ( - group_id text NOT NULL, - user_id text NOT NULL, - valid_until_ms bigint NOT NULL -); - - - -CREATE TABLE group_invites ( - group_id text NOT NULL, - user_id text NOT NULL -); - - - -CREATE TABLE group_roles ( - group_id text NOT NULL, - role_id text NOT NULL, - profile text NOT NULL, - is_public boolean NOT NULL -); - - - -CREATE TABLE group_room_categories ( - group_id text NOT NULL, - category_id text NOT NULL, - profile text NOT NULL, - is_public boolean NOT NULL -); - - - -CREATE TABLE group_rooms ( - group_id text NOT NULL, - room_id text NOT NULL, - is_public boolean NOT NULL -); - - - -CREATE TABLE group_summary_roles ( - group_id text NOT NULL, - role_id text NOT NULL, - role_order bigint NOT NULL, - CONSTRAINT group_summary_roles_role_order_check CHECK ((role_order > 0)) -); - - - -CREATE TABLE group_summary_room_categories ( - group_id text NOT NULL, - category_id text NOT NULL, - cat_order bigint NOT NULL, - CONSTRAINT group_summary_room_categories_cat_order_check CHECK ((cat_order > 0)) -); - - - -CREATE TABLE group_summary_rooms ( - group_id text NOT NULL, - room_id text NOT NULL, - category_id text NOT NULL, - room_order bigint NOT NULL, - is_public boolean NOT NULL, - CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0)) -); - - - -CREATE TABLE group_summary_users ( - group_id text NOT NULL, - user_id text NOT NULL, - role_id text NOT NULL, - user_order bigint NOT NULL, - is_public boolean NOT NULL -); - - - -CREATE TABLE group_users ( - group_id text NOT NULL, - user_id text NOT NULL, - is_admin boolean NOT NULL, - is_public boolean NOT NULL -); - - - -CREATE TABLE groups ( - group_id text NOT NULL, - name text, - avatar_url text, - short_description text, - long_description text, - is_public boolean NOT NULL, - join_policy text DEFAULT 'invite'::text NOT NULL -); - - - -CREATE TABLE guest_access ( - event_id text NOT NULL, - room_id text NOT NULL, - guest_access text NOT NULL -); - - - -CREATE TABLE history_visibility ( - event_id text NOT NULL, - room_id text NOT NULL, - history_visibility text NOT NULL -); - - - -CREATE TABLE local_group_membership ( - group_id text NOT NULL, - user_id text NOT NULL, - is_admin boolean NOT NULL, - membership text NOT NULL, - is_publicised boolean NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE 
local_group_updates ( - stream_id bigint NOT NULL, - group_id text NOT NULL, - user_id text NOT NULL, - type text NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE local_invites ( - stream_id bigint NOT NULL, - inviter text NOT NULL, - invitee text NOT NULL, - event_id text NOT NULL, - room_id text NOT NULL, - locally_rejected text, - replaced_by text -); - - - -CREATE TABLE local_media_repository ( - media_id text, - media_type text, - media_length integer, - created_ts bigint, - upload_name text, - user_id text, - quarantined_by text, - url_cache text, - last_access_ts bigint -); - - - -CREATE TABLE local_media_repository_thumbnails ( - media_id text, - thumbnail_width integer, - thumbnail_height integer, - thumbnail_type text, - thumbnail_method text, - thumbnail_length integer -); - - - -CREATE TABLE local_media_repository_url_cache ( - url text, - response_code integer, - etag text, - expires_ts bigint, - og text, - media_id text, - download_ts bigint -); - - - -CREATE TABLE monthly_active_users ( - user_id text NOT NULL, - "timestamp" bigint NOT NULL -); - - - -CREATE TABLE open_id_tokens ( - token text NOT NULL, - ts_valid_until_ms bigint NOT NULL, - user_id text NOT NULL -); - - - -CREATE TABLE presence ( - user_id text NOT NULL, - state character varying(20), - status_msg text, - mtime bigint -); - - - -CREATE TABLE presence_allow_inbound ( - observed_user_id text NOT NULL, - observer_user_id text NOT NULL -); - - - -CREATE TABLE presence_stream ( - stream_id bigint, - user_id text, - state text, - last_active_ts bigint, - last_federation_update_ts bigint, - last_user_sync_ts bigint, - status_msg text, - currently_active boolean -); - - - -CREATE TABLE profiles ( - user_id text NOT NULL, - displayname text, - avatar_url text -); - - - -CREATE TABLE public_room_list_stream ( - stream_id bigint NOT NULL, - room_id text NOT NULL, - visibility boolean NOT NULL, - appservice_id text, - network_id text -); - - - -CREATE TABLE push_rules ( - id bigint NOT NULL, - user_name text NOT NULL, - rule_id text NOT NULL, - priority_class smallint NOT NULL, - priority integer DEFAULT 0 NOT NULL, - conditions text NOT NULL, - actions text NOT NULL -); - - - -CREATE TABLE push_rules_enable ( - id bigint NOT NULL, - user_name text NOT NULL, - rule_id text NOT NULL, - enabled smallint -); - - - -CREATE TABLE push_rules_stream ( - stream_id bigint NOT NULL, - event_stream_ordering bigint NOT NULL, - user_id text NOT NULL, - rule_id text NOT NULL, - op text NOT NULL, - priority_class smallint, - priority integer, - conditions text, - actions text -); - - - -CREATE TABLE pusher_throttle ( - pusher bigint NOT NULL, - room_id text NOT NULL, - last_sent_ts bigint, - throttle_ms bigint -); - - - -CREATE TABLE pushers ( - id bigint NOT NULL, - user_name text NOT NULL, - access_token bigint, - profile_tag text NOT NULL, - kind text NOT NULL, - app_id text NOT NULL, - app_display_name text NOT NULL, - device_display_name text NOT NULL, - pushkey text NOT NULL, - ts bigint NOT NULL, - lang text, - data text, - last_stream_ordering integer, - last_success bigint, - failing_since bigint -); - - - -CREATE TABLE ratelimit_override ( - user_id text NOT NULL, - messages_per_second bigint, - burst_count bigint -); - - - -CREATE TABLE receipts_graph ( - room_id text NOT NULL, - receipt_type text NOT NULL, - user_id text NOT NULL, - event_ids text NOT NULL, - data text NOT NULL -); - - - -CREATE TABLE receipts_linearized ( - stream_id bigint NOT NULL, - room_id text NOT NULL, - receipt_type text NOT NULL, - 
user_id text NOT NULL, - event_id text NOT NULL, - data text NOT NULL -); - - - -CREATE TABLE received_transactions ( - transaction_id text, - origin text, - ts bigint, - response_code integer, - response_json bytea, - has_been_referenced smallint DEFAULT 0 -); - - - -CREATE TABLE redactions ( - event_id text NOT NULL, - redacts text NOT NULL -); - - - -CREATE TABLE rejections ( - event_id text NOT NULL, - reason text NOT NULL, - last_check text NOT NULL -); - - - -CREATE TABLE remote_media_cache ( - media_origin text, - media_id text, - media_type text, - created_ts bigint, - upload_name text, - media_length integer, - filesystem_id text, - last_access_ts bigint, - quarantined_by text -); - - - -CREATE TABLE remote_media_cache_thumbnails ( - media_origin text, - media_id text, - thumbnail_width integer, - thumbnail_height integer, - thumbnail_method text, - thumbnail_type text, - thumbnail_length integer, - filesystem_id text -); - - - -CREATE TABLE remote_profile_cache ( - user_id text NOT NULL, - displayname text, - avatar_url text, - last_check bigint NOT NULL -); - - - -CREATE TABLE room_account_data ( - user_id text NOT NULL, - room_id text NOT NULL, - account_data_type text NOT NULL, - stream_id bigint NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE room_alias_servers ( - room_alias text NOT NULL, - server text NOT NULL -); - - - -CREATE TABLE room_aliases ( - room_alias text NOT NULL, - room_id text NOT NULL, - creator text -); - - - -CREATE TABLE room_depth ( - room_id text NOT NULL, - min_depth integer NOT NULL -); - - - -CREATE TABLE room_memberships ( - event_id text NOT NULL, - user_id text NOT NULL, - sender text NOT NULL, - room_id text NOT NULL, - membership text NOT NULL, - forgotten integer DEFAULT 0, - display_name text, - avatar_url text -); - - - -CREATE TABLE room_names ( - event_id text NOT NULL, - room_id text NOT NULL, - name text NOT NULL -); - - - -CREATE TABLE room_state ( - room_id text NOT NULL, - join_rules text, - history_visibility text, - encryption text, - name text, - topic text, - avatar text, - canonical_alias text -); - - - -CREATE TABLE room_stats ( - room_id text NOT NULL, - ts bigint NOT NULL, - bucket_size integer NOT NULL, - current_state_events integer NOT NULL, - joined_members integer NOT NULL, - invited_members integer NOT NULL, - left_members integer NOT NULL, - banned_members integer NOT NULL, - state_events integer NOT NULL -); - - - -CREATE TABLE room_stats_earliest_token ( - room_id text NOT NULL, - token bigint NOT NULL -); - - - -CREATE TABLE room_tags ( - user_id text NOT NULL, - room_id text NOT NULL, - tag text NOT NULL, - content text NOT NULL -); - - - -CREATE TABLE room_tags_revisions ( - user_id text NOT NULL, - room_id text NOT NULL, - stream_id bigint NOT NULL -); - - - -CREATE TABLE rooms ( - room_id text NOT NULL, - is_public boolean, - creator text -); - - - -CREATE TABLE server_keys_json ( - server_name text NOT NULL, - key_id text NOT NULL, - from_server text NOT NULL, - ts_added_ms bigint NOT NULL, - ts_valid_until_ms bigint NOT NULL, - key_json bytea NOT NULL -); - - - -CREATE TABLE server_signature_keys ( - server_name text, - key_id text, - from_server text, - ts_added_ms bigint, - verify_key bytea, - ts_valid_until_ms bigint -); - - - -CREATE TABLE state_events ( - event_id text NOT NULL, - room_id text NOT NULL, - type text NOT NULL, - state_key text NOT NULL, - prev_state text -); - - - -CREATE TABLE stats_stream_pos ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - stream_id bigint, - CONSTRAINT 
stats_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - -CREATE TABLE stream_ordering_to_exterm ( - stream_ordering bigint NOT NULL, - room_id text NOT NULL, - event_id text NOT NULL -); - - - -CREATE TABLE threepid_guest_access_tokens ( - medium text, - address text, - guest_access_token text, - first_inviter text -); - - - -CREATE TABLE topics ( - event_id text NOT NULL, - room_id text NOT NULL, - topic text NOT NULL -); - - - -CREATE TABLE user_daily_visits ( - user_id text NOT NULL, - device_id text, - "timestamp" bigint NOT NULL -); - - - -CREATE TABLE user_directory ( - user_id text NOT NULL, - room_id text, - display_name text, - avatar_url text -); - - - -CREATE TABLE user_directory_search ( - user_id text NOT NULL, - vector tsvector -); - - - -CREATE TABLE user_directory_stream_pos ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - stream_id bigint, - CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - -CREATE TABLE user_filters ( - user_id text, - filter_id bigint, - filter_json bytea -); - - - -CREATE TABLE user_ips ( - user_id text NOT NULL, - access_token text NOT NULL, - device_id text, - ip text NOT NULL, - user_agent text NOT NULL, - last_seen bigint NOT NULL -); - - - -CREATE TABLE user_stats ( - user_id text NOT NULL, - ts bigint NOT NULL, - bucket_size integer NOT NULL, - public_rooms integer NOT NULL, - private_rooms integer NOT NULL -); - - - -CREATE TABLE user_threepid_id_server ( - user_id text NOT NULL, - medium text NOT NULL, - address text NOT NULL, - id_server text NOT NULL -); - - - -CREATE TABLE user_threepids ( - user_id text NOT NULL, - medium text NOT NULL, - address text NOT NULL, - validated_at bigint NOT NULL, - added_at bigint NOT NULL -); - - - -CREATE TABLE users ( - name text, - password_hash text, - creation_ts bigint, - admin smallint DEFAULT 0 NOT NULL, - upgrade_ts bigint, - is_guest smallint DEFAULT 0 NOT NULL, - appservice_id text, - consent_version text, - consent_server_notice_sent text, - user_type text -); - - - -CREATE TABLE users_in_public_rooms ( - user_id text NOT NULL, - room_id text NOT NULL -); - - - -CREATE TABLE users_pending_deactivation ( - user_id text NOT NULL -); - - - -CREATE TABLE users_who_share_private_rooms ( - user_id text NOT NULL, - other_user_id text NOT NULL, - room_id text NOT NULL -); - - - -ALTER TABLE ONLY access_tokens - ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id); - - - -ALTER TABLE ONLY access_tokens - ADD CONSTRAINT access_tokens_token_key UNIQUE (token); - - - -ALTER TABLE ONLY account_data - ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type); - - - -ALTER TABLE ONLY account_validity - ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id); - - - -ALTER TABLE ONLY application_services_state - ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id); - - - -ALTER TABLE ONLY application_services_txns - ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); - - - -ALTER TABLE ONLY appservice_stream_position - ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); - - - -ALTER TABLE ONLY current_state_events - ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY current_state_events - ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key); - - - -ALTER TABLE ONLY destinations - ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination); - - - -ALTER TABLE ONLY devices - ADD CONSTRAINT device_uniqueness 
UNIQUE (user_id, device_id); - - - -ALTER TABLE ONLY e2e_device_keys_json - ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id); - - - -ALTER TABLE ONLY e2e_one_time_keys_json - ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id); - - - -ALTER TABLE ONLY event_backward_extremities - ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); - - - -ALTER TABLE ONLY event_edges - ADD CONSTRAINT event_edges_event_id_prev_event_id_room_id_is_state_key UNIQUE (event_id, prev_event_id, room_id, is_state); - - - -ALTER TABLE ONLY event_forward_extremities - ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); - - - -ALTER TABLE ONLY event_push_actions - ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag); - - - -ALTER TABLE ONLY event_json - ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY event_push_summary_stream_ordering - ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock); - - - -ALTER TABLE ONLY event_reference_hashes - ADD CONSTRAINT event_reference_hashes_event_id_algorithm_key UNIQUE (event_id, algorithm); - - - -ALTER TABLE ONLY event_reports - ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id); - - - -ALTER TABLE ONLY event_to_state_groups - ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY events - ADD CONSTRAINT events_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY events - ADD CONSTRAINT events_pkey PRIMARY KEY (stream_ordering); - - - -ALTER TABLE ONLY ex_outlier_stream - ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering); - - - -ALTER TABLE ONLY group_roles - ADD CONSTRAINT group_roles_group_id_role_id_key UNIQUE (group_id, role_id); - - - -ALTER TABLE ONLY group_room_categories - ADD CONSTRAINT group_room_categories_group_id_category_id_key UNIQUE (group_id, category_id); - - - -ALTER TABLE ONLY group_summary_roles - ADD CONSTRAINT group_summary_roles_group_id_role_id_role_order_key UNIQUE (group_id, role_id, role_order); - - - -ALTER TABLE ONLY group_summary_room_categories - ADD CONSTRAINT group_summary_room_categories_group_id_category_id_cat_orde_key UNIQUE (group_id, category_id, cat_order); - - - -ALTER TABLE ONLY group_summary_rooms - ADD CONSTRAINT group_summary_rooms_group_id_category_id_room_id_room_order_key UNIQUE (group_id, category_id, room_id, room_order); - - - -ALTER TABLE ONLY guest_access - ADD CONSTRAINT guest_access_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY history_visibility - ADD CONSTRAINT history_visibility_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY local_media_repository - ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id); - - - -ALTER TABLE ONLY local_media_repository_thumbnails - ADD CONSTRAINT local_media_repository_thumbn_media_id_thumbnail_width_thum_key UNIQUE (media_id, thumbnail_width, thumbnail_height, thumbnail_type); - - - -ALTER TABLE ONLY user_threepids - ADD CONSTRAINT medium_address UNIQUE (medium, address); - - - -ALTER TABLE ONLY open_id_tokens - ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token); - - - -ALTER TABLE ONLY presence_allow_inbound - ADD CONSTRAINT presence_allow_inbound_observed_user_id_observer_user_id_key UNIQUE (observed_user_id, observer_user_id); - - - -ALTER TABLE ONLY presence - ADD CONSTRAINT presence_user_id_key UNIQUE (user_id); - - - -ALTER TABLE ONLY 
account_data_max_stream_id - ADD CONSTRAINT private_user_data_max_stream_id_lock_key UNIQUE (lock); - - - -ALTER TABLE ONLY profiles - ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id); - - - -ALTER TABLE ONLY push_rules_enable - ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id); - - - -ALTER TABLE ONLY push_rules_enable - ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id); - - - -ALTER TABLE ONLY push_rules - ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id); - - - -ALTER TABLE ONLY push_rules - ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id); - - - -ALTER TABLE ONLY pusher_throttle - ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id); - - - -ALTER TABLE ONLY pushers - ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name); - - - -ALTER TABLE ONLY pushers - ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id); - - - -ALTER TABLE ONLY receipts_graph - ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id); - - - -ALTER TABLE ONLY receipts_linearized - ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id); - - - -ALTER TABLE ONLY received_transactions - ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin); - - - -ALTER TABLE ONLY redactions - ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY rejections - ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY remote_media_cache - ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id); - - - -ALTER TABLE ONLY remote_media_cache_thumbnails - ADD CONSTRAINT remote_media_cache_thumbnails_media_origin_media_id_thumbna_key UNIQUE (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type); - - - -ALTER TABLE ONLY room_account_data - ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type); - - - -ALTER TABLE ONLY room_aliases - ADD CONSTRAINT room_aliases_room_alias_key UNIQUE (room_alias); - - - -ALTER TABLE ONLY room_depth - ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id); - - - -ALTER TABLE ONLY room_memberships - ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY room_names - ADD CONSTRAINT room_names_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY room_tags_revisions - ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id); - - - -ALTER TABLE ONLY room_tags - ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag); - - - -ALTER TABLE ONLY rooms - ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id); - - - -ALTER TABLE ONLY server_keys_json - ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server); - - - -ALTER TABLE ONLY server_signature_keys - ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id); - - - -ALTER TABLE ONLY state_events - ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id); - - -ALTER TABLE ONLY stats_stream_pos - ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock); - - - -ALTER TABLE ONLY topics - ADD CONSTRAINT topics_event_id_key UNIQUE (event_id); - - - -ALTER TABLE ONLY user_directory_stream_pos - ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock); - - - -ALTER TABLE ONLY users - ADD CONSTRAINT users_name_key UNIQUE (name); - - - -CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id); - - 
- -CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id); - - - -CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id); - - - -CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id); - - - -CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id); - - - -CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream USING btree (stream_id); - - - -CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id); - - - -CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text); - - - -CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id); - - - -CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id); - - - -CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id); - - - -CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id); - - - -CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id); - - - -CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id); - - - -CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success USING btree (destination, user_id, stream_id); - - - -CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id); - - - -CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id); - - - -CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id); - - - -CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id); - - - -CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id); - - - -CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id); - - - -CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING btree (user_id, device_id); - - - -CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys USING btree (user_id, room_id, session_id); - - - -CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version); - - - -CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id); - - - -CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id); - - - -CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id); - - - -CREATE INDEX ev_edges_id ON event_edges USING btree (event_id); - - - -CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id); - - - -CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id); - - - -CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id); - - - -CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id); - - - -CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false)); - - - -CREATE INDEX event_json_room_id ON event_json USING btree (room_id); - - - -CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING 
btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1); - - - -CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering); - - - -CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id); - - - -CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id); - - - -CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id); - - - -CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering); - - - -CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id); - - - -CREATE INDEX event_reference_hashes_id ON event_reference_hashes USING btree (event_id); - - - -CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id); - - - -CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key); - - - -CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id); - - - -CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id); - - - -CREATE INDEX event_search_fts_idx ON event_search USING gin (vector); - - - -CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group); - - - -CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering); - - - -CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering); - - - -CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering); - - - -CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote USING btree (group_id, user_id); - - - -CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote USING btree (user_id); - - - -CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote USING btree (valid_until_ms); - - - -CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals USING btree (group_id, user_id); - - - -CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals USING btree (user_id); - - - -CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals USING btree (valid_until_ms); - - - -CREATE UNIQUE INDEX group_invites_g_idx ON group_invites USING btree (group_id, user_id); - - - -CREATE INDEX group_invites_u_idx ON group_invites USING btree (user_id); - - - -CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms USING btree (group_id, room_id); - - - -CREATE INDEX group_rooms_r_idx ON group_rooms USING btree (room_id); - - - -CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms USING btree (group_id, room_id, category_id); - - - -CREATE INDEX group_summary_users_g_idx ON group_summary_users USING btree (group_id); - - - -CREATE UNIQUE INDEX group_users_g_idx ON group_users USING btree (group_id, user_id); - - - -CREATE INDEX group_users_u_idx ON group_users USING btree (user_id); - - - -CREATE UNIQUE INDEX groups_idx ON groups USING btree (group_id); - - - -CREATE INDEX local_group_membership_g_idx ON local_group_membership USING btree (group_id); - - - -CREATE INDEX local_group_membership_u_idx ON local_group_membership USING btree (user_id, group_id); - - - -CREATE INDEX local_invites_for_user_idx ON local_invites USING btree (invitee, locally_rejected, replaced_by, room_id); - - - -CREATE INDEX 
local_invites_id ON local_invites USING btree (stream_id); - - - -CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id); - - - -CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts); - - - -CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts); - - - -CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id); - - - -CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL); - - - -CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp"); - - - -CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id); - - - -CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms); - - - -CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id); - - - -CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); - - - -CREATE INDEX public_room_index ON rooms USING btree (is_public); - - - -CREATE INDEX public_room_list_stream_idx ON public_room_list_stream USING btree (stream_id); - - - -CREATE INDEX public_room_list_stream_rm_idx ON public_room_list_stream USING btree (room_id, stream_id); - - - -CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name); - - - -CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id); - - - -CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id); - - - -CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name); - - - -CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id); - - - -CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id); - - - -CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id); - - - -CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id); - - - -CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts); - - - -CREATE INDEX redactions_redacts ON redactions USING btree (redacts); - - - -CREATE INDEX remote_profile_cache_time ON remote_profile_cache USING btree (last_check); - - - -CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache USING btree (user_id); - - - -CREATE INDEX room_account_data_stream_id ON room_account_data USING btree (user_id, stream_id); - - - -CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias); - - - -CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id); - - - -CREATE INDEX room_depth_room ON room_depth USING btree (room_id); - - - -CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id); - - - -CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id); - - - -CREATE INDEX room_names_room_id ON room_names USING btree (room_id); - - - -CREATE UNIQUE INDEX room_state_room ON room_state USING btree (room_id); - - - -CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id); - - - -CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts); - - - -CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING 
btree (stream_ordering); - - - -CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering); - - - -CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address); - - - -CREATE INDEX topics_room_id ON topics USING btree (room_id); - - - -CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp"); - - - -CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp"); - - - -CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id); - - - -CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector); - - - -CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id); - - - -CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id); - - - -CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user_id, filter_id); - - - -CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen); - - - -CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen); - - - -CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen); - - - -CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip); - - - -CREATE UNIQUE INDEX user_stats_user_ts ON user_stats USING btree (user_id, ts); - - - -CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server); - - - -CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address); - - - -CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); - - - -CREATE INDEX users_creation_ts ON users USING btree (creation_ts); - - - -CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id); - - - -CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id); - - - -CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id); - - - -CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id); diff --git a/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite deleted file mode 100644 index 308124e531c3..000000000000 --- a/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite +++ /dev/null @@ -1,243 +0,0 @@ -CREATE TABLE application_services_state( as_id TEXT PRIMARY KEY, state VARCHAR(5), last_txn INTEGER ); -CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) ); -CREATE INDEX application_services_txns_id ON application_services_txns ( as_id ); -CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) ); -CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_user_id TEXT NOT NULL, UNIQUE (observed_user_id, observer_user_id) ); -CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) ); -CREATE TABLE access_tokens( id 
BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) ); -CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL ); -CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) ); -CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) ); -CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER ); -CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) ); -CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering ); -CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) ); -CREATE INDEX event_json_room_id ON event_json(room_id); -CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) ); -CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, UNIQUE (event_id), UNIQUE (room_id, type, state_key) ); -CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) ); -CREATE INDEX room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX room_memberships_user_id ON room_memberships (user_id); -CREATE TABLE topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, topic TEXT NOT NULL, UNIQUE (event_id) ); -CREATE INDEX topics_room_id ON topics(room_id); -CREATE TABLE room_names( event_id TEXT NOT NULL, room_id TEXT NOT NULL, name TEXT NOT NULL, UNIQUE (event_id) ); -CREATE INDEX room_names_room_id ON room_names(room_id); -CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT ); -CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) ); -CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) ); -CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) ); -CREATE INDEX push_rules_user_name on push_rules (user_name); -CREATE TABLE user_filters( user_id TEXT, filter_id BIGINT, filter_json bytea ); -CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id ); -CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) ); -CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name); -CREATE TABLE 
event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); -CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id); -CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id); -CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); -CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id); -CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id); -CREATE TABLE event_edges( event_id TEXT NOT NULL, prev_event_id TEXT NOT NULL, room_id TEXT NOT NULL, is_state BOOL NOT NULL, UNIQUE (event_id, prev_event_id, room_id, is_state) ); -CREATE INDEX ev_edges_id ON event_edges(event_id); -CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id); -CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) ); -CREATE INDEX room_depth_room ON room_depth(room_id); -CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) ); -CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) ); -CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); -CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id); -CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) ); -CREATE TABLE remote_media_cache_thumbnails ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); -CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, UNIQUE (event_id) ); -CREATE INDEX redactions_redacts ON redactions (redacts); -CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) ); -CREATE INDEX room_aliases_id ON room_aliases(room_id); -CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL ); -CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias); -CREATE TABLE event_reference_hashes ( event_id TEXT, algorithm TEXT, hash bytea, UNIQUE (event_id, algorithm) ); -CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id); -CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) ); -CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) ); -CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json 
TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) ); -CREATE TABLE receipts_graph( room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_ids TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id) ); -CREATE TABLE receipts_linearized ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_id TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id) ); -CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id ); -CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id ); -CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) ); -CREATE INDEX user_threepids_user_id ON user_threepids(user_id); -CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value ) -/* event_search(event_id,room_id,sender,"key",value) */; -CREATE TABLE guest_access( event_id TEXT NOT NULL, room_id TEXT NOT NULL, guest_access TEXT NOT NULL, UNIQUE (event_id) ); -CREATE TABLE history_visibility( event_id TEXT NOT NULL, room_id TEXT NOT NULL, history_visibility TEXT NOT NULL, UNIQUE (event_id) ); -CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) ); -CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) ); -CREATE TABLE IF NOT EXISTS "account_data_max_stream_id"( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT NOT NULL, CHECK (Lock='X') ); -CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) ); -CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) ); -CREATE INDEX account_data_stream_id on account_data(user_id, stream_id); -CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id); -CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering); -CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) ); -CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); -CREATE INDEX events_room_stream on events(room_id, stream_ordering); -CREATE INDEX public_room_index on rooms(is_public); -CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id ); -CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); -CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, 
last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN ); -CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id); -CREATE INDEX presence_stream_user_id ON presence_stream(user_id); -CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT ); -CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id); -CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id); -CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL ); -CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT ); -CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address); -CREATE TABLE local_invites( stream_id BIGINT NOT NULL, inviter TEXT NOT NULL, invitee TEXT NOT NULL, event_id TEXT NOT NULL, room_id TEXT NOT NULL, locally_rejected TEXT, replaced_by TEXT ); -CREATE INDEX local_invites_id ON local_invites(stream_id); -CREATE INDEX local_invites_for_user_idx ON local_invites(invitee, locally_rejected, replaced_by, room_id); -CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); -CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) ); -CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms); -CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) ); -CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT ); -CREATE TABLE devices ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, display_name TEXT, CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) ); -CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') ); -CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL ); -CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id); -CREATE INDEX received_transactions_ts ON received_transactions(ts); -CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL ); -CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id); -CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL ); -CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id); -CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL ); -CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT); -CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id ); -CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id ); -CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id 
TEXT NOT NULL ); -CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering ); -CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering ); -CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL ); -CREATE INDEX evauth_edges_id ON event_auth(event_id); -CREATE INDEX user_threepids_medium_address on user_threepids (medium, address); -CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL ); -CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id ); -CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id); -CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL ); -CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL ); -CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL ); -CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL ); -CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id); -CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL ); -CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id); -CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id); -CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL ); -CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id); -CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') ); -CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) ); -CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id); -CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT ); -CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id); -CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT ); -CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id); -CREATE TABLE device_lists_outbound_last_success ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL ); -CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success( destination, user_id, stream_id ); -CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); -CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value ) -/* user_directory_search(user_id,value) */; -CREATE TABLE blocked_rooms ( room_id TEXT NOT 
NULL, user_id TEXT NOT NULL ); -CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id); -CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT ); -CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts); -CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts); -CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id); -CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, is_public BOOLEAN NOT NULL ); -CREATE TABLE group_invites ( group_id TEXT NOT NULL, user_id TEXT NOT NULL ); -CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, is_public BOOLEAN NOT NULL ); -CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) ); -CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id); -CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, cat_order BIGINT NOT NULL, UNIQUE (group_id, category_id, cat_order), CHECK (cat_order > 0) ); -CREATE TABLE group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id) ); -CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL ); -CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id); -CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, role_order BIGINT NOT NULL, UNIQUE (group_id, role_id, role_order), CHECK (role_order > 0) ); -CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, role_id) ); -CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL ); -CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id); -CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id); -CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms); -CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL ); -CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); -CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id); -CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); -CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, is_publicised BOOLEAN NOT NULL, content TEXT NOT NULL ); -CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); -CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); -CREATE TABLE local_group_updates ( stream_id BIGINT NOT NULL, group_id TEXT NOT NULL, user_id TEXT NOT NULL, type TEXT NOT NULL, content TEXT NOT 
NULL ); -CREATE TABLE remote_profile_cache ( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, last_check BIGINT NOT NULL ); -CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id); -CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check); -CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL ); -CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id); -CREATE TABLE IF NOT EXISTS "groups" ( group_id TEXT NOT NULL, name TEXT, avatar_url TEXT, short_description TEXT, long_description TEXT, is_public BOOL NOT NULL , join_policy TEXT NOT NULL DEFAULT 'invite'); -CREATE UNIQUE INDEX groups_idx ON groups(group_id); -CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT ); -CREATE INDEX user_directory_room_idx ON user_directory(room_id); -CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id); -CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL ); -CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); -CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL ); -CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); -CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id); -CREATE INDEX group_users_u_idx ON group_users(user_id); -CREATE INDEX group_invites_u_idx ON group_invites(user_id); -CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); -CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); -CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL ); -CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp); -CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp); -CREATE TABLE erased_users ( user_id TEXT NOT NULL ); -CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id); -CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL ); -CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id); -CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp); -CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); -CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL ); -CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); -CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL ); -CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id); -CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id); -CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id); -CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server 
TEXT NOT NULL ); -CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server ); -CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL ); -CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id); -CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT ); -CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT ); -CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id); -CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key); -CREATE TABLE stats_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); -CREATE TABLE user_stats ( user_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, public_rooms INT NOT NULL, private_rooms INT NOT NULL ); -CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts); -CREATE TABLE room_stats ( room_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, current_state_events INT NOT NULL, joined_members INT NOT NULL, invited_members INT NOT NULL, left_members INT NOT NULL, banned_members INT NOT NULL, state_events INT NOT NULL ); -CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts); -CREATE TABLE room_state ( room_id TEXT NOT NULL, join_rules TEXT, history_visibility TEXT, encryption TEXT, name TEXT, topic TEXT, avatar TEXT, canonical_alias TEXT ); -CREATE UNIQUE INDEX room_state_room ON room_state(room_id); -CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL ); -CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id); -CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id); -CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen); -CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering); -CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); -CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); -CREATE INDEX current_state_events_member_index ON current_state_events (state_key); -CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id); -CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id); -CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts); -CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen); -CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen); -CREATE INDEX users_creation_ts ON users (creation_ts); -CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group); -CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id); -CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id); -CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip); diff --git a/synapse/storage/schema/main/full_schemas/54/stream_positions.sql b/synapse/storage/schema/main/full_schemas/54/stream_positions.sql deleted file mode 100644 index 91d21b2921fe..000000000000 --- a/synapse/storage/schema/main/full_schemas/54/stream_positions.sql +++ /dev/null @@ 
-1,8 +0,0 @@ - -INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events; -INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1); -INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; -INSERT INTO user_directory_stream_pos (stream_id) VALUES (0); -INSERT INTO stats_stream_pos (stream_id) VALUES (0); -INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); --- device_max_stream_id is handled separately in 56/device_stream_id_insert.sql \ No newline at end of file diff --git a/synapse/storage/schema/state/full_schemas/54/full.sql b/synapse/storage/schema/state/full_schemas/54/full.sql deleted file mode 100644 index 35f97d6b3d47..000000000000 --- a/synapse/storage/schema/state/full_schemas/54/full.sql +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2019 The Matrix.org Foundation C.I.C - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE state_groups ( - id BIGINT PRIMARY KEY, - room_id TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE state_groups_state ( - state_group BIGINT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE state_group_edges ( - state_group BIGINT NOT NULL, - prev_state_group BIGINT NOT NULL -); - -CREATE INDEX state_group_edges_idx ON state_group_edges (state_group); -CREATE INDEX state_group_edges_prev_idx ON state_group_edges (prev_state_group); -CREATE INDEX state_groups_state_type_idx ON state_groups_state (state_group, type, state_key); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d2c874b9a8d7..bd3c81827f69 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -133,6 +133,15 @@ def get_current_token_for_writer(self, instance_name: str) -> int: """ raise NotImplementedError() + @abc.abstractmethod + def get_minimal_local_current_token(self) -> int: + """Tries to return a minimal current token for the local instance, + i.e. for writers this would be the last successful write. + + If local instance is not a writer (or has written yet) then falls back + to returning the normal "current token". + """ + @abc.abstractmethod def get_next(self) -> AsyncContextManager[int]: """ @@ -312,6 +321,9 @@ def get_current_token(self) -> int: def get_current_token_for_writer(self, instance_name: str) -> int: return self.get_current_token() + def get_minimal_local_current_token(self) -> int: + return self.get_current_token() + class MultiWriterIdGenerator(AbstractStreamIdGenerator): """Generates and tracks stream IDs for a stream with multiple writers. @@ -408,6 +420,11 @@ def __init__( # The maximum stream ID that we have seen been allocated across any writer. self._max_seen_allocated_stream_id = 1 + # The maximum position of the local instance. 
This can be higher than + # the corresponding position in `current_positions` table when there are + # no active writes in progress. + self._max_position_of_local_instance = self._max_seen_allocated_stream_id + self._sequence_gen = PostgresSequenceGenerator(sequence_name) # We check that the table and sequence haven't diverged. @@ -427,6 +444,16 @@ def __init__( self._current_positions.values(), default=1 ) + # For the case where `stream_positions` is not up to date, + # `_persisted_upto_position` may be higher. + self._max_seen_allocated_stream_id = max( + self._max_seen_allocated_stream_id, self._persisted_upto_position + ) + + # Bump our local maximum position now that we've loaded things from the + # DB. + self._max_position_of_local_instance = self._max_seen_allocated_stream_id + if not writers: # If there have been no explicit writers given then any instance can # write to the stream. In which case, let's pre-seed our own @@ -545,6 +572,14 @@ def sort_by_stream_id_key_func(row: Tuple[str, int]) -> int: if instance == self._instance_name: self._current_positions[instance] = stream_id + if self._writers: + # If we have explicit writers then make sure that each instance has + # a position. + for writer in self._writers: + self._current_positions.setdefault( + writer, self._persisted_upto_position + ) + cur.close() def _load_next_id_txn(self, txn: Cursor) -> int: @@ -615,8 +650,8 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: next_id = self._load_next_id_txn(txn) - txn.call_after(self._mark_id_as_finished, next_id) - txn.call_on_exception(self._mark_id_as_finished, next_id) + txn.call_after(self._mark_ids_as_finished, [next_id]) + txn.call_on_exception(self._mark_ids_as_finished, [next_id]) txn.call_after(self._notifier.notify_replication) # Update the `stream_positions` table with newly updated stream @@ -636,14 +671,50 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: return self._return_factor * next_id - def _mark_id_as_finished(self, next_id: int) -> None: - """The ID has finished being processed so we should advance the + def get_next_mult_txn(self, txn: LoggingTransaction, n: int) -> List[int]: + """ + Usage: + + stream_id = stream_id_gen.get_next_txn(txn) + # ... persist event ... + """ + + # If we have a list of instances that are allowed to write to this + # stream, make sure we're in it. + if self._writers and self._instance_name not in self._writers: + raise Exception("Tried to allocate stream ID on non-writer") + + next_ids = self._load_next_mult_id_txn(txn, n) + + txn.call_after(self._mark_ids_as_finished, next_ids) + txn.call_on_exception(self._mark_ids_as_finished, next_ids) + txn.call_after(self._notifier.notify_replication) + + # Update the `stream_positions` table with newly updated stream + # ID (unless self._writers is not set in which case we don't + # bother, as nothing will read it). + # + # We only do this on the success path so that the persisted current + # position points to a persisted row with the correct instance name. + if self._writers: + txn.call_after( + run_as_background_process, + "MultiWriterIdGenerator._update_table", + self._db.runInteraction, + "MultiWriterIdGenerator._update_table", + self._update_stream_positions_table_txn, + ) + + return [self._return_factor * next_id for next_id in next_ids] + + def _mark_ids_as_finished(self, next_ids: List[int]) -> None: + """These IDs have finished being processed so we should advance the current position if possible. 
""" with self._lock: - self._unfinished_ids.discard(next_id) - self._finished_ids.add(next_id) + self._unfinished_ids.difference_update(next_ids) + self._finished_ids.update(next_ids) new_cur: Optional[int] = None @@ -688,8 +759,14 @@ def _mark_id_as_finished(self, next_id: int) -> None: if new_cur: curr = self._current_positions.get(self._instance_name, 0) self._current_positions[self._instance_name] = max(curr, new_cur) + self._max_position_of_local_instance = max( + curr, new_cur, self._max_position_of_local_instance + ) - self._add_persisted_position(next_id) + # TODO Can we call this for just the last position or somehow batch + # _add_persisted_position. + for next_id in next_ids: + self._add_persisted_position(next_id) def get_current_token(self) -> int: return self.get_persisted_upto_position() @@ -702,10 +779,26 @@ def get_current_token_for_writer(self, instance_name: str) -> int: # persisted up to position. This stops Synapse from doing a full table # scan when a new writer announces itself over replication. with self._lock: - return self._return_factor * self._current_positions.get( + if self._instance_name == instance_name: + return self._return_factor * self._max_position_of_local_instance + + pos = self._current_positions.get( instance_name, self._persisted_upto_position ) + # We want to return the maximum "current token" that we can for a + # writer, this helps ensure that streams progress as fast as + # possible. + pos = max(pos, self._persisted_upto_position) + + return self._return_factor * pos + + def get_minimal_local_current_token(self) -> int: + with self._lock: + return self._return_factor * self._current_positions.get( + self._instance_name, self._persisted_upto_position + ) + def get_positions(self) -> Dict[str, int]: """Get a copy of the current positon map. @@ -774,6 +867,18 @@ def _add_persisted_position(self, new_id: int) -> None: self._persisted_upto_position = max(min_curr, self._persisted_upto_position) + # Advance our local max position. + self._max_position_of_local_instance = max( + self._max_position_of_local_instance, self._persisted_upto_position + ) + + if not self._unfinished_ids and not self._in_flight_fetches: + # If we don't have anything in flight, it's safe to advance to the + # max seen stream ID. + self._max_position_of_local_instance = max( + self._max_seen_allocated_stream_id, self._max_position_of_local_instance + ) + # We now iterate through the seen positions, discarding those that are # less than the current min positions, and incrementing the min position # if its exactly one greater. 
@@ -867,8 +972,7 @@ async def __aexit__( exc: Optional[BaseException], tb: Optional[TracebackType], ) -> bool: - for i in self.stream_ids: - self.id_gen._mark_id_as_finished(i) + self.id_gen._mark_ids_as_finished(self.stream_ids) self.notifier.notify_replication() diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 609a0978a9ec..d0bb83b18447 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -23,7 +23,7 @@ from synapse.handlers.typing import TypingNotificationEventSource from synapse.logging.opentracing import trace from synapse.streams import EventSource -from synapse.types import StreamKeyType, StreamToken +from synapse.types import MultiWriterStreamToken, StreamKeyType, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer @@ -111,7 +111,7 @@ async def get_current_token_for_pagination(self, room_id: str) -> StreamToken: room_key=await self.sources.room.get_current_key_for_room(room_id), presence_key=0, typing_key=0, - receipt_key=0, + receipt_key=MultiWriterStreamToken(stream=0), account_data_key=0, push_rules_key=0, to_device_key=0, diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 09a88c86a7a4..4c5b26ad935b 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -695,6 +695,90 @@ async def to_string(self, store: "DataStore") -> str: return "s%d" % (self.stream,) +@attr.s(frozen=True, slots=True, order=False) +class MultiWriterStreamToken(AbstractMultiWriterStreamToken): + """A basic stream token class for streams that supports multiple writers.""" + + @classmethod + async def parse(cls, store: "DataStore", string: str) -> "MultiWriterStreamToken": + try: + if string[0].isdigit(): + return cls(stream=int(string)) + if string[0] == "m": + parts = string[1:].split("~") + stream = int(parts[0]) + + instance_map = {} + for part in parts[1:]: + key, value = part.split(".") + instance_id = int(key) + pos = int(value) + + instance_name = await store.get_name_from_instance_id(instance_id) + instance_map[instance_name] = pos + + return cls( + stream=stream, + instance_map=immutabledict(instance_map), + ) + except CancelledError: + raise + except Exception: + pass + raise SynapseError(400, "Invalid stream token %r" % (string,)) + + async def to_string(self, store: "DataStore") -> str: + if self.instance_map: + entries = [] + for name, pos in self.instance_map.items(): + if pos <= self.stream: + # Ignore instances who are below the minimum stream position + # (we might know they've advanced without seeing a recent + # write from them). + continue + + instance_id = await store.get_id_for_instance(name) + entries.append(f"{instance_id}.{pos}") + + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + else: + return str(self.stream) + + @staticmethod + def is_stream_position_in_range( + low: Optional["AbstractMultiWriterStreamToken"], + high: Optional["AbstractMultiWriterStreamToken"], + instance_name: Optional[str], + pos: int, + ) -> bool: + """Checks if a given persisted position is between the two given tokens. + + If `instance_name` is None then the row was persisted before multi + writer support. 
+ """ + + if low: + if instance_name: + low_stream = low.instance_map.get(instance_name, low.stream) + else: + low_stream = low.stream + + if pos <= low_stream: + return False + + if high: + if instance_name: + high_stream = high.instance_map.get(instance_name, high.stream) + else: + high_stream = high.stream + + if high_stream < pos: + return False + + return True + + class StreamKeyType(Enum): """Known stream types. @@ -776,7 +860,9 @@ class StreamToken: ) presence_key: int typing_key: int - receipt_key: int + receipt_key: MultiWriterStreamToken = attr.ib( + validator=attr.validators.instance_of(MultiWriterStreamToken) + ) account_data_key: int push_rules_key: int to_device_key: int @@ -799,8 +885,31 @@ async def from_string(cls, store: "DataStore", string: str) -> "StreamToken": while len(keys) < len(attr.fields(cls)): # i.e. old token from before receipt_key keys.append("0") + + ( + room_key, + presence_key, + typing_key, + receipt_key, + account_data_key, + push_rules_key, + to_device_key, + device_list_key, + groups_key, + un_partial_stated_rooms_key, + ) = keys + return cls( - await RoomStreamToken.parse(store, keys[0]), *(int(k) for k in keys[1:]) + room_key=await RoomStreamToken.parse(store, room_key), + presence_key=int(presence_key), + typing_key=int(typing_key), + receipt_key=await MultiWriterStreamToken.parse(store, receipt_key), + account_data_key=int(account_data_key), + push_rules_key=int(push_rules_key), + to_device_key=int(to_device_key), + device_list_key=int(device_list_key), + groups_key=int(groups_key), + un_partial_stated_rooms_key=int(un_partial_stated_rooms_key), ) except CancelledError: raise @@ -813,7 +922,7 @@ async def to_string(self, store: "DataStore") -> str: await self.room_key.to_string(store), str(self.presence_key), str(self.typing_key), - str(self.receipt_key), + await self.receipt_key.to_string(store), str(self.account_data_key), str(self.push_rules_key), str(self.to_device_key), @@ -841,6 +950,11 @@ def copy_and_advance(self, key: StreamKeyType, new_value: Any) -> "StreamToken": StreamKeyType.ROOM, self.room_key.copy_and_advance(new_value) ) return new_token + elif key == StreamKeyType.RECEIPT: + new_token = self.copy_and_replace( + StreamKeyType.RECEIPT, self.receipt_key.copy_and_advance(new_value) + ) + return new_token new_token = self.copy_and_replace(key, new_value) new_id = new_token.get_field(key) @@ -858,6 +972,10 @@ def copy_and_replace(self, key: StreamKeyType, new_value: Any) -> "StreamToken": def get_field(self, key: Literal[StreamKeyType.ROOM]) -> RoomStreamToken: ... + @overload + def get_field(self, key: Literal[StreamKeyType.RECEIPT]) -> MultiWriterStreamToken: + ... + @overload def get_field( self, @@ -866,7 +984,6 @@ def get_field( StreamKeyType.DEVICE_LIST, StreamKeyType.PRESENCE, StreamKeyType.PUSH_RULES, - StreamKeyType.RECEIPT, StreamKeyType.TO_DEVICE, StreamKeyType.TYPING, StreamKeyType.UN_PARTIAL_STATED_ROOMS, @@ -875,15 +992,21 @@ def get_field( ... @overload - def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + def get_field( + self, key: StreamKeyType + ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: ... 
- def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + def get_field( + self, key: StreamKeyType + ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: """Returns the stream ID for the given key.""" return getattr(self, key.value) -StreamToken.START = StreamToken(RoomStreamToken(stream=0), 0, 0, 0, 0, 0, 0, 0, 0, 0) +StreamToken.START = StreamToken( + RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0 +) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9f3b8741c1db..8d9df352b25f 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -93,7 +93,7 @@ class Clock: _reactor: IReactorTime = attr.ib() - @defer.inlineCallbacks # type: ignore[arg-type] # Issue in Twisted's type annotations + @defer.inlineCallbacks def sleep(self, seconds: float) -> "Generator[Deferred[float], Any, Any]": d: defer.Deferred[float] = defer.Deferred() with context.PreserveLoggingContext(): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 0cbeb0c365d6..8a55e4e41d1c 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -345,6 +345,7 @@ async def yieldable_gather_results_delaying_cancellation( T1 = TypeVar("T1") T2 = TypeVar("T2") T3 = TypeVar("T3") +T4 = TypeVar("T4") @overload @@ -380,6 +381,19 @@ def gather_results( ... +@overload +def gather_results( + deferredList: Tuple[ + "defer.Deferred[T1]", + "defer.Deferred[T2]", + "defer.Deferred[T3]", + "defer.Deferred[T4]", + ], + consumeErrors: bool = ..., +) -> "defer.Deferred[Tuple[T1, T2, T3, T4]]": + ... + + def gather_results( # type: ignore[misc] deferredList: Tuple["defer.Deferred[T1]", ...], consumeErrors: bool = False, diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index f7cead9e1206..6f008734a061 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -189,7 +189,8 @@ def check_requirements(extra: Optional[str] = None) -> None: errors.append(_not_installed(requirement, extra)) else: if dist.version is None: - # This shouldn't happen---it suggests a borked virtualenv. (See #12223) + # This shouldn't happen---it suggests a borked virtualenv. (See + # https://github.com/matrix-org/synapse/issues/12223) # Try to give a vaguely helpful error message anyway. # Type-ignore: the annotations don't reflect reality: see # https://github.com/python/typeshed/issues/7513 diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 46771a401b50..26b46be5e1f1 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -13,7 +13,7 @@ # limitations under the License. import queue -from typing import BinaryIO, Optional, Union, cast +from typing import Any, BinaryIO, Optional, Union, cast from twisted.internet import threads from twisted.internet.defer import Deferred @@ -58,7 +58,9 @@ def __init__(self, file_obj: BinaryIO, reactor: ISynapseReactor) -> None: self._bytes_queue: queue.Queue[Optional[bytes]] = queue.Queue() # Deferred that is resolved when finished writing - self._finished_deferred: Optional[Deferred[None]] = None + # + # This is really Deferred[None], but mypy doesn't seem to like that. + self._finished_deferred: Optional[Deferred[Any]] = None # If the _writer thread throws an exception it gets stored here. 
self._write_exception: Optional[Exception] = None @@ -80,9 +82,13 @@ def registerProducer( self.streaming = streaming self._finished_deferred = run_in_background( threads.deferToThreadPool, - self._reactor, - self._reactor.getThreadPool(), - self._writer, + # mypy seems to get confused with the chaining of ParamSpec from + # run_in_background to deferToThreadPool. + # + # For Twisted trunk, ignore arg-type; for Twisted release ignore unused-ignore. + self._reactor, # type: ignore[arg-type,unused-ignore] + self._reactor.getThreadPool(), # type: ignore[arg-type,unused-ignore] + self._writer, # type: ignore[arg-type,unused-ignore] ) if not streaming: self._producer.resumeProducing() diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index a0efb96d3b46..f4c0194af02f 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -135,3 +135,54 @@ def sorted_topologically( degree_map[edge] -= 1 if degree_map[edge] == 0: heapq.heappush(zero_degree, edge) + + +def sorted_topologically_batched( + nodes: Iterable[T], + graph: Mapping[T, Collection[T]], +) -> Generator[Collection[T], None, None]: + r"""Walk the graph topologically, returning batches of nodes where all nodes + that references it have been previously returned. + + For example, given the following graph: + + A + / \ + B C + \ / + D + + This function will return: `[[A], [B, C], [D]]`. + + This function is useful for e.g. batch persisting events in an auth chain, + where we can only persist an event if all its auth events have already been + persisted. + """ + + degree_map = {node: 0 for node in nodes} + reverse_graph: Dict[T, Set[T]] = {} + + for node, edges in graph.items(): + if node not in degree_map: + continue + + for edge in set(edges): + if edge in degree_map: + degree_map[node] += 1 + + reverse_graph.setdefault(edge, set()).add(node) + reverse_graph.setdefault(node, set()) + + zero_degree = [node for node, degree in degree_map.items() if degree == 0] + + while zero_degree: + new_zero_degree = [] + for node in zero_degree: + for edge in reverse_graph.get(node, []): + if edge in degree_map: + degree_map[edge] -= 1 + if degree_map[edge] == 0: + new_zero_degree.append(edge) + + yield zero_degree + zero_degree = new_zero_degree diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index caf13b3474be..b254d3f84cf9 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -71,7 +71,7 @@ class TaskScheduler: # Time before a complete or failed task is deleted from the DB KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000 # 1 week # Maximum number of tasks that can run at the same time - MAX_CONCURRENT_RUNNING_TASKS = 10 + MAX_CONCURRENT_RUNNING_TASKS = 5 # Time from the last task update after which we will log a warning LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000 # 24hrs @@ -193,7 +193,7 @@ async def update_task( result: Optional[JsonMapping] = None, error: Optional[str] = None, ) -> bool: - """Update some task associated values. This is exposed publically so it can + """Update some task associated values. This is exposed publicly so it can be used inside task functions, mainly to update the result and be able to resume a task at a specific step after a restart of synapse. @@ -377,7 +377,7 @@ async def wrapper() -> None: self._running_tasks.remove(task.id) # Try launch a new task since we've finished with this one. 
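
The `sorted_topologically_batched` helper added to `synapse/util/iterutils.py` above can be exercised directly: the graph maps each node to the nodes it references, and every yielded batch contains only nodes whose referenced nodes appeared in earlier batches, so the members of a batch could in principle be persisted concurrently. A small usage sketch using the diamond graph from its docstring:

    from synapse.util.iterutils import sorted_topologically_batched

    # B and C reference A; D references B and C.
    graph = {"A": [], "B": ["A"], "C": ["A"], "D": ["B", "C"]}

    batches = [set(batch) for batch in sorted_topologically_batched(graph.keys(), graph)]
    assert batches == [{"A"}, {"B", "C"}, {"D"}]
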
- self._clock.call_later(1, self._launch_scheduled_tasks) + self._clock.call_later(0.1, self._launch_scheduled_tasks) if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return diff --git a/sytest-blacklist b/sytest-blacklist index d5fa36cec7ae..9ec0cecfd4ce 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -29,5 +29,5 @@ We can't peek into rooms with joined history_visibility Local users can peek by room alias Peeked rooms only turn up in the sync for the device who peeked them -# Validation needs to be added to Synapse: #10554 +# Validation needs to be added to Synapse: https://github.com/matrix-org/synapse/issues/10554 Rejects invalid device keys diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 5c91031746e4..b1a9db02109b 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -22,15 +22,7 @@ from tests.server import get_clock, setup_test_homeserver from tests.unittest import TestCase, skip_unless -from tests.utils import default_config - -try: - import authlib # noqa: F401 - - HAS_AUTHLIB = True -except ImportError: - HAS_AUTHLIB = False - +from tests.utils import HAS_AUTHLIB, default_config # These are a few constants that are used as config parameters in the tests. SERVER_NAME = "test" diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 75ae740b435d..08214b001316 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -100,7 +100,6 @@ def get_destination_room(self, room: str, destination: str = "host2") -> dict: event_id, stream_ordering = self.get_success( self.hs.get_datastores().main.db_pool.execute( "test:get_destination_rooms", - None, """ SELECT event_id, stream_ordering FROM destination_rooms dr diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index caf04b54cbca..2c970fc8279c 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -478,7 +478,7 @@ def test_upload_signatures(self) -> None: # expect two edus, in one or two transactions. We don't know what order the # devices will be updated. 
self.assertEqual(len(self.edus), 2) - stream_id = None # FIXME: there is a discontinuity in the stream IDs: see #7142 + stream_id = None # FIXME: there is a discontinuity in the stream IDs: see https://github.com/matrix-org/synapse/issues/7142 for edu in self.edus: self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE) c = edu["content"] diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 867dbd600186..78646cb5dccc 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -31,7 +31,12 @@ from synapse.handlers.appservice import ApplicationServicesHandler from synapse.rest.client import login, receipts, register, room, sendtodevice from synapse.server import HomeServer -from synapse.types import JsonDict, RoomStreamToken, StreamKeyType +from synapse.types import ( + JsonDict, + MultiWriterStreamToken, + RoomStreamToken, + StreamKeyType, +) from synapse.util import Clock from synapse.util.stringutils import random_string @@ -156,6 +161,7 @@ def test_query_room_alias_exists(self) -> None: result = self.successResultOf( defer.ensureDeferred(self.handler.query_room_alias_exists(room_alias)) ) + assert result is not None self.mock_as_api.query_alias.assert_called_once_with( interested_service, room_alias_str @@ -304,7 +310,9 @@ def test_notify_interested_services_ephemeral(self) -> None: ) self.handler.notify_interested_services_ephemeral( - StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, + MultiWriterStreamToken(stream=580), + ["@fakerecipient:example.com"], ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] @@ -332,7 +340,9 @@ def test_notify_interested_services_ephemeral_out_of_order(self) -> None: ) self.handler.notify_interested_services_ephemeral( - StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, + MultiWriterStreamToken(stream=580), + ["@fakerecipient:example.com"], ) # This method will be called, but with an empty list of events self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( @@ -635,7 +645,7 @@ def test_sending_read_receipt_batches_to_application_services(self) -> None: self.hs.get_application_service_handler()._notify_interested_services_ephemeral( services=[interested_appservice], stream_key=StreamKeyType.RECEIPT, - new_token=stream_token, + new_token=MultiWriterStreamToken(stream=stream_token), users=[self.exclusive_as_user], ) ) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index c5556f284491..07eb63f95efb 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -174,6 +174,164 @@ def test_claim_one_time_key(self) -> None: }, ) + def test_claim_one_time_key_bulk(self) -> None: + """Like test_claim_one_time_key but claims multiple keys in one handler call.""" + # Apologies to the reader. This test is a little too verbose. It is particularly + # tricky to make assertions neatly with all these nested dictionaries in play. + + # Three users with two devices each. Each device uses two algorithms. + # Each algorithm is invoked with two keys. 
+ alice = f"@alice:{self.hs.hostname}" + brian = f"@brian:{self.hs.hostname}" + chris = f"@chris:{self.hs.hostname}" + one_time_keys = { + alice: { + "alice_dev_1": { + "alg1:k1": {"dummy_id": 1}, + "alg1:k2": {"dummy_id": 2}, + "alg2:k3": {"dummy_id": 3}, + "alg2:k4": {"dummy_id": 4}, + }, + "alice_dev_2": { + "alg1:k5": {"dummy_id": 5}, + "alg1:k6": {"dummy_id": 6}, + "alg2:k7": {"dummy_id": 7}, + "alg2:k8": {"dummy_id": 8}, + }, + }, + brian: { + "brian_dev_1": { + "alg1:k9": {"dummy_id": 9}, + "alg1:k10": {"dummy_id": 10}, + "alg2:k11": {"dummy_id": 11}, + "alg2:k12": {"dummy_id": 12}, + }, + "brian_dev_2": { + "alg1:k13": {"dummy_id": 13}, + "alg1:k14": {"dummy_id": 14}, + "alg2:k15": {"dummy_id": 15}, + "alg2:k16": {"dummy_id": 16}, + }, + }, + chris: { + "chris_dev_1": { + "alg1:k17": {"dummy_id": 17}, + "alg1:k18": {"dummy_id": 18}, + "alg2:k19": {"dummy_id": 19}, + "alg2:k20": {"dummy_id": 20}, + }, + "chris_dev_2": { + "alg1:k21": {"dummy_id": 21}, + "alg1:k22": {"dummy_id": 22}, + "alg2:k23": {"dummy_id": 23}, + "alg2:k24": {"dummy_id": 24}, + }, + }, + } + for user_id, devices in one_time_keys.items(): + for device_id, keys_dict in devices.items(): + counts = self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + {"one_time_keys": keys_dict}, + ) + ) + # The upload should report 2 keys per algorithm. + expected_counts = { + "one_time_key_counts": { + # See count_e2e_one_time_keys for why this is hardcoded. + "signed_curve25519": 0, + "alg1": 2, + "alg2": 2, + }, + } + self.assertEqual(counts, expected_counts) + + # Claim a variety of keys. + # Raw format, easier to make test assertions about. + claims_to_make = { + (alice, "alice_dev_1", "alg1"): 1, + (alice, "alice_dev_1", "alg2"): 2, + (alice, "alice_dev_2", "alg2"): 1, + (brian, "brian_dev_1", "alg1"): 2, + (brian, "brian_dev_2", "alg2"): 9001, + (chris, "chris_dev_2", "alg2"): 1, + } + # Convert to the format the handler wants. + query: Dict[str, Dict[str, Dict[str, int]]] = {} + for (user_id, device_id, algorithm), count in claims_to_make.items(): + query.setdefault(user_id, {}).setdefault(device_id, {})[algorithm] = count + claim_res = self.get_success( + self.handler.claim_one_time_keys( + query, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + + # No failures, please! + self.assertEqual(claim_res["failures"], {}) + + # Check that we get exactly the (user, device, algorithm)s we asked for. + got_otks = claim_res["one_time_keys"] + claimed_user_device_algorithms = { + (user_id, device_id, alg_key_id.split(":")[0]) + for user_id, devices in got_otks.items() + for device_id, key_dict in devices.items() + for alg_key_id in key_dict + } + self.assertEqual(claimed_user_device_algorithms, set(claims_to_make)) + + # Now check the keys we got are what we expected. + def assertExactlyOneOtk( + user_id: str, device_id: str, *alg_key_pairs: str + ) -> None: + key_dict = got_otks[user_id][device_id] + found = 0 + for alg_key in alg_key_pairs: + if alg_key in key_dict: + expected_key_json = one_time_keys[user_id][device_id][alg_key] + self.assertEqual(key_dict[alg_key], expected_key_json) + found += 1 + self.assertEqual(found, 1) + + def assertAllOtks(user_id: str, device_id: str, *alg_key_pairs: str) -> None: + key_dict = got_otks[user_id][device_id] + for alg_key in alg_key_pairs: + expected_key_json = one_time_keys[user_id][device_id][alg_key] + self.assertEqual(key_dict[alg_key], expected_key_json) + + # Expect a single arbitrary key to be returned. 
+ assertExactlyOneOtk(alice, "alice_dev_1", "alg1:k1", "alg1:k2") + assertExactlyOneOtk(alice, "alice_dev_2", "alg2:k7", "alg2:k8") + assertExactlyOneOtk(chris, "chris_dev_2", "alg2:k23", "alg2:k24") + + assertAllOtks(alice, "alice_dev_1", "alg2:k3", "alg2:k4") + assertAllOtks(brian, "brian_dev_1", "alg1:k9", "alg1:k10") + assertAllOtks(brian, "brian_dev_2", "alg2:k15", "alg2:k16") + + # Now check the unused key counts. + for user_id, devices in one_time_keys.items(): + for device_id in devices: + counts_by_alg = self.get_success( + self.store.count_e2e_one_time_keys(user_id, device_id) + ) + # Somewhat fiddley to compute the expected count dict. + expected_counts_by_alg = { + "signed_curve25519": 0, + } + for alg in ["alg1", "alg2"]: + claim_count = claims_to_make.get((user_id, device_id, alg), 0) + remaining_count = max(0, 2 - claim_count) + if remaining_count > 0: + expected_counts_by_alg[alg] = remaining_count + + self.assertEqual( + counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}" + ) + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" @@ -322,6 +480,83 @@ def test_fallback_key(self) -> None: {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) + def test_fallback_key_bulk(self) -> None: + """Like test_fallback_key, but claims multiple keys in one handler call.""" + alice = f"@alice:{self.hs.hostname}" + brian = f"@brian:{self.hs.hostname}" + chris = f"@chris:{self.hs.hostname}" + + # Have three users upload fallback keys for two devices. + fallback_keys = { + alice: { + "alice_dev_1": {"alg1:k1": "fallback_key1"}, + "alice_dev_2": {"alg2:k2": "fallback_key2"}, + }, + brian: { + "brian_dev_1": {"alg1:k3": "fallback_key3"}, + "brian_dev_2": {"alg2:k4": "fallback_key4"}, + }, + chris: { + "chris_dev_1": {"alg1:k5": "fallback_key5"}, + "chris_dev_2": {"alg2:k6": "fallback_key6"}, + }, + } + + for user_id, devices in fallback_keys.items(): + for device_id, key_dict in devices.items(): + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + {"fallback_keys": key_dict}, + ) + ) + + # Each device should have an unused fallback key. + for user_id, devices in fallback_keys.items(): + for device_id in devices: + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + expected_algorithm_name = f"alg{device_id[-1]}" + self.assertEqual(fallback_res, [expected_algorithm_name]) + + # Claim the fallback key for one device per user. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + { + alice: {"alice_dev_1": {"alg1": 1}}, + brian: {"brian_dev_2": {"alg2": 1}}, + chris: {"chris_dev_2": {"alg2": 1}}, + }, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + expected_claims = { + alice: {"alice_dev_1": {"alg1:k1": "fallback_key1"}}, + brian: {"brian_dev_2": {"alg2:k4": "fallback_key4"}}, + chris: {"chris_dev_2": {"alg2:k6": "fallback_key6"}}, + } + self.assertEqual( + claim_res, + {"failures": {}, "one_time_keys": expected_claims}, + ) + + for user_id, devices in fallback_keys.items(): + for device_id in devices: + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + # Claimed fallback keys should no longer show up as unused. + # Unclaimed fallback keys should still be unused. 
+ if device_id in expected_claims[user_id]: + self.assertEqual(fallback_res, []) + else: + expected_algorithm_name = f"alg{device_id[-1]}" + self.assertEqual(fallback_res, [expected_algorithm_name]) + def test_fallback_key_always_returned(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" @@ -1367,3 +1602,50 @@ def test_query_local_devices_appservice(self) -> None: } }, ) + + def test_check_cross_signing_setup(self) -> None: + # First check what happens with no master key. + alice = "@alice:test" + exists, replaceable_without_uia = self.get_success( + self.handler.check_cross_signing_setup(alice) + ) + self.assertIs(exists, False) + self.assertIs(replaceable_without_uia, False) + + # Upload a master key but don't specify a replacement timestamp. + dummy_key = {"keys": {"a": "b"}} + self.get_success( + self.store.set_e2e_cross_signing_key("@alice:test", "master", dummy_key) + ) + + # Should now find the key exists. + exists, replaceable_without_uia = self.get_success( + self.handler.check_cross_signing_setup(alice) + ) + self.assertIs(exists, True) + self.assertIs(replaceable_without_uia, False) + + # Set an expiry timestamp in the future. + self.get_success( + self.store.allow_master_cross_signing_key_replacement_without_uia( + alice, + 1000, + ) + ) + + # Should now be allowed to replace the key without UIA. + exists, replaceable_without_uia = self.get_success( + self.handler.check_cross_signing_setup(alice) + ) + self.assertIs(exists, True) + self.assertIs(replaceable_without_uia, True) + + # Wait 2 seconds, so that the timestamp is in the past. + self.reactor.advance(2.0) + + # Should no longer be allowed to replace the key without UIA. + exists, replaceable_without_uia = self.get_success( + self.handler.check_cross_signing_setup(alice) + ) + self.assertIs(exists, True) + self.assertIs(replaceable_without_uia, False) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 4fc074241341..a035232905e2 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -112,7 +112,7 @@ def test_rejected_message_event_state(self) -> None: """ Check that we store the state group correctly for rejected non-state events. - Regression test for #6289. + Regression test for https://github.com/matrix-org/synapse/issues/6289. """ OTHER_SERVER = "otherserver" OTHER_USER = "@otheruser:" + OTHER_SERVER @@ -165,7 +165,7 @@ def test_rejected_state_event_state(self) -> None: """ Check that we store the state group correctly for rejected state events. - Regression test for #6289. + Regression test for https://github.com/matrix-org/synapse/issues/6289. """ OTHER_SERVER = "otherserver" OTHER_USER = "@otheruser:" + OTHER_SERVER @@ -222,7 +222,7 @@ def test_backfill_with_many_backward_extremities(self) -> None: of backwards extremities(the magic number is more than 5), no errors are thrown. - Regression test, see #11027 + Regression test, see https://github.com/matrix-org/synapse/pull/11027 """ # create the room user_id = self.register_user("kermit", "test") diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index a72ecfdc97a9..a2b8e3d5621a 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -13,7 +13,8 @@ # limitations under the License. 
from http import HTTPStatus -from typing import Any, Dict, Union +from io import BytesIO +from typing import Any, Dict, Optional, Union from unittest.mock import ANY, AsyncMock, Mock from urllib.parse import parse_qs @@ -25,6 +26,8 @@ from signedjson.sign import sign_json from twisted.test.proto_helpers import MemoryReactor +from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse from synapse.api.errors import ( AuthError, @@ -33,23 +36,17 @@ OAuthInsufficientScopeError, SynapseError, ) +from synapse.http.site import SynapseRequest from synapse.rest import admin from synapse.rest.client import account, devices, keys, login, logout, register from synapse.server import HomeServer -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import Clock +from tests.server import FakeChannel from tests.test_utils import FakeResponse, get_awaitable_result -from tests.unittest import HomeserverTestCase, skip_unless -from tests.utils import mock_getRawHeaders - -try: - import authlib # noqa: F401 - - HAS_AUTHLIB = True -except ImportError: - HAS_AUTHLIB = False - +from tests.unittest import HomeserverTestCase, override_config, skip_unless +from tests.utils import HAS_AUTHLIB, checked_cast, mock_getRawHeaders # These are a few constants that are used as config parameters in the tests. SERVER_NAME = "test" @@ -75,6 +72,7 @@ SUBJECT = "abc-def-ghi" USERNAME = "test-user" USER_ID = "@" + USERNAME + ":" + SERVER_NAME +OIDC_ADMIN_USERID = f"@__oidc_admin:{SERVER_NAME}" async def get_json(url: str) -> JsonDict: @@ -134,7 +132,10 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver(proxied_http_client=self.http_client) - self.auth = hs.get_auth() + # Import this here so that we've checked that authlib is available. + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + self.auth = checked_cast(MSC3861DelegatedAuth, hs.get_auth()) return hs @@ -675,7 +676,8 @@ def test_admin_token(self) -> None: request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) self.assertEqual( - requester.user.to_string(), "@%s:%s" % ("__oidc_admin", SERVER_NAME) + requester.user.to_string(), + OIDC_ADMIN_USERID, ) self.assertEqual(requester.is_guest, False) self.assertEqual(requester.device_id, None) @@ -685,3 +687,102 @@ def test_admin_token(self) -> None: # There should be no call to the introspection endpoint self.http_client.request.assert_not_called() + + @override_config({"mau_stats_only": True}) + def test_request_tracking(self) -> None: + """Using an access token should update the client_ips and MAU tables.""" + # To start, there are no MAU users. 
+ store = self.hs.get_datastores().main + mau = self.get_success(store.get_monthly_active_count()) + self.assertEqual(mau, 0) + + known_token = "token-token-GOOD-:)" + + async def mock_http_client_request( + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: + """Mocked auth provider response.""" + assert method == "POST" + token = parse_qs(data)[b"token"][0].decode("utf-8") + if token == known_token: + return FakeResponse.json( + code=200, + payload={ + "active": True, + "scope": MATRIX_USER_SCOPE, + "sub": SUBJECT, + "username": USERNAME, + }, + ) + + return FakeResponse.json(code=200, payload={"active": False}) + + self.http_client.request = mock_http_client_request + + EXAMPLE_IPV4_ADDR = "123.123.123.123" + EXAMPLE_USER_AGENT = "httprettygood" + + # First test a known access token + channel = FakeChannel(self.site, self.reactor) + # type-ignore: FakeChannel is a mock of an HTTPChannel, not a proper HTTPChannel + req = SynapseRequest(channel, self.site) # type: ignore[arg-type] + req.client.host = EXAMPLE_IPV4_ADDR + req.requestHeaders.addRawHeader("Authorization", f"Bearer {known_token}") + req.requestHeaders.addRawHeader("User-Agent", EXAMPLE_USER_AGENT) + req.content = BytesIO(b"") + req.requestReceived( + b"GET", + b"/_matrix/client/v3/account/whoami", + b"1.1", + ) + channel.await_result() + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual(channel.json_body["user_id"], USER_ID, channel.json_body) + + # Expect to see one MAU entry, from the first request + mau = self.get_success(store.get_monthly_active_count()) + self.assertEqual(mau, 1) + + conn_infos = self.get_success( + store.get_user_ip_and_agents(UserID.from_string(USER_ID)) + ) + self.assertEqual(len(conn_infos), 1, conn_infos) + conn_info = conn_infos[0] + self.assertEqual(conn_info["access_token"], known_token) + self.assertEqual(conn_info["ip"], EXAMPLE_IPV4_ADDR) + self.assertEqual(conn_info["user_agent"], EXAMPLE_USER_AGENT) + + # Now test MAS making a request using the special __oidc_admin token + MAS_IPV4_ADDR = "127.0.0.1" + MAS_USER_AGENT = "masmasmas" + + channel = FakeChannel(self.site, self.reactor) + req = SynapseRequest(channel, self.site) # type: ignore[arg-type] + req.client.host = MAS_IPV4_ADDR + req.requestHeaders.addRawHeader( + "Authorization", f"Bearer {self.auth._admin_token}" + ) + req.requestHeaders.addRawHeader("User-Agent", MAS_USER_AGENT) + req.content = BytesIO(b"") + req.requestReceived( + b"GET", + b"/_matrix/client/v3/account/whoami", + b"1.1", + ) + channel.await_result() + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + self.assertEqual( + channel.json_body["user_id"], OIDC_ADMIN_USERID, channel.json_body + ) + + # Still expect to see one MAU entry, from the first request + mau = self.get_success(store.get_monthly_active_count()) + self.assertEqual(mau, 1) + + conn_infos = self.get_success( + store.get_user_ip_and_agents(UserID.from_string(OIDC_ADMIN_USERID)) + ) + self.assertEqual(conn_infos, []) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 41c8c44e0241..173b14521a15 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import itertools from typing import Optional, cast from unittest.mock import Mock, call @@ -33,6 +33,7 @@ IDLE_TIMER, LAST_ACTIVE_GRANULARITY, SYNC_ONLINE_TIMEOUT, + PresenceHandler, handle_timeout, handle_update, ) @@ -66,7 +67,12 @@ def test_offline_to_online(self) -> None: ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -108,7 +114,12 @@ def test_online_to_online(self) -> None: ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -153,7 +164,12 @@ def test_online_to_online_last_active_noop(self) -> None: ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -196,7 +212,12 @@ def test_online_to_online_last_active(self) -> None: new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -231,7 +252,12 @@ def test_remote_ping_timer(self) -> None: new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=False, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=False, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -265,7 +291,12 @@ def test_online_to_offline(self) -> None: new_state = prev_state.copy_and_replace(state=PresenceState.OFFLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -287,7 +318,12 @@ def test_online_to_idle(self) -> None: new_state = prev_state.copy_and_replace(state=PresenceState.UNAVAILABLE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -347,6 +383,41 @@ def test_persisting_presence_updates(self) -> None: # They should be identical. 
self.assertEqual(presence_states_compare, db_presence_states) + @parameterized.expand( + itertools.permutations( + ( + PresenceState.BUSY, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + ), + 2, + ) + ) + def test_override(self, initial_state: str, final_state: str) -> None: + """Overridden statuses should not go into the wheel timer.""" + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=initial_state, last_active_ts=now, currently_active=True + ) + + new_state = prev_state.copy_and_replace(state=final_state, last_active_ts=now) + + handle_update( + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=True, + ) + + wheel_timer.insert.assert_not_called() + class PresenceTimeoutTestCase(unittest.TestCase): """Tests different timers and that the timer does not change `status_msg` of user.""" @@ -738,7 +809,6 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.presence_handler = hs.get_presence_handler() - self.clock = hs.get_clock() def test_external_process_timeout(self) -> None: """Test that if an external process doesn't update the records for a while @@ -1471,6 +1541,29 @@ def _set_presencestate_with_status_msg( self.assertEqual(new_state.state, state) self.assertEqual(new_state.status_msg, status_msg) + @unittest.override_config({"presence": {"enabled": "untracked"}}) + def test_untracked_does_not_idle(self) -> None: + """Untracked presence should not idle.""" + + # Mark user as online, this needs to reach into internals in order to + # bypass checks. + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + assert isinstance(self.presence_handler, PresenceHandler) + self.get_success( + self.presence_handler._update_states( + [state.copy_and_replace(state=PresenceState.ONLINE)] + ) + ) + + # Ensure the update took. + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.ONLINE) + + # The timeout should not fire and the state should be the same. + self.reactor.advance(SYNC_ONLINE_TIMEOUT) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.ONLINE) + class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index e9fbf32c7ce9..032b89d684d6 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -342,10 +342,10 @@ def test_auto_create_auto_join_rooms_federated(self) -> None: # Ensure the room is properly not federated. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["federatable"]) - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "public") - self.assertIsNone(room["guest_access"]) + self.assertFalse(room.federatable) + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "public") + self.assertIsNone(room.guest_access) # The user should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -372,7 +372,7 @@ def test_auto_join_mxid_localpart(self) -> None: # Ensure the room is properly a public room. 
room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertEqual(room["join_rules"], "public") + self.assertEqual(room.join_rules, "public") # Both users should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(inviter)) @@ -411,9 +411,9 @@ def test_auto_create_auto_join_room_preset(self) -> None: # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "invite") - self.assertEqual(room["guest_access"], "can_join") + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "invite") + self.assertEqual(room.guest_access, "can_join") # Both users should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(inviter)) @@ -455,9 +455,9 @@ def test_auto_create_auto_join_room_preset_guest(self) -> None: # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "invite") - self.assertEqual(room["guest_access"], "can_join") + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "invite") + self.assertEqual(room.guest_access, "can_join") # Both users should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(inviter)) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index d11ded6c5b63..15e19b15fb38 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
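
The register-handler test updates above track a storage-layer change in this PR: `get_room_with_stats` now returns an attrs row rather than a dict, so fields are read as attributes (`room.join_rules`) instead of keys (`room["join_rules"]`). A minimal sketch of the shape of that change, with `RoomStatsRow` as a made-up stand-in for the real return type:

    from typing import Optional

    import attr


    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class RoomStatsRow:  # stand-in for the attrs row returned by get_room_with_stats
        federatable: bool
        public: bool
        join_rules: str
        guest_access: Optional[str]


    room = RoomStatsRow(federatable=False, public=False, join_rules="public", guest_access=None)
    assert room.join_rules == "public"  # previously room["join_rules"]
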
-from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple, cast from twisted.test.proto_helpers import MemoryReactor @@ -68,10 +68,14 @@ def _add_background_updates(self) -> None: ) ) - async def get_all_room_state(self) -> List[Dict[str, Any]]: - return await self.store.db_pool.simple_select_list( - "room_stats_state", None, retcols=("name", "topic", "canonical_alias") + async def get_all_room_state(self) -> List[Optional[str]]: + rows = cast( + List[Tuple[Optional[str]]], + await self.store.db_pool.simple_select_list( + "room_stats_state", None, retcols=("topic",) + ), ) + return [r[0] for r in rows] def _get_current_stats( self, stats_type: str, stat_id: str @@ -80,7 +84,7 @@ def _get_current_stats( cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type]) - return self.get_success( + row = self.get_success( self.store.db_pool.simple_select_one( table + "_current", {id_col: stat_id}, @@ -89,6 +93,8 @@ def _get_current_stats( ) ) + return None if row is None else dict(zip(cols, row)) + def _perform_background_initial_update(self) -> None: # Do the initial population of the stats via the background update self._add_background_updates() @@ -130,7 +136,7 @@ def test_initial_room(self) -> None: r = self.get_success(self.get_all_room_state()) self.assertEqual(len(r), 1) - self.assertEqual(r[0]["topic"], "foo") + self.assertEqual(r[0], "foo") def test_create_user(self) -> None: """ diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index b5f15aa7d425..388447eea6dc 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -366,7 +366,7 @@ def test_handle_local_profile_change_with_support_user(self) -> None: ) profile = self.get_success(self.store._get_user_in_directory(regular_user_id)) assert profile is not None - self.assertTrue(profile["display_name"] == display_name) + self.assertTrue(profile[0] == display_name) def test_handle_local_profile_change_with_deactivated_user(self) -> None: # create user @@ -385,7 +385,7 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None: # profile is in directory profile = self.get_success(self.store._get_user_in_directory(r_user_id)) assert profile is not None - self.assertTrue(profile["display_name"] == display_name) + self.assertEqual(profile[0], display_name) # deactivate user self.get_success(self.store.set_user_deactivated_status(r_user_id, True)) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 528cdee34b8b..9108a3007b90 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -15,14 +15,20 @@ import subprocess from typing import List +from incremental import Version from zope.interface import implementer +import twisted from OpenSSL import SSL from OpenSSL.SSL import Connection from twisted.internet.address import IPv4Address -from twisted.internet.interfaces import IOpenSSLServerConnectionCreator +from twisted.internet.interfaces import ( + IOpenSSLServerConnectionCreator, + IProtocolFactory, + IReactorTime, +) from twisted.internet.ssl import Certificate, trustRootFromCertificates -from twisted.protocols.tls import TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401 from twisted.web.iweb import IPolicyForHTTPS # noqa: F401 @@ -153,6 +159,33 @@ def serverConnectionForTLS(self, tlsProtocol: TLSMemoryBIOProtocol) -> Connectio return Connection(ctx, None) +def 
wrap_server_factory_for_tls( + factory: IProtocolFactory, clock: IReactorTime, sanlist: List[bytes] +) -> TLSMemoryBIOFactory: + """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory + + The resultant factory will create a TLS server which presents a certificate + signed by our test CA, valid for the domains in `sanlist` + + Args: + factory: protocol factory to wrap + sanlist: list of domains the cert should be valid for + + Returns: + interfaces.IProtocolFactory + """ + connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) + # Twisted > 23.8.0 has a different API that accepts a clock. + if twisted.version <= Version("Twisted", 23, 8, 0): + return TLSMemoryBIOFactory( + connection_creator, isClient=False, wrappedFactory=factory + ) + else: + return TLSMemoryBIOFactory( + connection_creator, isClient=False, wrappedFactory=factory, clock=clock + ) + + # A dummy address, useful for tests that use FakeTransport and don't care about where # packets are going to/coming from. dummy_address = IPv4Address("TCP", "127.0.0.1", 80) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 9f63fa6fa815..0f623ae50b11 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -31,7 +31,7 @@ IProtocolFactory, ) from twisted.internet.protocol import Factory, Protocol -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web._newclient import ResponseNeverReceived from twisted.web.client import Agent from twisted.web.http import HTTPChannel, Request @@ -57,11 +57,7 @@ from synapse.util.caches.ttlcache import TTLCache from tests import unittest -from tests.http import ( - TestServerTLSConnectionFactory, - dummy_address, - get_test_ca_cert_file, -) +from tests.http import dummy_address, get_test_ca_cert_file, wrap_server_factory_for_tls from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.utils import checked_cast, default_config @@ -125,7 +121,18 @@ def _make_connection( # build the test server server_factory = _get_test_protocol_factory() if ssl: - server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) + server_factory = wrap_server_factory_for_tls( + server_factory, + self.reactor, + tls_sanlist + or [ + b"DNS:testserv", + b"DNS:target-server", + b"DNS:xn--bcher-kva.com", + b"IP:1.2.3.4", + b"IP:::1", + ], + ) server_protocol = server_factory.buildProtocol(dummy_address) assert server_protocol is not None @@ -435,8 +442,16 @@ def _do_get_via_proxy( request.finish() # now we make another test server to act as the upstream HTTP server. - server_ssl_protocol = _wrap_server_factory_for_tls( - _get_test_protocol_factory() + server_ssl_protocol = wrap_server_factory_for_tls( + _get_test_protocol_factory(), + self.reactor, + sanlist=[ + b"DNS:testserv", + b"DNS:target-server", + b"DNS:xn--bcher-kva.com", + b"IP:1.2.3.4", + b"IP:::1", + ], ).buildProtocol(dummy_address) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
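
The new `wrap_server_factory_for_tls` helper in `tests/http/__init__.py` replaces the per-module `_wrap_server_factory_for_tls` functions and hides the Twisted-version difference (newer Twisted accepts a `clock` argument to `TLSMemoryBIOFactory`). A sketch of how the tests above call it, assuming the Synapse test helpers are importable:

    from twisted.internet.protocol import Factory
    from twisted.web.http import HTTPChannel

    from tests.http import dummy_address, wrap_server_factory_for_tls
    from tests.server import ThreadedMemoryReactorClock

    reactor = ThreadedMemoryReactorClock()

    # Wrap a plain HTTP factory so it speaks TLS with a test-CA certificate
    # valid for test.com; the reactor and SAN list are now required arguments.
    tls_factory = wrap_server_factory_for_tls(
        Factory.forProtocol(HTTPChannel), reactor, sanlist=[b"DNS:test.com"]
    )
    server_protocol = tls_factory.buildProtocol(dummy_address)
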
@@ -1786,33 +1801,6 @@ def _check_logcontext(context: LoggingContextOrSentinel) -> None: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) -def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> TLSMemoryBIOFactory: - """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory - The resultant factory will create a TLS server which presents a certificate - signed by our test CA, valid for the domains in `sanlist` - Args: - factory: protocol factory to wrap - sanlist: list of domains the cert should be valid for - Returns: - interfaces.IProtocolFactory - """ - if sanlist is None: - sanlist = [ - b"DNS:testserv", - b"DNS:target-server", - b"DNS:xn--bcher-kva.com", - b"IP:1.2.3.4", - b"IP:::1", - ] - - connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) - return TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=factory - ) - - def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 36472e57a839..d524c183f88d 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -335,7 +335,7 @@ def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): self._request_number = request_number self._seen_awaits = seen_awaits - self._original_Deferred___next__ = Deferred.__next__ + self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore] # The number of `await`s on `Deferred`s we have seen so far. self.awaits_seen = 0 diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index ab94f3f67abe..b7337d39265a 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -70,7 +70,7 @@ def test_client_get(self) -> None: """ @defer.inlineCallbacks - def do_request() -> Generator["Deferred[object]", object, object]: + def do_request() -> Generator["Deferred[Any]", object, object]: with LoggingContext("one") as context: fetch_d = defer.ensureDeferred( self.cl.get_json("testserv:8008", "foo/bar") @@ -368,7 +368,8 @@ def test_client_requires_trailing_slashes(self) -> None: """ If a connection is made to a client but the client rejects it due to requiring a trailing slash. We need to retry the request with a - trailing slash. Workaround for Synapse <= v0.99.3, explained in #3622. + trailing slash. Workaround for Synapse <= v0.99.3, explained in + https://github.com/matrix-org/synapse/issues/3622. 
""" d = defer.ensureDeferred( self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 8164b0b78e31..1f117276cf01 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -29,18 +29,14 @@ ) from twisted.internet.interfaces import IProtocol, IProtocolFactory from twisted.internet.protocol import Factory, Protocol -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel from synapse.http.client import BlocklistingReactorWrapper from synapse.http.connectproxyclient import BasicProxyCredentials from synapse.http.proxyagent import ProxyAgent, parse_proxy -from tests.http import ( - TestServerTLSConnectionFactory, - dummy_address, - get_test_https_policy, -) +from tests.http import dummy_address, get_test_https_policy, wrap_server_factory_for_tls from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase from tests.utils import checked_cast @@ -217,6 +213,27 @@ def test_parse_proxy( ) +class TestBasicProxyCredentials(TestCase): + def test_long_user_pass_string_encoded_without_newlines(self) -> None: + """Reproduces https://github.com/matrix-org/synapse/pull/16504.""" + proxy_connection_string = b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass@proxy.local:9988" + _, _, _, creds = parse_proxy(proxy_connection_string) + assert creds is not None # for mypy's benefit + self.assertIsInstance(creds, BasicProxyCredentials) + + auth_value = creds.as_proxy_authorization_value() + self.assertNotIn(b"\n", auth_value) + self.assertEqual( + creds.as_proxy_authorization_value(), + b"Basic bG9vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vbmd1c2VyOnBhc3M=", + ) + basic_auth_payload = creds.as_proxy_authorization_value().split(b" ")[1] + self.assertEqual( + base64.b64decode(basic_auth_payload), + b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass", + ) + + class MatrixFederationAgentTests(TestCase): def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() @@ -251,7 +268,9 @@ def _make_connection( the server Protocol returned by server_factory """ if ssl: - server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) + server_factory = wrap_server_factory_for_tls( + server_factory, self.reactor, tls_sanlist or [b"DNS:test.com"] + ) server_protocol = server_factory.buildProtocol(dummy_address) assert server_protocol is not None @@ -618,8 +637,8 @@ def _do_https_request_via_proxy( request.finish() # now we make another test server to act as the upstream HTTP server. - server_ssl_protocol = _wrap_server_factory_for_tls( - _get_test_protocol_factory() + server_ssl_protocol = wrap_server_factory_for_tls( + _get_test_protocol_factory(), self.reactor, sanlist=[b"DNS:test.com"] ).buildProtocol(dummy_address) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
@@ -785,7 +804,9 @@ def test_https_request_via_uppercase_proxy_with_blocklist(self) -> None: request.finish() # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel - ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory()) + ssl_factory = wrap_server_factory_for_tls( + _get_test_protocol_factory(), self.reactor, sanlist=[b"DNS:test.com"] + ) ssl_protocol = ssl_factory.buildProtocol(dummy_address) assert isinstance(ssl_protocol, TLSMemoryBIOProtocol) http_server = ssl_protocol.wrappedProtocol @@ -849,30 +870,6 @@ def test_proxy_with_https_scheme(self) -> None: self.assertEqual(proxy_ep._wrappedEndpoint._port, 8888) -def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> TLSMemoryBIOFactory: - """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory - - The resultant factory will create a TLS server which presents a certificate - signed by our test CA, valid for the domains in `sanlist` - - Args: - factory: protocol factory to wrap - sanlist: list of domains the cert should be valid for - - Returns: - interfaces.IProtocolFactory - """ - if sanlist is None: - sanlist = [b"DNS:test.com"] - - connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) - return TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=factory - ) - - def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel diff --git a/tests/rest/media/test_media_retention.py b/tests/media/test_media_retention.py similarity index 95% rename from tests/rest/media/test_media_retention.py rename to tests/media/test_media_retention.py index b59d9dfd4da0..27a663a23b6d 100644 --- a/tests/rest/media/test_media_retention.py +++ b/tests/media/test_media_retention.py @@ -267,23 +267,23 @@ def _assert_if_mxc_uris_purged( def _assert_mxc_uri_purge_state(mxc_uri: MXCUri, expect_purged: bool) -> None: """Given an MXC URI, assert whether it has been purged or not.""" if mxc_uri.server_name == self.hs.config.server.server_name: - found_media_dict = self.get_success( - self.store.get_local_media(mxc_uri.media_id) + found_media = bool( + self.get_success(self.store.get_local_media(mxc_uri.media_id)) ) else: - found_media_dict = self.get_success( - self.store.get_cached_remote_media( - mxc_uri.server_name, mxc_uri.media_id + found_media = bool( + self.get_success( + self.store.get_cached_remote_media( + mxc_uri.server_name, mxc_uri.media_id + ) ) ) if expect_purged: - self.assertIsNone( - found_media_dict, msg=f"{mxc_uri} unexpectedly not purged" - ) + self.assertFalse(found_media, msg=f"{mxc_uri} unexpectedly not purged") else: - self.assertIsNotNone( - found_media_dict, + self.assertTrue( + found_media, msg=f"{mxc_uri} unexpectedly purged", ) diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 15f5d644e40e..f262304c3daa 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -318,7 +318,9 @@ def _req( self.assertEqual( self.fetches[0][2], "/_matrix/media/r0/download/" + self.media_id ) - self.assertEqual(self.fetches[0][3], {"allow_remote": "false"}) + self.assertEqual( + self.fetches[0][3], {"allow_remote": "false", "timeout_ms": "20000"} + ) headers = { b"Content-Length": [b"%d" % (len(self.test_image.data))], @@ -504,7 +506,7 @@ def test_thumbnail_repeated_thumbnail(self) -> None: origin, media_id = self.media_id.split("/") info = self.get_success(self.store.get_cached_remote_media(origin, 
media_id)) assert info is not None - file_id = info["filesystem_id"] + file_id = info.filesystem_id thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( origin, file_id diff --git a/tests/module_api/test_event_unsigned_addition.py b/tests/module_api/test_event_unsigned_addition.py new file mode 100644 index 000000000000..b64426b1ac58 --- /dev/null +++ b/tests/module_api/test_event_unsigned_addition.py @@ -0,0 +1,59 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from twisted.test.proto_helpers import MemoryReactor + +from synapse.events import EventBase +from synapse.rest import admin, login, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class EventUnsignedAdditionTestCase(HomeserverTestCase): + servlets = [ + room.register_servlets, + admin.register_servlets, + login.register_servlets, + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self._store = homeserver.get_datastores().main + self._module_api = homeserver.get_module_api() + self._account_data_mgr = self._module_api.account_data_manager + + def test_annotate_event(self) -> None: + """Test that we can annotate an event when we request it from the + server. + """ + + async def add_unsigned_event(event: EventBase) -> JsonDict: + return {"test_key": event.event_id} + + self._module_api.register_add_extra_fields_to_unsigned_client_event_callbacks( + add_field_to_unsigned_callback=add_unsigned_event + ) + + user_id = self.register_user("user", "password") + token = self.login("user", "password") + + room_id = self.helper.create_room_as(user_id, tok=token) + result = self.helper.send(room_id, "Hello!", tok=token) + event_id = result["event_id"] + + event_json = self.helper.get_event(room_id, event_id, tok=token) + self.assertEqual(event_json["unsigned"].get("test_key"), event_id) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 7c23b77e0a11..907ee1488c58 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -92,7 +92,7 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( - the bad power level value for "room", before JSON serisalistion - whether Bob should expect the message to be highlighted - Reproduces #14060. + Reproduces https://github.com/matrix-org/synapse/issues/14060. A lack of validation: the gift that keeps on giving. 
""" diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py index ab379e8cf1eb..85adf84ece89 100644 --- a/tests/replication/tcp/streams/test_to_device.py +++ b/tests/replication/tcp/streams/test_to_device.py @@ -62,7 +62,7 @@ def test_to_device_stream(self) -> None: ) # add one more message, for user2 this time - # this message would be dropped before fixing #15335 + # this message would be dropped before fixing https://github.com/matrix-org/synapse/issues/15335 msg["content"] = {"device": {}} messages = {user2: {"device": msg}} diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 5a38ac831f26..20b3d431ba7c 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -35,6 +35,10 @@ def test_typing(self) -> None: typing = self.hs.get_typing_handler() assert isinstance(typing, TypingWriterHandler) + # Create a typing update before we reconnect so that there is a missing + # update to fetch. + typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) + self.reconnect() typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) @@ -91,6 +95,10 @@ def test_reset(self) -> None: typing = self.hs.get_typing_handler() assert isinstance(typing, TypingWriterHandler) + # Create a typing update before we reconnect so that there is a missing + # update to fetch. + typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) + self.reconnect() typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=True) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index b230a6c361b4..1e9994cc0bc7 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -15,9 +15,7 @@ import os from typing import Any, Optional, Tuple -from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.test.proto_helpers import MemoryReactor from twisted.web.http import HTTPChannel from twisted.web.server import Request @@ -27,7 +25,11 @@ from synapse.server import HomeServer from synapse.util import Clock -from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file +from tests.http import ( + TestServerTLSConnectionFactory, + get_test_ca_cert_file, + wrap_server_factory_for_tls, +) from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG @@ -94,7 +96,13 @@ def _get_media_req( (host, port, client_factory, _timeout, _bindAddress) = clients.pop() # build the test server - server_tls_protocol = _build_test_server(get_connection_factory()) + server_factory = Factory.forProtocol(HTTPChannel) + # Request.finish expects the factory to have a 'log' method. + server_factory.log = _log_request + + server_tls_protocol = wrap_server_factory_for_tls( + server_factory, self.reactor, sanlist=[b"DNS:example.com"] + ).buildProtocol(None) # now, tell the client protocol factory to build the client protocol (it will be a # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an @@ -114,7 +122,7 @@ def _get_media_req( ) # fish the test server back out of the server-side TLS protocol. 
- http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment] + http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # give the reactor a pump to get the TLS juices flowing. self.reactor.pump((0.1,)) @@ -240,40 +248,6 @@ def _count_remote_thumbnails(self) -> int: return sum(len(files) for _, _, files in os.walk(path)) -def get_connection_factory() -> TestServerTLSConnectionFactory: - # this needs to happen once, but not until we are ready to run the first test - global test_server_connection_factory - if test_server_connection_factory is None: - test_server_connection_factory = TestServerTLSConnectionFactory( - sanlist=[b"DNS:example.com"] - ) - return test_server_connection_factory - - -def _build_test_server( - connection_creator: IOpenSSLServerConnectionCreator, -) -> TLSMemoryBIOProtocol: - """Construct a test server - - This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol - - Args: - connection_creator: thing to build SSL connections - - Returns: - TLSMemoryBIOProtocol - """ - server_factory = Factory.forProtocol(HTTPChannel) - # Request.finish expects the factory to have a 'log' method. - server_factory.log = _log_request - - server_tls_factory = TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=server_factory - ) - - return server_tls_factory.buildProtocol(None) - - def _log_request(request: Request) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info("Completed request %s", request) diff --git a/tests/replication/test_sharded_receipts.py b/tests/replication/test_sharded_receipts.py new file mode 100644 index 000000000000..41876b36de4a --- /dev/null +++ b/tests/replication/test_sharded_receipts.py @@ -0,0 +1,243 @@ +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import ReceiptTypes +from synapse.rest import admin +from synapse.rest.client import login, receipts, room, sync +from synapse.server import HomeServer +from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.types import StreamToken +from synapse.util import Clock + +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import make_request + +logger = logging.getLogger(__name__) + + +class ReceiptsShardTestCase(BaseMultiWorkerStreamTestCase): + """Checks receipts sharding works""" + + servlets = [ + admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + receipts.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # Register a user who sends a message that we'll get notified about + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + self.room_creator = self.hs.get_room_creation_handler() + self.store = hs.get_datastores().main + + def default_config(self) -> dict: + conf = super().default_config() + conf["stream_writers"] = {"receipts": ["worker1", "worker2"]} + conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, + "worker1": {"host": "testserv", "port": 1001}, + "worker2": {"host": "testserv", "port": 1002}, + } + return conf + + def test_basic(self) -> None: + """Simple test to ensure that receipts can be sent on multiple + workers. + """ + + worker1 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker1"}, + ) + worker1_site = self._hs_to_site[worker1] + + worker2 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker2"}, + ) + worker2_site = self._hs_to_site[worker2] + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Create a room + room_id = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room_id, user=self.other_user_id, tok=self.other_access_token + ) + + # First user sends a message, the other users sends a receipt. + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + event_id = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker1_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + # Now we do it again using the second worker + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + event_id = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker2_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + def test_vector_clock_token(self) -> None: + """Tests that using a stream token with a vector clock component works + correctly with basic /sync usage. 
+ """ + + worker_hs1 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker1"}, + ) + worker1_site = self._hs_to_site[worker_hs1] + + worker_hs2 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker2"}, + ) + worker2_site = self._hs_to_site[worker_hs2] + + sync_hs = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "sync"}, + ) + sync_hs_site = self._hs_to_site[sync_hs] + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + store = self.hs.get_datastores().main + + room_id = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room_id, user=self.other_user_id, tok=self.other_access_token + ) + + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + first_event = response["event_id"] + + # Do an initial sync so that we're up to date. + channel = make_request( + self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token + ) + next_batch = channel.json_body["next_batch"] + + # We now gut wrench into the events stream MultiWriterIdGenerator on + # worker2 to mimic it getting stuck persisting a receipt. This ensures + # that when we send an event on worker1 we end up in a state where + # worker2 events stream position lags that on worker1, resulting in a + # receipts token with a non-empty instance map component. + # + # Worker2's receipts stream position will not advance until we call + # __aexit__ again. + worker_store2 = worker_hs2.get_datastores().main + assert isinstance(worker_store2._receipts_id_gen, MultiWriterIdGenerator) + + actx = worker_store2._receipts_id_gen.get_next() + self.get_success(actx.__aenter__()) + + channel = make_request( + reactor=self.reactor, + site=worker1_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{first_event}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + # Assert that the current stream token has an instance map component, as + # we are trying to test vector clock tokens. + receipts_token = store.get_max_receipt_stream_id() + self.assertGreater(len(receipts_token.instance_map), 0) + + # Check that syncing still gets the new receipt, despite the gap in the + # stream IDs. + channel = make_request( + self.reactor, + sync_hs_site, + "GET", + f"/sync?since={next_batch}", + access_token=access_token, + ) + + # We should only see the new event and nothing else + self.assertIn(room_id, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id]["ephemeral"]["events"] + self.assertEqual(len(events), 1) + self.assertIn(first_event, events[0]["content"]) + + # Get the next batch and makes sure its a vector clock style token. + vector_clock_token = channel.json_body["next_batch"] + parsed_token = self.get_success( + StreamToken.from_string(store, vector_clock_token) + ) + self.assertGreaterEqual(len(parsed_token.receipt_key.instance_map), 1) + + # Now that we've got a vector clock token we finish the fake persisting + # a receipt we started above. + self.get_success(actx.__aexit__(None, None, None)) + + # Now try and send another receipts to the other worker. 
+ response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + second_event = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker2_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{second_event}", + access_token=access_token, + content={}, + ) + + channel = make_request( + self.reactor, + sync_hs_site, + "GET", + f"/sync?since={vector_clock_token}", + access_token=access_token, + ) + + self.assertIn(room_id, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id]["ephemeral"]["events"] + self.assertEqual(len(events), 1) + self.assertIn(second_event, events[0]["content"]) diff --git a/tests/rest/admin/test_jwks.py b/tests/rest/admin/test_jwks.py index a9a6191c7346..842e92c3d0e5 100644 --- a/tests/rest/admin/test_jwks.py +++ b/tests/rest/admin/test_jwks.py @@ -19,13 +19,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree from tests.unittest import HomeserverTestCase, override_config, skip_unless - -try: - import authlib # noqa: F401 - - HAS_AUTHLIB = True -except ImportError: - HAS_AUTHLIB = False +from tests.utils import HAS_AUTHLIB @skip_unless(HAS_AUTHLIB, "requires authlib") diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 278808abb5d9..dac79bd74555 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -642,7 +642,7 @@ def test_quarantine_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertFalse(media_info["quarantined_by"]) + self.assertFalse(media_info.quarantined_by) # quarantining channel = self.make_request( @@ -656,7 +656,7 @@ def test_quarantine_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertTrue(media_info["quarantined_by"]) + self.assertTrue(media_info.quarantined_by) # remove from quarantine channel = self.make_request( @@ -670,7 +670,7 @@ def test_quarantine_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertFalse(media_info["quarantined_by"]) + self.assertFalse(media_info.quarantined_by) def test_quarantine_protected_media(self) -> None: """ @@ -683,7 +683,7 @@ def test_quarantine_protected_media(self) -> None: # verify protection media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertTrue(media_info["safe_from_quarantine"]) + self.assertTrue(media_info.safe_from_quarantine) # quarantining channel = self.make_request( @@ -698,7 +698,7 @@ def test_quarantine_protected_media(self) -> None: # verify that is not in quarantine media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertFalse(media_info["quarantined_by"]) + self.assertFalse(media_info.quarantined_by) class ProtectMediaByIDTestCase(_AdminMediaTests): @@ -756,7 +756,7 @@ def test_protect_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertFalse(media_info["safe_from_quarantine"]) + self.assertFalse(media_info.safe_from_quarantine) # protect channel = self.make_request( @@ -770,7 +770,7 @@ def test_protect_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - 
self.assertTrue(media_info["safe_from_quarantine"]) + self.assertTrue(media_info.safe_from_quarantine) # unprotect channel = self.make_request( @@ -784,7 +784,7 @@ def test_protect_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None - self.assertFalse(media_info["safe_from_quarantine"]) + self.assertFalse(media_info.safe_from_quarantine) class PurgeMediaCacheTestCase(_AdminMediaTests): diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6ed451d7c465..206ca7f0839d 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -29,7 +29,7 @@ PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, ) -from synapse.rest.client import directory, events, login, room +from synapse.rest.client import directory, events, knock, login, room, sync from synapse.server import HomeServer from synapse.types import UserID from synapse.util import Clock @@ -49,6 +49,8 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): login.register_servlets, events.register_servlets, room.register_servlets, + knock.register_servlets, + sync.register_servlets, room.register_deprecated_servlets, ] @@ -254,6 +256,55 @@ def test_purge_room_and_not_block(self) -> None: self._is_blocked(self.room_id, expect=False) self._has_no_members(self.room_id) + def test_purge_room_unjoined(self) -> None: + """Test to purge a room when there are invited or knocked users.""" + # Test that room is not purged + with self.assertRaises(AssertionError): + self._is_purged(self.room_id) + + # Test that room is not blocked + self._is_blocked(self.room_id, expect=False) + + # Assert one user in room + self._is_member(room_id=self.room_id, user_id=self.other_user) + self.helper.send_state( + self.room_id, + EventTypes.JoinRules, + {"join_rule": "knock"}, + tok=self.other_user_tok, + ) + + # Invite a user. + invited_user = self.register_user("invited", "pass") + self.helper.invite( + self.room_id, self.other_user, invited_user, tok=self.other_user_tok + ) + + # Have a user knock. + knocked_user = self.register_user("knocked", "pass") + knocked_user_tok = self.login("knocked", "pass") + self.helper.knock(self.room_id, knocked_user, tok=knocked_user_tok) + + channel = self.make_request( + "DELETE", + self.url.encode("ascii"), + content={"block": False, "purge": True}, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(None, channel.json_body["new_room_id"]) + self.assertCountEqual( + [self.other_user, invited_user, knocked_user], + channel.json_body["kicked_users"], + ) + self.assertIn("failed_to_kick_users", channel.json_body) + self.assertIn("local_aliases", channel.json_body) + + self._is_purged(self.room_id) + self._is_blocked(self.room_id, expect=False) + self._has_no_members(self.room_id) + def test_block_room_and_not_purge(self) -> None: """Test to block a room without purging it. Members will not be moved to a new room and will not receive a message. diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 37f37a09d8e4..cf71bbb46113 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1478,7 +1478,7 @@ def test_deactivate_user_erase_true(self) -> None: def test_deactivate_user_erase_true_avatar_nonnull_but_empty(self) -> None: """Check we can erase a user whose avatar is the empty string. - Reproduces #12257. + Reproduces https://github.com/matrix-org/synapse/issues/12257. 
""" # Patch `self.other_user` to have an empty string as their avatar. self.get_success( @@ -2706,7 +2706,7 @@ def test_change_name_deactivate_user_user_directory(self) -> None: # is in user directory profile = self.get_success(self.store._get_user_in_directory(self.other_user)) assert profile is not None - self.assertTrue(profile["display_name"] == "User") + self.assertEqual(profile[0], "User") # Deactivate user channel = self.make_request( @@ -4854,3 +4854,59 @@ def test_success(self) -> None: {"user_id": self.other_user}, channel.json_body, ) + + +class AllowCrossSigningReplacementTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + @staticmethod + def url(user: str) -> str: + template = ( + "/_synapse/admin/v1/users/{}/_allow_cross_signing_replacement_without_uia" + ) + return template.format(urllib.parse.quote(user)) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + + def test_error_cases(self) -> None: + fake_user = "@bums:other" + channel = self.make_request( + "POST", self.url(fake_user), access_token=self.admin_user_tok + ) + # Fail: user doesn't exist + self.assertEqual(404, channel.code, msg=channel.json_body) + + channel = self.make_request( + "POST", self.url(self.other_user), access_token=self.admin_user_tok + ) + # Fail: user exists, but has no master cross-signing key + self.assertEqual(404, channel.code, msg=channel.json_body) + + def test_success(self) -> None: + # Upload a master key. + dummy_key = {"keys": {"a": "b"}} + self.get_success( + self.store.set_e2e_cross_signing_key(self.other_user, "master", dummy_key) + ) + + channel = self.make_request( + "POST", self.url(self.other_user), access_token=self.admin_user_tok + ) + # Success! + self.assertEqual(200, channel.code, msg=channel.json_body) + + # Should now find that the key exists. + _, timestamp = self.get_success( + self.store.get_master_cross_signing_key_updatable_before(self.other_user) + ) + assert timestamp is not None + self.assertGreater(timestamp, self.clock.time_msec()) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index cffbda9a7de9..bd59bb50cf9d 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -139,12 +139,12 @@ def test_basic_password_reset(self) -> None: # # Note that we don't have the UI Auth session ID, so just pull out the single # row. - ui_auth_data = self.get_success( - self.store.db_pool.simple_select_one( - "ui_auth_sessions", keyvalues={}, retcols=("clientdict",) + result = self.get_success( + self.store.db_pool.simple_select_one_onecol( + "ui_auth_sessions", keyvalues={}, retcol="clientdict" ) ) - client_dict = db_to_json(ui_auth_data["clientdict"]) + client_dict = db_to_json(result) self.assertNotIn("new_password", client_dict) @override_config({"rc_3pid_validation": {"burst_count": 3}}) diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index 141e0f57a33b..8bea860beba1 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -64,7 +64,7 @@ def test_stream_basic_permissions(self) -> None: # 403. 
However, since the v1 spec no longer exists and the v1 # implementation is now part of the r0 implementation, the newer # behaviour is used instead to be consistent with the r0 spec. - # see issue #2602 + # see issue https://github.com/matrix-org/synapse/issues/2602 channel = self.make_request( "GET", "/events?access_token=%s" % ("invalid" + self.token,) ) diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index 8ee548905704..a6023dff7abf 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -11,8 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License - +import urllib.parse from http import HTTPStatus +from unittest.mock import patch from signedjson.key import ( encode_verify_key_base64, @@ -24,11 +25,12 @@ from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import keys, login -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester, create_requester from tests import unittest from tests.http.server._base import make_request_with_cancellation_test from tests.unittest import override_config +from tests.utils import HAS_AUTHLIB class KeyQueryTestCase(unittest.HomeserverTestCase): @@ -259,3 +261,179 @@ def test_device_signing_with_msc3967(self) -> None: alice_token, ) self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + +class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + keys.register_servlets, + ] + + OIDC_ADMIN_TOKEN = "_oidc_admin_token" + + @unittest.skip_unless(HAS_AUTHLIB, "requires authlib") + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer", + "account_management_url": "https://my-account.issuer", + "client_id": "id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + "admin_token": OIDC_ADMIN_TOKEN, + }, + }, + } + ) + def test_master_cross_signing_key_replacement_msc3861(self) -> None: + # Provision a user like MAS would, cribbing from + # https://github.com/matrix-org/matrix-authentication-service/blob/08d46a79a4adb22819ac9d55e15f8375dfe2c5c7/crates/matrix-synapse/src/lib.rs#L224-L229 + alice = "@alice:test" + channel = self.make_request( + "PUT", + f"/_synapse/admin/v2/users/{urllib.parse.quote(alice)}", + access_token=self.OIDC_ADMIN_TOKEN, + content={}, + ) + self.assertEqual(channel.code, HTTPStatus.CREATED, channel.json_body) + + # Provision a device like MAS would, cribbing from + # https://github.com/matrix-org/matrix-authentication-service/blob/08d46a79a4adb22819ac9d55e15f8375dfe2c5c7/crates/matrix-synapse/src/lib.rs#L260-L262 + alice_device = "alice_device" + channel = self.make_request( + "POST", + f"/_synapse/admin/v2/users/{urllib.parse.quote(alice)}/devices", + access_token=self.OIDC_ADMIN_TOKEN, + content={"device_id": alice_device}, + ) + self.assertEqual(channel.code, HTTPStatus.CREATED, channel.json_body) + + # Prepare a mock MAS access token. 
+ alice_token = "alice_token_1234_oidcwhatyoudidthere" + + async def mocked_get_user_by_access_token( + token: str, allow_expired: bool = False + ) -> Requester: + self.assertEqual(token, alice_token) + return create_requester( + user_id=alice, + device_id=alice_device, + scope=[], + is_guest=False, + ) + + patch_get_user_by_access_token = patch.object( + self.hs.get_auth(), + "get_user_by_access_token", + wraps=mocked_get_user_by_access_token, + ) + + # Copied from E2eKeysHandlerTestCase + master_pubkey = "nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk" + master_pubkey2 = "fHZ3NPiKxoLQm5OoZbKa99SYxprOjNs4TwJUKP+twCM" + master_pubkey3 = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" + + master_key: JsonDict = { + "user_id": alice, + "usage": ["master"], + "keys": {"ed25519:" + master_pubkey: master_pubkey}, + } + master_key2: JsonDict = { + "user_id": alice, + "usage": ["master"], + "keys": {"ed25519:" + master_pubkey2: master_pubkey2}, + } + master_key3: JsonDict = { + "user_id": alice, + "usage": ["master"], + "keys": {"ed25519:" + master_pubkey3: master_pubkey3}, + } + + with patch_get_user_by_access_token: + # Upload an initial cross-signing key. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + access_token=alice_token, + content={ + "master_key": master_key, + }, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + # Should not be able to upload another master key. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + access_token=alice_token, + content={ + "master_key": master_key2, + }, + ) + self.assertEqual( + channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body + ) + + # Pretend that MAS did UIA and allowed us to replace the master key. + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/users/{urllib.parse.quote(alice)}/_allow_cross_signing_replacement_without_uia", + access_token=self.OIDC_ADMIN_TOKEN, + ) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + + with patch_get_user_by_access_token: + # Should now be able to upload master key2. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + access_token=alice_token, + content={ + "master_key": master_key2, + }, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + # Even though we're still in the grace period, we shouldn't be able to + # upload master key 3 immediately after uploading key 2. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + access_token=alice_token, + content={ + "master_key": master_key3, + }, + ) + self.assertEqual( + channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body + ) + + # Pretend that MAS did UIA and allowed us to replace the master key. + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/users/{urllib.parse.quote(alice)}/_allow_cross_signing_replacement_without_uia", + access_token=self.OIDC_ADMIN_TOKEN, + ) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + timestamp_ms = channel.json_body["updatable_without_uia_before_ms"] + + # Advance to 1 second after the replacement period ends. + self.reactor.advance(timestamp_ms - self.clock.time_msec() + 1000) + + with patch_get_user_by_access_token: + # We should not be able to upload master key3 because the replacement has + # expired. 
+ channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + access_token=alice_token, + content={ + "master_key": master_key3, + }, + ) + self.assertEqual( + channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body + ) diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 66b387cea37e..4e89107e54cf 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -50,7 +50,7 @@ def test_put_presence(self) -> None: PUT to the status endpoint with use_presence enabled will call set_state on the presence handler. """ - self.hs.config.server.use_presence = True + self.hs.config.server.presence_enabled = True body = {"presence": "here", "status_msg": "beep boop"} channel = self.make_request( @@ -63,7 +63,22 @@ def test_put_presence(self) -> None: @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self) -> None: """ - PUT to the status endpoint with use_presence disabled will NOT call + PUT to the status endpoint with presence disabled will NOT call + set_state on the presence handler. + """ + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(self.presence_handler.set_state.call_count, 0) + + @unittest.override_config({"presence": {"enabled": "untracked"}}) + def test_put_presence_untracked(self) -> None: + """ + PUT to the status endpoint with presence untracked will NOT call set_state on the presence handler. """ diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index ecae092b477a..8f923fd40f89 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -170,7 +170,8 @@ def _get_displayname(self, name: Optional[str] = None) -> Optional[str]: ) self.assertEqual(channel.code, 200, channel.result) # FIXME: If a user has no displayname set, Synapse returns 200 and omits a - # displayname from the response. This contradicts the spec, see #13137. + # displayname from the response. This contradicts the spec, see + # https://github.com/matrix-org/synapse/issues/13137. return channel.json_body.get("displayname") def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]: @@ -179,7 +180,8 @@ def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]: ) self.assertEqual(channel.code, 200, channel.result) # FIXME: If a user has no avatar set, Synapse returns 200 and omits an - # avatar_url from the response. This contradicts the spec, see #13137. + # avatar_url from the response. This contradicts the spec, see + # https://github.com/matrix-org/synapse/issues/13137. 
return channel.json_body.get("avatar_url") @unittest.override_config({"max_avatar_size": 50}) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index ba4e017a0e80..b04094b7b352 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -270,15 +270,15 @@ def test_POST_registration_requires_token(self) -> None: self.assertLessEqual(det_data.items(), channel.json_body.items()) # Check the `completed` counter has been incremented and pending is 0 - res = self.get_success( + pending, completed = self.get_success( store.db_pool.simple_select_one( "registration_tokens", keyvalues={"token": token}, retcols=["pending", "completed"], ) ) - self.assertEqual(res["completed"], 1) - self.assertEqual(res["pending"], 0) + self.assertEqual(completed, 1) + self.assertEqual(pending, 0) @override_config({"registration_requires_token": True}) def test_POST_registration_token_invalid(self) -> None: @@ -372,15 +372,15 @@ def test_POST_registration_token_limit_uses(self) -> None: params1["auth"]["type"] = LoginType.DUMMY self.make_request(b"POST", self.url, params1) # Check pending=0 and completed=1 - res = self.get_success( + pending, completed = self.get_success( store.db_pool.simple_select_one( "registration_tokens", keyvalues={"token": token}, retcols=["pending", "completed"], ) ) - self.assertEqual(res["pending"], 0) - self.assertEqual(res["completed"], 1) + self.assertEqual(pending, 0) + self.assertEqual(completed, 1) # Check auth still fails when using token with session2 channel = self.make_request(b"POST", self.url, params2) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index d3e06bf6b3d1..534dc339f37f 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -243,7 +243,7 @@ def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: assert event is not None time_now = self.clock.time_msec() - serialized = self.serializer.serialize_event(event, time_now) + serialized = self.get_success(self.serializer.serialize_event(event, time_now)) return serialized diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index aaa4f3bba049..bb24ed6aa7b4 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -888,7 +888,8 @@ def _create_basic_room(self) -> Tuple[int, object]: ) def test_room_creation_ratelimiting(self) -> None: """ - Regression test for #14312, where ratelimiting was made too strict. + Regression test for https://github.com/matrix-org/synapse/issues/14312, + where ratelimiting was made too strict. Clients should be able to create 10 rooms in a row without hitting rate limits, using default rate limit config. (We override rate limiting config back to its default value.) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index d60665254eb5..07c81d7f7634 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -642,7 +642,7 @@ class SyncCacheTestCase(unittest.HomeserverTestCase): def test_noop_sync_does_not_tightloop(self) -> None: """If the sync times out, we shouldn't cache the result - Essentially a regression test for #8518. + Essentially a regression test for https://github.com/matrix-org/synapse/issues/8518. 
""" self.user_id = self.register_user("kermit", "monkey") self.tok = self.login("kermit", "monkey") diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 377243a1706d..7931a70abb29 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -16,13 +16,7 @@ from synapse.rest.well_known import well_known_resource from tests import unittest - -try: - import authlib # noqa: F401 - - HAS_AUTHLIB = True -except ImportError: - HAS_AUTHLIB = False +from tests.utils import HAS_AUTHLIB class WellKnownTests(unittest.HomeserverTestCase): diff --git a/tests/server.py b/tests/server.py index 08633fe640f4..2b63ed3dd866 100644 --- a/tests/server.py +++ b/tests/server.py @@ -43,9 +43,11 @@ from unittest.mock import Mock import attr +from incremental import Version from typing_extensions import ParamSpec from zope.interface import implementer +import twisted from twisted.internet import address, tcp, threads, udp from twisted.internet._resolver import SimpleResolverComplexifier from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed @@ -86,7 +88,7 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import PostgresEngine, create_engine +from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.types import ISynapseReactor, JsonDict from synapse.util import Clock @@ -474,6 +476,16 @@ def getHostByName( return fail(DNSLookupError("OH NO: unknown %s" % (name,))) return succeed(lookups[name]) + # In order for the TLS protocol tests to work, modify _get_default_clock + # on newer Twisted versions to use the test reactor's clock. + # + # This is *super* dirty since it is never undone and relies on the next + # test to overwrite it. 
+ if twisted.version > Version("Twisted", 23, 8, 0): + from twisted.protocols import tls + + tls._get_default_clock = lambda: self + self.nameResolver = SimpleResolverComplexifier(FakeResolver()) super().__init__() @@ -962,7 +974,7 @@ def setup_test_homeserver( database_config = { "name": "psycopg2", "args": { - "database": test_db, + "dbname": test_db, "host": POSTGRES_HOST, "password": POSTGRES_PASSWORD, "user": POSTGRES_USER, @@ -1017,18 +1029,15 @@ def setup_test_homeserver( # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() - if isinstance(db_engine, PostgresEngine): - import psycopg2.extensions - + if USE_POSTGRES_FOR_TESTS: db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, + dbname=POSTGRES_BASE_DB, user=POSTGRES_USER, host=POSTGRES_HOST, port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) - assert isinstance(db_conn, psycopg2.extensions.connection) - db_conn.autocommit = True + db_engine.attempt_to_set_autocommit(db_conn, True) cur = db_conn.cursor() cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) cur.execute( @@ -1053,13 +1062,12 @@ def setup_test_homeserver( hs.setup() - if isinstance(db_engine, PostgresEngine): + if USE_POSTGRES_FOR_TESTS: database_pool = hs.get_datastores().databases[0] # We need to do cleanup on PostgreSQL def cleanup() -> None: import psycopg2 - import psycopg2.extensions # Close all the db pools database_pool._db_pool.close() @@ -1068,14 +1076,13 @@ def cleanup() -> None: # Drop the test database db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, + dbname=POSTGRES_BASE_DB, user=POSTGRES_USER, host=POSTGRES_HOST, port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) - assert isinstance(db_conn, psycopg2.extensions.connection) - db_conn.autocommit = True + db_engine.attempt_to_set_autocommit(db_conn, True) cur = db_conn.cursor() # Try a few times to drop the DB. Some things may hold on to the diff --git a/tests/storage/databases/main/test_cache.py b/tests/storage/databases/main/test_cache.py new file mode 100644 index 000000000000..3f71f5d102da --- /dev/null +++ b/tests/storage/databases/main/test_cache.py @@ -0,0 +1,117 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from unittest.mock import Mock, call + +from synapse.storage.database import LoggingTransaction + +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.unittest import HomeserverTestCase + + +class CacheInvalidationTestCase(HomeserverTestCase): + def setUp(self) -> None: + super().setUp() + self.store = self.hs.get_datastores().main + + def test_bulk_invalidation(self) -> None: + master_invalidate = Mock() + + self.store._get_cached_user_device.invalidate = master_invalidate + + keys_to_invalidate = [ + ("a", "b"), + ("c", "d"), + ("e", "f"), + ("g", "h"), + ] + + def test_txn(txn: LoggingTransaction) -> None: + self.store._invalidate_cache_and_stream_bulk( + txn, + # This is an arbitrarily chosen cached store function. 
It was chosen + # because it takes more than one argument. We'll use this later to + # check that the invalidation was actioned over replication. + cache_func=self.store._get_cached_user_device, + key_tuples=keys_to_invalidate, + ) + + self.get_success( + self.store.db_pool.runInteraction( + "test_invalidate_cache_and_stream_bulk", test_txn + ) + ) + + master_invalidate.assert_has_calls( + [call(key_list) for key_list in keys_to_invalidate], + any_order=True, + ) + + +class CacheInvalidationOverReplicationTestCase(BaseMultiWorkerStreamTestCase): + def setUp(self) -> None: + super().setUp() + self.store = self.hs.get_datastores().main + + def test_bulk_invalidation_replicates(self) -> None: + """Like test_bulk_invalidation, but also checks the invalidations replicate.""" + master_invalidate = Mock() + worker_invalidate = Mock() + + self.store._get_cached_user_device.invalidate = master_invalidate + worker = self.make_worker_hs("synapse.app.generic_worker") + worker_ds = worker.get_datastores().main + worker_ds._get_cached_user_device.invalidate = worker_invalidate + + keys_to_invalidate = [ + ("a", "b"), + ("c", "d"), + ("e", "f"), + ("g", "h"), + ] + + def test_txn(txn: LoggingTransaction) -> None: + self.store._invalidate_cache_and_stream_bulk( + txn, + # This is an arbitrarily chosen cached store function. It was chosen + # because it takes more than one argument. We'll use this later to + # check that the invalidation was actioned over replication. + cache_func=self.store._get_cached_user_device, + key_tuples=keys_to_invalidate, + ) + + assert self.store._cache_id_gen is not None + initial_token = self.store._cache_id_gen.get_current_token() + self.get_success( + self.database_pool.runInteraction( + "test_invalidate_cache_and_stream_bulk", test_txn + ) + ) + second_token = self.store._cache_id_gen.get_current_token() + + self.assertGreaterEqual(second_token, initial_token + len(keys_to_invalidate)) + + self.get_success( + worker.get_replication_data_handler().wait_for_stream_position( + "master", "caches", second_token + ) + ) + + master_invalidate.assert_has_calls( + [call(key_list) for key_list in keys_to_invalidate], + any_order=True, + ) + worker_invalidate.assert_has_calls( + [call(key_list) for key_list in keys_to_invalidate], + any_order=True, + ) diff --git a/tests/storage/databases/main/test_end_to_end_keys.py b/tests/storage/databases/main/test_end_to_end_keys.py new file mode 100644 index 000000000000..23e6f82c75e3 --- /dev/null +++ b/tests/storage/databases/main/test_end_to_end_keys.py @@ -0,0 +1,121 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import List, Optional, Tuple + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage._base import db_to_json +from synapse.storage.database import LoggingTransaction +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class EndToEndKeyWorkerStoreTestCase(HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_get_master_cross_signing_key_updatable_before(self) -> None: + # Should return False, None when there is no master key. + alice = "@alice:test" + exists, timestamp = self.get_success( + self.store.get_master_cross_signing_key_updatable_before(alice) + ) + self.assertIs(exists, False) + self.assertIsNone(timestamp) + + # Upload a master key. + dummy_key = {"keys": {"a": "b"}} + self.get_success( + self.store.set_e2e_cross_signing_key(alice, "master", dummy_key) + ) + + # Should now find that the key exists. + exists, timestamp = self.get_success( + self.store.get_master_cross_signing_key_updatable_before(alice) + ) + self.assertIs(exists, True) + self.assertIsNone(timestamp) + + # Write an updateable_before timestamp. + written_timestamp = self.get_success( + self.store.allow_master_cross_signing_key_replacement_without_uia( + alice, 1000 + ) + ) + + # Should now find that the key exists. + exists, timestamp = self.get_success( + self.store.get_master_cross_signing_key_updatable_before(alice) + ) + self.assertIs(exists, True) + self.assertEqual(timestamp, written_timestamp) + + def test_master_replacement_only_applies_to_latest_master_key( + self, + ) -> None: + """We shouldn't allow updates w/o UIA to old master keys or other key types.""" + alice = "@alice:test" + # Upload two master keys. + key1 = {"keys": {"a": "b"}} + key2 = {"keys": {"c": "d"}} + key3 = {"keys": {"e": "f"}} + self.get_success(self.store.set_e2e_cross_signing_key(alice, "master", key1)) + self.get_success(self.store.set_e2e_cross_signing_key(alice, "other", key2)) + self.get_success(self.store.set_e2e_cross_signing_key(alice, "master", key3)) + + # Third key should be the current one. + key = self.get_success( + self.store.get_e2e_cross_signing_key(alice, "master", alice) + ) + self.assertEqual(key, key3) + + timestamp = self.get_success( + self.store.allow_master_cross_signing_key_replacement_without_uia( + alice, 1000 + ) + ) + assert timestamp is not None + + def check_timestamp_column( + txn: LoggingTransaction, + ) -> List[Tuple[JsonDict, Optional[int]]]: + """Fetch all rows for Alice's keys.""" + txn.execute( + """ + SELECT keydata, updatable_without_uia_before_ms + FROM e2e_cross_signing_keys + WHERE user_id = ? + ORDER BY stream_id ASC; + """, + (alice,), + ) + return [(db_to_json(keydata), ts) for keydata, ts in txn.fetchall()] + + values = self.get_success( + self.store.db_pool.runInteraction( + "check_timestamp_column", + check_timestamp_column, + ) + ) + self.assertEqual( + values, + [ + (key1, None), + (key2, None), + (key3, timestamp), + ], + ) diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 35f77052a729..6c4d44c05c5c 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -66,9 +66,9 @@ async def task() -> None: # Run the tasks to completion. 
# To work around `Linearizer`s using a different reactor to sleep when - # contended (#12841), we call `runUntilCurrent` on - # `twisted.internet.reactor`, which is a different reactor to that used - # by the homeserver. + # contended (https://github.com/matrix-org/synapse/issues/12841), we call + # `runUntilCurrent` on `twisted.internet.reactor`, which is a different + # reactor to that used by the homeserver. assert isinstance(reactor, ReactorBase) self.get_success(task1) reactor.runUntilCurrent() @@ -217,9 +217,9 @@ async def task() -> None: # Run the tasks to completion. # To work around `Linearizer`s using a different reactor to sleep when - # contended (#12841), we call `runUntilCurrent` on - # `twisted.internet.reactor`, which is a different reactor to that used - # by the homeserver. + # contended (https://github.com/matrix-org/synapse/issues/12841), we call + # `runUntilCurrent` on `twisted.internet.reactor`, which is a different + # reactor to that used by the homeserver. assert isinstance(reactor, ReactorBase) self.get_success(task1) reactor.runUntilCurrent() @@ -269,9 +269,9 @@ async def task() -> None: # Run the tasks to completion. # To work around `Linearizer`s using a different reactor to sleep when - # contended (#12841), we call `runUntilCurrent` on - # `twisted.internet.reactor`, which is a different reactor to that used - # by the homeserver. + # contended (https://github.com/matrix-org/synapse/issues/12841), we call + # `runUntilCurrent` on `twisted.internet.reactor`, which is a different + # reactor to that used by the homeserver. assert isinstance(reactor, ReactorBase) self.get_success(task1) reactor.runUntilCurrent() diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index 71db47405ef5..98b01086bcf0 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -117,7 +117,7 @@ def drop_receipts_unique_index(txn: LoggingTransaction) -> None: if expected_row is not None: columns += expected_row.keys() - rows = self.get_success( + row_tuples = self.get_success( self.store.db_pool.simple_select_list( table=table, keyvalues={ @@ -134,22 +134,22 @@ def drop_receipts_unique_index(txn: LoggingTransaction) -> None: if expected_row is not None: self.assertEqual( - len(rows), + len(row_tuples), 1, f"Background update did not leave behind latest receipt in {table}", ) self.assertEqual( - rows[0], - { - "room_id": room_id, - "receipt_type": receipt_type, - "user_id": user_id, - **expected_row, - }, + row_tuples[0], + ( + room_id, + receipt_type, + user_id, + *expected_row.values(), + ), ) else: self.assertEqual( - len(rows), + len(row_tuples), 0, f"Background update did not remove all duplicate receipts from {table}", ) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8bbf936ae9d7..8cbc974ac431 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -14,7 +14,7 @@ # limitations under the License. 
import secrets -from typing import Generator, Tuple +from typing import Generator, List, Tuple, cast from twisted.test.proto_helpers import MemoryReactor @@ -47,15 +47,15 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) def _dump_table_to_tuple(self) -> Generator[Tuple[int, str, str], None, None]: - res = self.get_success( - self.storage.db_pool.simple_select_list( - self.table_name, None, ["id, username, value"] - ) + yield from cast( + List[Tuple[int, str, str]], + self.get_success( + self.storage.db_pool.simple_select_list( + self.table_name, None, ["id, username, value"] + ) + ), ) - for i in res: - yield (i["id"], i["username"], i["value"]) - def test_upsert_many(self) -> None: """ Upsert_many will perform the upsert operation across a batch of data. diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index abf7d0564d81..67ea640902c1 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List, Tuple, cast from unittest.mock import AsyncMock, Mock import yaml @@ -456,8 +457,8 @@ def test_not_null_constraint(self) -> None: ); """ self.get_success( - self.store.db_pool.execute( - "test_not_null_constraint", lambda _: None, table_sql + self.store.db_pool.runInteraction( + "test_not_null_constraint", lambda txn: txn.execute(table_sql) ) ) @@ -465,8 +466,8 @@ def test_not_null_constraint(self) -> None: # using SQLite. index_sql = "CREATE INDEX test_index ON test_constraint(a)" self.get_success( - self.store.db_pool.execute( - "test_not_null_constraint", lambda _: None, index_sql + self.store.db_pool.runInteraction( + "test_not_null_constraint", lambda txn: txn.execute(index_sql) ) ) @@ -526,15 +527,18 @@ def delta(txn: LoggingTransaction) -> None: self.wait_for_background_updates() # Check the correct values are in the new table. - rows = self.get_success( - self.store.db_pool.simple_select_list( - table="test_constraint", - keyvalues={}, - retcols=("a", "b"), - ) + rows = cast( + List[Tuple[int, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="test_constraint", + keyvalues={}, + retcols=("a", "b"), + ) + ), ) - self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}]) + self.assertCountEqual(rows, [(1, 1), (3, 3)]) # And check that invalid rows get correctly rejected. self.get_failure( @@ -570,13 +574,13 @@ def test_foreign_constraint(self) -> None: ); """ self.get_success( - self.store.db_pool.execute( - "test_foreign_key_constraint", lambda _: None, base_sql + self.store.db_pool.runInteraction( + "test_foreign_key_constraint", lambda txn: txn.execute(base_sql) ) ) self.get_success( - self.store.db_pool.execute( - "test_foreign_key_constraint", lambda _: None, table_sql + self.store.db_pool.runInteraction( + "test_foreign_key_constraint", lambda txn: txn.execute(table_sql) ) ) @@ -640,14 +644,17 @@ def delta(txn: LoggingTransaction) -> None: self.wait_for_background_updates() # Check the correct values are in the new table. 
- rows = self.get_success( - self.store.db_pool.simple_select_list( - table="test_constraint", - keyvalues={}, - retcols=("a", "b"), - ) + rows = cast( + List[Tuple[int, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="test_constraint", + keyvalues={}, + retcols=("a", "b"), + ) + ), ) - self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}]) + self.assertCountEqual(rows, [(1, 1), (3, 3)]) # And check that invalid rows get correctly rejected. self.get_failure( diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 256d28e4c99a..491e6d5e63a8 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -14,7 +14,7 @@ from collections import OrderedDict from typing import Generator -from unittest.mock import Mock +from unittest.mock import Mock, call, patch from twisted.internet import defer @@ -24,43 +24,90 @@ from tests import unittest from tests.server import TestHomeServer -from tests.utils import default_config +from tests.utils import USE_POSTGRES_FOR_TESTS, default_config class SQLBaseStoreTestCase(unittest.TestCase): """Test the "simple" SQL generating methods in SQLBaseStore.""" def setUp(self) -> None: - self.db_pool = Mock(spec=["runInteraction"]) + # This is the Twisted connection pool. + conn_pool = Mock(spec=["runInteraction", "runWithConnection"]) self.mock_txn = Mock() - self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"]) + if USE_POSTGRES_FOR_TESTS: + # To avoid testing psycopg2 itself, patch execute_batch/execute_values + # to assert how it is called. + from psycopg2 import extras + + self.mock_execute_batch = Mock() + self.execute_batch_patcher = patch.object( + extras, "execute_batch", new=self.mock_execute_batch + ) + self.execute_batch_patcher.start() + self.mock_execute_values = Mock() + self.execute_values_patcher = patch.object( + extras, "execute_values", new=self.mock_execute_values + ) + self.execute_values_patcher.start() + + self.mock_conn = Mock( + spec_set=[ + "cursor", + "rollback", + "commit", + "closed", + "reconnect", + "set_session", + "encoding", + ] + ) + self.mock_conn.encoding = "UNICODE" + else: + self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"]) self.mock_conn.cursor.return_value = self.mock_txn + self.mock_txn.connection = self.mock_conn self.mock_conn.rollback.return_value = None # Our fake runInteraction just runs synchronously inline def runInteraction(func, *args, **kwargs) -> defer.Deferred: # type: ignore[no-untyped-def] return defer.succeed(func(self.mock_txn, *args, **kwargs)) - self.db_pool.runInteraction = runInteraction + conn_pool.runInteraction = runInteraction def runWithConnection(func, *args, **kwargs): # type: ignore[no-untyped-def] return defer.succeed(func(self.mock_conn, *args, **kwargs)) - self.db_pool.runWithConnection = runWithConnection + conn_pool.runWithConnection = runWithConnection config = default_config(name="test", parse=True) hs = TestHomeServer("test", config=config) - sqlite_config = {"name": "sqlite3"} - engine = create_engine(sqlite_config) + if USE_POSTGRES_FOR_TESTS: + db_config = {"name": "psycopg2", "args": {}} + else: + db_config = {"name": "sqlite3"} + engine = create_engine(db_config) + fake_engine = Mock(wraps=engine) fake_engine.in_transaction.return_value = False + fake_engine.module.OperationalError = engine.module.OperationalError + fake_engine.module.DatabaseError = engine.module.DatabaseError + fake_engine.module.IntegrityError = engine.module.IntegrityError + # Don't convert param style to make 
assertions easier. + fake_engine.convert_param_style = lambda sql: sql + # To fix isinstance(...) checks. + fake_engine.__class__ = engine.__class__ # type: ignore[assignment] - db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) - db._db_pool = self.db_pool + db = DatabasePool(Mock(), Mock(config=db_config), fake_engine) + db._db_pool = conn_pool self.datastore = SQLBaseStore(db, None, hs) # type: ignore[arg-type] + def tearDown(self) -> None: + if USE_POSTGRES_FOR_TESTS: + self.execute_batch_patcher.stop() + self.execute_values_patcher.stop() + @defer.inlineCallbacks def test_insert_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -71,7 +118,7 @@ def test_insert_1col(self) -> Generator["defer.Deferred[object]", object, None]: ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "INSERT INTO tablename (columname) VALUES(?)", ("Value",) ) @@ -87,10 +134,65 @@ def test_insert_3cols(self) -> Generator["defer.Deferred[object]", object, None] ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", (1, 2, 3) ) + @defer.inlineCallbacks + def test_insert_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_insert_many( + table="tablename", + keys=( + "col1", + "col2", + ), + values=[ + ( + "val1", + "val2", + ), + ("val3", "val4"), + ], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (col1, col2) VALUES ?", + [("val1", "val2"), ("val3", "val4")], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (col1, col2) VALUES(?, ?)", + [("val1", "val2"), ("val3", "val4")], + ) + + @defer.inlineCallbacks + def test_insert_many_no_iterable( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_insert_many( + table="tablename", + keys=( + "col1", + "col2", + ), + values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_not_called() + else: + self.mock_txn.executemany.assert_not_called() + @defer.inlineCallbacks def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -103,7 +205,7 @@ def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, No ) self.assertEqual("Value", value) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -120,8 +222,8 @@ def test_select_one_3col(self) -> Generator["defer.Deferred[object]", object, No ) ) - self.assertEqual({"colA": 1, "colB": 2, "colC": 3}, ret) - self.mock_txn.execute.assert_called_with( + self.assertEqual((1, 2, 3), ret) + self.mock_txn.execute.assert_called_once_with( "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -141,12 +243,12 @@ def test_select_one_missing( ) ) - self.assertFalse(ret) + self.assertIsNone(ret) @defer.inlineCallbacks def test_select_list(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 3 - self.mock_txn.__iter__ = Mock(return_value=iter([(1,), (2,), (3,)])) + self.mock_txn.fetchall.return_value = [(1,), (2,), (3,)] self.mock_txn.description = (("colA", 
None, None, None, None, None, None),) ret = yield defer.ensureDeferred( @@ -155,11 +257,59 @@ def test_select_list(self) -> Generator["defer.Deferred[object]", object, None]: ) ) - self.assertEqual([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret) - self.mock_txn.execute.assert_called_with( + self.assertEqual([(1,), (2,), (3,)], ret) + self.mock_txn.execute.assert_called_once_with( "SELECT colA FROM tablename WHERE keycol = ?", ["A set"] ) + @defer.inlineCallbacks + def test_select_many_batch( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 3 + self.mock_txn.fetchall.side_effect = [[(1,), (2,)], [(3,)]] + + ret = yield defer.ensureDeferred( + self.datastore.db_pool.simple_select_many_batch( + table="tablename", + column="col1", + iterable=("val1", "val2", "val3"), + retcols=("col2",), + keyvalues={"col3": "val4"}, + batch_size=2, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?", + [["val1", "val2"], "val4"], + ), + call( + "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?", + [["val3"], "val4"], + ), + ], + ) + self.assertEqual([(1,), (2,), (3,)], ret) + + def test_select_many_no_iterable(self) -> None: + self.mock_txn.rowcount = 3 + self.mock_txn.fetchall.side_effect = [(1,), (2,)] + + ret = self.datastore.db_pool.simple_select_many_txn( + self.mock_txn, + table="tablename", + column="col1", + iterable=(), + retcols=("col2",), + keyvalues={"col3": "val4"}, + ) + + self.mock_txn.execute.assert_not_called() + self.assertEqual([], ret) + @defer.inlineCallbacks def test_update_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -172,7 +322,7 @@ def test_update_one_1col(self) -> Generator["defer.Deferred[object]", object, No ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "UPDATE tablename SET columnname = ? WHERE keycol = ?", ["New Value", "TheKey"], ) @@ -191,11 +341,69 @@ def test_update_one_4cols( ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?", [3, 4, 1, 2], ) + @defer.inlineCallbacks + def test_update_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[("val1", "val2")], + value_names=("col3",), + value_values=[("val3",)], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_batch.assert_called_once_with( + self.mock_txn, + "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", + [("val3", "val1", "val2")], + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", + [("val3", "val1", "val2")], + ) + + # key_values and value_values must be the same length. 
+ with self.assertRaises(ValueError): + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[("val1", "val2")], + value_names=("col3",), + value_values=[], + desc="", + ) + ) + + @defer.inlineCallbacks + def test_update_many_no_iterable( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[], + value_names=("col3",), + value_values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_batch.assert_not_called() + else: + self.mock_txn.executemany.assert_not_called() + @defer.inlineCallbacks def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -206,6 +414,393 @@ def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "DELETE FROM tablename WHERE keycol = ?", ["Go away"] ) + + @defer.inlineCallbacks + def test_delete_many(self) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 2 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=("val1", "val2"), + keyvalues={"col2": "val3"}, + desc="", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "DELETE FROM tablename WHERE col1 = ANY(?) AND col2 = ?", + [["val1", "val2"], "val3"], + ) + self.assertEqual(result, 2) + + @defer.inlineCallbacks + def test_delete_many_no_iterable( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=(), + keyvalues={"col2": "val3"}, + desc="", + ) + ) + + self.mock_txn.execute.assert_not_called() + self.assertEqual(result, 0) + + @defer.inlineCallbacks + def test_delete_many_no_keyvalues( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 2 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=("val1", "val2"), + keyvalues={}, + desc="", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "DELETE FROM tablename WHERE col1 = ANY(?)", [["val1", "val2"]] + ) + self.assertEqual(result, 2) + + @defer.inlineCallbacks + def test_upsert(self) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname) VALUES (?) 
ON CONFLICT (columnname) DO NOTHING", + ["value"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_with_insertion( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, thirdcol, othercol) VALUES (?, ?, ?) ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "insertionval", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_with_where( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + where_clause="thirdcol IS NULL", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) WHERE thirdcol IS NULL DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert_many( + table="tablename", + key_names=["keycol1", "keycol2"], + key_values=[["keyval1", "keyval2"], ["keyval3", "keyval4"]], + value_names=["valuecol3"], + value_values=[["val5"], ["val6"]], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES ? ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3", + [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES (?, ?, ?) ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3", + [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")], + ) + + @defer.inlineCallbacks + def test_upsert_many_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert_many( + table="tablename", + key_names=["columnname"], + key_values=[["oldvalue"]], + value_names=[], + value_values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (columnname) VALUES ? ON CONFLICT (columnname) DO NOTHING", + [("oldvalue",)], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (columnname) VALUES (?) 
ON CONFLICT (columnname) DO NOTHING", + [("oldvalue",)], + ) + + @defer.inlineCallbacks + def test_upsert_emulated_no_values_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.fetchall.return_value = [(1,)] + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call("SELECT 1 FROM tablename WHERE columnname = ?", ["value"]), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "SELECT 1 FROM tablename WHERE columnname = ?", ["value"] + ) + self.assertFalse(result) + + @defer.inlineCallbacks + def test_upsert_emulated_no_values_not_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.fetchall.return_value = [] + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "SELECT 1 FROM tablename WHERE columnname = ?", + ["value"], + ), + call("INSERT INTO tablename (columnname) VALUES (?)", ["value"]), + ], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_insertion_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "UPDATE tablename SET othercol = ? WHERE columnname = ?", + ["newvalue", "oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "UPDATE tablename SET othercol = ? WHERE columnname = ?", + ["newvalue", "oldvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_insertion_not_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 0 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "UPDATE tablename SET othercol = ? 
WHERE columnname = ?", + ["newvalue", "oldvalue"], + ), + call( + "INSERT INTO tablename (columnname, othercol, thirdcol) VALUES (?, ?, ?)", + ["oldvalue", "newvalue", "insertionval"], + ), + ] + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_where( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + where_clause="thirdcol IS NULL", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL", + ["newvalue", "oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL", + ["newvalue", "oldvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_where_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={}, + where_clause="thirdcol IS NULL", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL", + ["oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL", + ["oldvalue"], + ) + self.assertFalse(result) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 0c054a598f2f..8e4393d843bb 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Dict, List, Optional, Tuple, cast from unittest.mock import AsyncMock from parameterized import parameterized @@ -97,26 +97,26 @@ def test_insert_new_client_ip_none_device_id(self) -> None: self.reactor.advance(200) self.pump(0) - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual( - result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": None, - "last_seen": 12345678000, - } - ], + result, [("access_token", "ip", "user_agent", None, 12345678000)] ) # Add another & trigger the storage loop @@ -128,26 +128,26 @@ def test_insert_new_client_ip_none_device_id(self) -> None: self.reactor.advance(10) self.pump(0) - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) # Only one result, has been upserted. 
self.assertEqual( - result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": None, - "last_seen": 12345878000, - } - ], + result, [("access_token", "ip", "user_agent", None, 12345878000)] ) @parameterized.expand([(False,), (True,)]) @@ -177,25 +177,23 @@ def test_get_last_client_ip_by_device(self, after_persisting: bool) -> None: self.reactor.advance(10) else: # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="devices", - keyvalues={}, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + db_result = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + self.get_success( + self.store.db_pool.simple_select_list( + table="devices", + keyvalues={}, + retcols=( + "user_id", + "ip", + "user_agent", + "device_id", + "last_seen", + ), + ), ), ) - self.assertEqual( - db_result, - [ - { - "user_id": user_id, - "device_id": device_id, - "ip": None, - "user_agent": None, - "last_seen": None, - }, - ], - ) + self.assertEqual(db_result, [(user_id, None, None, device_id, None)]) result = self.get_success( self.store.get_last_client_ip_by_device(user_id, device_id) @@ -261,30 +259,21 @@ def test_get_last_client_ip_by_device_combined_data(self) -> None: ) # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="devices", - keyvalues={}, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + db_result = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + self.get_success( + self.store.db_pool.simple_select_list( + table="devices", + keyvalues={}, + retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + ), ), ) self.assertCountEqual( db_result, [ - { - "user_id": user_id, - "device_id": device_id_1, - "ip": "ip_1", - "user_agent": "user_agent_1", - "last_seen": 12345678000, - }, - { - "user_id": user_id, - "device_id": device_id_2, - "ip": "ip_2", - "user_agent": "user_agent_2", - "last_seen": 12345678000, - }, + (user_id, "ip_1", "user_agent_1", device_id_1, 12345678000), + (user_id, "ip_2", "user_agent_2", device_id_2, 12345678000), ], ) @@ -385,28 +374,21 @@ def test_get_user_ip_and_agents_combined_data(self) -> None: ) # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={}, - retcols=("access_token", "ip", "user_agent", "last_seen"), + db_result = cast( + List[Tuple[str, str, str, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={}, + retcols=("access_token", "ip", "user_agent", "last_seen"), + ), ), ) self.assertEqual( db_result, [ - { - "access_token": "access_token", - "ip": "ip_1", - "user_agent": "user_agent_1", - "last_seen": 12345678000, - }, - { - "access_token": "access_token", - "ip": "ip_2", - "user_agent": "user_agent_2", - "last_seen": 12345678000, - }, + ("access_token", "ip_1", "user_agent_1", 12345678000), + ("access_token", "ip_2", "user_agent_2", 12345678000), ], ) @@ -600,39 +582,49 @@ def test_old_user_ips_pruned(self) -> None: self.reactor.advance(200) # We should see that in the DB - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - 
desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual( result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": device_id, - "last_seen": 0, - } - ], + [("access_token", "ip", "user_agent", device_id, 0)], ) # Now advance by a couple of months self.reactor.advance(60 * 24 * 60 * 60) # We should get no results. - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual(result, []) @@ -696,28 +688,26 @@ def test_invalid_user_agents_are_ignored(self) -> None: self.reactor.advance(200) # We should see that in the DB - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) # ensure user1 is filtered out - self.assertEqual( - result, - [ - { - "access_token": access_token2, - "ip": "ip", - "user_agent": "user_agent", - "device_id": device_id2, - "last_seen": 0, - } - ], - ) + self.assertEqual(result, [(access_token2, "ip", "user_agent", device_id2, 0)]) class ClientIpAuthTestCase(unittest.HomeserverTestCase): diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 8cd7c89ca2f8..4d0ebb550d29 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -213,7 +213,8 @@ def test_successful_retry(self) -> None: after_callback, exception_callback = self._run_interaction(_test_txn) # Calling both `after_callback`s when the first attempt failed is rather - # surprising (#12184). Let's document the behaviour in a test. + # surprising (https://github.com/matrix-org/synapse/issues/12184). + # Let's document the behaviour in a test. after_callback.assert_has_calls( [ call(123, 456, extra=789), diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index d3e20f44b29a..66a027887d83 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -1060,7 +1060,7 @@ def test_get_backfill_points_in_room_works_after_many_failed_pull_attempts_that_ self, ) -> None: """ - A test that reproduces #13929 (Postgres only). + A test that reproduces https://github.com/matrix-org/synapse/issues/13929 (Postgres only). Test to make sure we can still get backfill points after many failed pull attempts that cause us to backoff to the limit. 
Even if the backoff formula diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 9174fb096470..fd53b0644c12 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -259,8 +259,9 @@ def test_empty(self) -> None: id_gen = self._create_id_generator() - # The table is empty so we expect an empty map for positions - self.assertEqual(id_gen.get_positions(), {}) + # The table is empty so we expect the map for positions to have a dummy + # minimum value. + self.assertEqual(id_gen.get_positions(), {"master": 1}) def test_single_instance(self) -> None: """Test that reads and writes from a single process are handled @@ -349,15 +350,12 @@ def test_multi_instance(self) -> None: first_id_gen = self._create_id_generator("first", writers=["first", "second"]) second_id_gen = self._create_id_generator("second", writers=["first", "second"]) - # The first ID gen will notice that it can advance its token to 7 as it - # has no in progress writes... self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) - # ... but the second ID gen doesn't know that. self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) # Try allocating a new ID gen and check that we only see position @@ -398,6 +396,56 @@ async def _get_next_async2() -> None: second_id_gen.advance("first", 8) self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9}) + def test_multi_instance_empty_row(self) -> None: + """Test that reads and writes from multiple processes are handled + correctly, when one of the writers starts without any rows. + """ + # Insert some rows for two out of three of the ID gens. + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator( + "first", writers=["first", "second", "third"] + ) + second_id_gen = self._create_id_generator( + "second", writers=["first", "second", "third"] + ) + third_id_gen = self._create_id_generator( + "third", writers=["first", "second", "third"] + ) + + self.assertEqual( + first_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("third"), 7) + + self.assertEqual( + second_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("third"), 7) + + # Try allocating a new ID gen and check that we only see position + # advanced after we leave the context manager. 
+ + async def _get_next_async() -> None: + async with third_id_gen.get_next() as stream_id: + self.assertEqual(stream_id, 8) + + self.assertEqual( + third_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(third_id_gen.get_persisted_upto_position(), 7) + + self.get_success(_get_next_async()) + + self.assertEqual( + third_id_gen.get_positions(), {"first": 3, "second": 7, "third": 8} + ) + def test_get_next_txn(self) -> None: """Test that the `get_next_txn` function works correctly.""" @@ -600,6 +648,70 @@ def _insert(txn: Cursor) -> None: with self.assertRaises(IncorrectDatabaseSetup): self._create_id_generator("first") + def test_minimal_local_token(self) -> None: + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator("first", writers=["first", "second"]) + second_id_gen = self._create_id_generator("second", writers=["first", "second"]) + + self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(first_id_gen.get_minimal_local_current_token(), 3) + + self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(second_id_gen.get_minimal_local_current_token(), 7) + + def test_current_token_gap(self) -> None: + """Test that getting the current token for a writer returns the maximal + token when there are no writes. + """ + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator( + "first", writers=["first", "second", "third"] + ) + second_id_gen = self._create_id_generator( + "second", writers=["first", "second", "third"] + ) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(second_id_gen.get_current_token(), 7) + + # Check that the first ID gen advancing causes the second ID gen to + # advance (as the second ID gen has nothing in flight). + + async def _get_next_async() -> None: + async with first_id_gen.get_next_mult(2): + pass + + self.get_success(_get_next_async()) + second_id_gen.advance("first", 9) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 9) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 9) + self.assertEqual(second_id_gen.get_current_token(), 7) + + # Check that the first ID gen advancing doesn't advance the second ID + # gen when the second ID gen has stuff in flight. 
+ self.get_success(_get_next_async()) + + ctxmgr = second_id_gen.get_next() + self.get_success(ctxmgr.__aenter__()) + + second_id_gen.advance("first", 11) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 11) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 9) + self.assertEqual(second_id_gen.get_current_token(), 7) + + self.get_success(ctxmgr.__aexit__(None, None, None)) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 11) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 12) + self.assertEqual(second_id_gen.get_current_token(), 7) + class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """Tests MultiWriterIdGenerator that produce *negative* stream IDs.""" @@ -712,8 +824,8 @@ async def _get_next_async() -> None: self.get_success(_get_next_async()) - self.assertEqual(id_gen_1.get_positions(), {"first": -1}) - self.assertEqual(id_gen_2.get_positions(), {"first": -1}) + self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -1}) + self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -1}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -1) self.assertEqual(id_gen_2.get_persisted_upto_position(), -1) @@ -822,11 +934,11 @@ def test_load_existing_stream(self) -> None: second_id_gen = self._create_id_generator("second", writers=["first", "second"]) self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 6}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) - self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 6) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) self.assertEqual(first_id_gen.get_persisted_upto_position(), 7) self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) self.assertEqual(second_id_gen.get_persisted_upto_position(), 7) diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index b8823d6993cf..01c0e5e6716f 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -39,11 +39,11 @@ def test_get_users_paginate(self) -> None: ) self.assertEqual(1, total) - self.assertEqual(self.displayname, users.pop()["displayname"]) + self.assertEqual(self.displayname, users.pop().displayname) users, total = self.get_success( self.store.get_users_paginate(0, 10, name="BC", guests=False) ) self.assertEqual(1, total) - self.assertEqual(self.displayname, users.pop()["displayname"]) + self.assertEqual(self.displayname, users.pop().displayname) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 95f99f413011..6afb5403bd13 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -120,7 +120,7 @@ def f(txn: LoggingTransaction) -> None: res = self.get_success( self.store.db_pool.execute( - "", None, "SELECT full_user_id from profiles ORDER BY full_user_id" + "", "SELECT full_user_id from profiles ORDER BY full_user_id" ) ) self.assertEqual(len(res), len(expected_values)) diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 1e27f2c275a1..d3ffe963d32a 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -42,16 +42,9 @@ def prepare(self, 
reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) def test_get_room(self) -> None: - res = self.get_success(self.store.get_room(self.room.to_string())) - assert res is not None - self.assertLessEqual( - { - "room_id": self.room.to_string(), - "creator": self.u_creator.to_string(), - "is_public": True, - }.items(), - res.items(), - ) + room = self.get_success(self.store.get_room(self.room.to_string())) + assert room is not None + self.assertTrue(room[0]) def test_get_room_unknown_room(self) -> None: self.assertIsNone(self.get_success(self.store.get_room("!uknown:test"))) @@ -59,14 +52,9 @@ def test_get_room_unknown_room(self) -> None: def test_get_room_with_stats(self) -> None: res = self.get_success(self.store.get_room_with_stats(self.room.to_string())) assert res is not None - self.assertLessEqual( - { - "room_id": self.room.to_string(), - "creator": self.u_creator.to_string(), - "public": True, - }.items(), - res.items(), - ) + self.assertEqual(res.room_id, self.room.to_string()) + self.assertEqual(res.creator, self.u_creator.to_string()) + self.assertTrue(res.public) def test_get_room_with_stats_unknown_room(self) -> None: self.assertIsNone( diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 52ffa91c8158..e3dc3623cbf0 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -93,7 +93,7 @@ def test_non_string(self) -> None: both strings and integers. When using Postgres, integers are automatically converted to strings. - Regression test for #11918. + Regression test for https://github.com/matrix-org/synapse/issues/11918. """ store = self.hs.get_datastores().main diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index f4c4661aafd8..36fcab06b572 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import List, Optional, Tuple, cast + from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import Membership @@ -110,21 +112,24 @@ def test_count_known_servers_stat_counter_enabled(self) -> None: def test__null_byte_in_display_name_properly_handled(self) -> None: room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) - res = self.get_success( - self.store.db_pool.simple_select_list( - "room_memberships", - {"user_id": "@alice:test"}, - ["display_name", "event_id"], - ) + res = cast( + List[Tuple[Optional[str], str]], + self.get_success( + self.store.db_pool.simple_select_list( + "room_memberships", + {"user_id": "@alice:test"}, + ["display_name", "event_id"], + ) + ), ) # Check that we only got one result back self.assertEqual(len(res), 1) # Check that alice's display name is "alice" - self.assertEqual(res[0]["display_name"], "alice") + self.assertEqual(res[0][0], "alice") # Grab the event_id to use later - event_id = res[0]["event_id"] + event_id = res[0][1] # Create a profile with the offending null byte in the display name new_profile = {"displayname": "ali\u0000ce"} @@ -139,21 +144,24 @@ def test__null_byte_in_display_name_properly_handled(self) -> None: tok=self.t_alice, ) - res2 = self.get_success( - self.store.db_pool.simple_select_list( - "room_memberships", - {"user_id": "@alice:test"}, - ["display_name", "event_id"], - ) + res2 = cast( + List[Tuple[Optional[str], str]], + self.get_success( + self.store.db_pool.simple_select_list( + "room_memberships", + {"user_id": "@alice:test"}, + ["display_name", "event_id"], + ) + ), ) # Check that we only have two results self.assertEqual(len(res2), 2) # Filter out the previous event using the event_id we grabbed above - row = [row for row in res2 if row["event_id"] != event_id] + row = [row for row in res2 if row[1] != event_id] # Check that alice's display name is now None - self.assertEqual(row[0]["display_name"], None) + self.assertIsNone(row[0][0]) def test_room_is_locally_forgotten(self) -> None: """Test that when the last local user has forgotten a room it is known as forgotten.""" diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 0b9446c36c05..2715c73f16e5 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -13,6 +13,7 @@ # limitations under the License. 
import logging +from typing import List, Tuple, cast from immutabledict import immutabledict @@ -584,18 +585,21 @@ def test_batched_state_group_storing(self) -> None: ) # check that only state events are in state_groups, and all state events are in state_groups - res = self.get_success( - self.store.db_pool.simple_select_list( - table="state_groups", - keyvalues=None, - retcols=("event_id",), - ) + res = cast( + List[Tuple[str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups", + keyvalues=None, + retcols=("event_id",), + ) + ), ) events = [] for result in res: - self.assertNotIn(event3.event_id, result) - events.append(result.get("event_id")) + self.assertNotIn(event3.event_id, result) # XXX + events.append(result[0]) for event, _ in processed_events_and_context: if event.is_state(): @@ -606,23 +610,29 @@ def test_batched_state_group_storing(self) -> None: # has an entry and prev event in state_group_edges for event, context in processed_events_and_context: if event.is_state(): - state = self.get_success( - self.store.db_pool.simple_select_list( - table="state_groups_state", - keyvalues={"state_group": context.state_group_after_event}, - retcols=("type", "state_key"), - ) - ) - self.assertEqual(event.type, state[0].get("type")) - self.assertEqual(event.state_key, state[0].get("state_key")) - - groups = self.get_success( - self.store.db_pool.simple_select_list( - table="state_group_edges", - keyvalues={"state_group": str(context.state_group_after_event)}, - retcols=("*",), - ) + state = cast( + List[Tuple[str, str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups_state", + keyvalues={"state_group": context.state_group_after_event}, + retcols=("type", "state_key"), + ) + ), ) - self.assertEqual( - context.state_group_before_event, groups[0].get("prev_state_group") + self.assertEqual(event.type, state[0][0]) + self.assertEqual(event.state_key, state[0][1]) + + groups = cast( + List[Tuple[str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_group_edges", + keyvalues={ + "state_group": str(context.state_group_after_event) + }, + retcols=("prev_state_group",), + ) + ), ) + self.assertEqual(context.state_group_before_event, groups[0][0]) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 8c72aa17225a..822c41dd9ff8 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import re -from typing import Any, Dict, Set, Tuple +from typing import Any, Dict, List, Optional, Set, Tuple, cast from unittest import mock from unittest.mock import Mock, patch @@ -62,14 +62,13 @@ async def get_users_in_public_rooms(self) -> Set[Tuple[str, str]]: Returns a list of tuples (user_id, room_id) where room_id is public and contains the user with the given id. """ - r = await self.store.db_pool.simple_select_list( - "users_in_public_rooms", None, ("user_id", "room_id") + r = cast( + List[Tuple[str, str]], + await self.store.db_pool.simple_select_list( + "users_in_public_rooms", None, ("user_id", "room_id") + ), ) - - retval = set() - for i in r: - retval.add((i["user_id"], i["room_id"])) - return retval + return set(r) async def get_users_who_share_private_rooms(self) -> Set[Tuple[str, str, str]]: """Fetch the entire `users_who_share_private_rooms` table. 
@@ -78,27 +77,30 @@ async def get_users_who_share_private_rooms(self) -> Set[Tuple[str, str, str]]: to the rows of `users_who_share_private_rooms`. """ - rows = await self.store.db_pool.simple_select_list( - "users_who_share_private_rooms", - None, - ["user_id", "other_user_id", "room_id"], + rows = cast( + List[Tuple[str, str, str]], + await self.store.db_pool.simple_select_list( + "users_who_share_private_rooms", + None, + ["user_id", "other_user_id", "room_id"], + ), ) - rv = set() - for row in rows: - rv.add((row["user_id"], row["other_user_id"], row["room_id"])) - return rv + return set(rows) async def get_users_in_user_directory(self) -> Set[str]: """Fetch the set of users in the `user_directory` table. This is useful when checking we've correctly excluded users from the directory. """ - result = await self.store.db_pool.simple_select_list( - "user_directory", - None, - ["user_id"], + result = cast( + List[Tuple[str]], + await self.store.db_pool.simple_select_list( + "user_directory", + None, + ["user_id"], + ), ) - return {row["user_id"] for row in result} + return {row[0] for row in result} async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]: """Fetch users and their profiles from the `user_directory` table. @@ -107,16 +109,17 @@ async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]: It's almost the entire contents of the `user_directory` table: the only thing missing is an unused room_id column. """ - rows = await self.store.db_pool.simple_select_list( - "user_directory", - None, - ("user_id", "display_name", "avatar_url"), + rows = cast( + List[Tuple[str, Optional[str], Optional[str]]], + await self.store.db_pool.simple_select_list( + "user_directory", + None, + ("user_id", "display_name", "avatar_url"), + ), ) return { - row["user_id"]: ProfileInfo( - display_name=row["display_name"], avatar_url=row["avatar_url"] - ) - for row in rows + user_id: ProfileInfo(display_name=display_name, avatar_url=avatar_url) + for user_id, display_name, avatar_url in rows } async def get_tables( diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py index d4637d9d1ebb..2da6a018e8a8 100644 --- a/tests/storage/test_user_filters.py +++ b/tests/storage/test_user_filters.py @@ -87,7 +87,7 @@ def f(txn: LoggingTransaction) -> None: res = self.get_success( self.store.db_pool.execute( - "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" + "", "SELECT full_user_id from user_filters ORDER BY full_user_id" ) ) self.assertEqual(len(res), len(expected_values)) diff --git a/tests/unittest.py b/tests/unittest.py index 99ad02eb0613..79c47fc3cc96 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -30,6 +30,7 @@ Generic, Iterable, List, + Mapping, NoReturn, Optional, Tuple, @@ -251,7 +252,7 @@ def assertObjectHasAttributes(self, attrs: Dict[str, object], obj: object) -> No except AssertionError as e: raise (type(e))(f"Assert error for '.{key}':") from e - def assert_dict(self, required: dict, actual: dict) -> None: + def assert_dict(self, required: Mapping, actual: Mapping) -> None: """Does a partial assert of a dict. Args: diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index aa20fe6780d9..c1392d8bfc27 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -89,7 +89,8 @@ def test_mandatory_dependency(self) -> None: def test_version_reported_as_none(self) -> None: """Complain if importlib.metadata.version() returns None. 
- This shouldn't normally happen, but it was seen in the wild (#12223). + This shouldn't normally happen, but it was seen in the wild + (https://github.com/matrix-org/synapse/issues/12223). """ with patch( "synapse.util.check_dependencies.metadata.requires", @@ -148,7 +149,7 @@ def test_release_candidates_satisfy_dependency(self) -> None: """ Tests that release candidates count as far as satisfying a dependency is concerned. - (Regression test, see #12176.) + (Regression test, see https://github.com/matrix-org/synapse/issues/12176.) """ with patch( "synapse.util.check_dependencies.metadata.requires", @@ -162,7 +163,10 @@ def test_release_candidates_satisfy_dependency(self) -> None: check_requirements() def test_setuptools_rust_ignored(self) -> None: - """Test a workaround for a `poetry build` problem. Reproduces #13926.""" + """ + Test a workaround for a `poetry build` problem. Reproduces + https://github.com/matrix-org/synapse/issues/13926. + """ with patch( "synapse.util.check_dependencies.metadata.requires", return_value=["setuptools_rust >= 1.3"], diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py index 406c16cdcf30..fabb05c7e415 100644 --- a/tests/util/test_itertools.py +++ b/tests/util/test_itertools.py @@ -13,7 +13,11 @@ # limitations under the License. from typing import Dict, Iterable, List, Sequence -from synapse.util.iterutils import chunk_seq, sorted_topologically +from synapse.util.iterutils import ( + chunk_seq, + sorted_topologically, + sorted_topologically_batched, +) from tests.unittest import TestCase @@ -107,3 +111,73 @@ def test_multiple_paths(self) -> None: graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4]) + + +class SortTopologicallyBatched(TestCase): + "Test cases for `sorted_topologically_batched`" + + def test_empty(self) -> None: + "Test that an empty graph works correctly" + + graph: Dict[int, List[int]] = {} + self.assertEqual(list(sorted_topologically_batched([], graph)), []) + + def test_handle_empty_graph(self) -> None: + "Test that a graph where a node doesn't have an entry is treated as empty" + + graph: Dict[int, List[int]] = {} + + # For disconnected nodes the output is simply sorted. + self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]]) + + def test_disconnected(self) -> None: + "Test that a graph with no edges work" + + graph: Dict[int, List[int]] = {1: [], 2: []} + + # For disconnected nodes the output is simply sorted. + self.assertEqual(list(sorted_topologically_batched([1, 2], graph)), [[1, 2]]) + + def test_linear(self) -> None: + "Test that a simple `4 -> 3 -> 2 -> 1` graph works" + + graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + + self.assertEqual( + list(sorted_topologically_batched([4, 3, 2, 1], graph)), + [[1], [2], [3], [4]], + ) + + def test_subset(self) -> None: + "Test that only sorting a subset of the graph works" + graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]} + + self.assertEqual(list(sorted_topologically_batched([4, 3], graph)), [[3], [4]]) + + def test_fork(self) -> None: + "Test that a forked graph works" + graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]} + + # Valid orderings are `[1, 3, 2, 4]` or `[1, 2, 3, 4]`, but we should + # always get the same one. 
+ self.assertEqual( + list(sorted_topologically_batched([4, 3, 2, 1], graph)), [[1], [2, 3], [4]] + ) + + def test_duplicates(self) -> None: + "Test that a graph with duplicate edges work" + graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} + + self.assertEqual( + list(sorted_topologically_batched([4, 3, 2, 1], graph)), + [[1], [2], [3], [4]], + ) + + def test_multiple_paths(self) -> None: + "Test that a graph with multiple paths between two nodes work" + graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} + + self.assertEqual( + list(sorted_topologically_batched([4, 3, 2, 1], graph)), + [[1], [2], [3], [4]], + ) diff --git a/tests/utils.py b/tests/utils.py index e73b46944bd9..e0066fe15a7b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -30,6 +30,13 @@ from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + # set this to True to run the tests against postgres instead of sqlite. # # When running under postgres, we first create a base database with the name @@ -80,7 +87,7 @@ def setupdb() -> None: # Set up in the db db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, + dbname=POSTGRES_BASE_DB, user=POSTGRES_USER, host=POSTGRES_HOST, port=POSTGRES_PORT,