diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 513eb3bde9cc9b62f61b889c3ee56770e4b528df..7f42fad909d189d01624613010712818108db9a6 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -173,11 +173,13 @@ steps:
       queue: "medium"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1
@@ -192,11 +194,13 @@
       POSTGRES: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1
@@ -212,11 +216,13 @@
       WORKERS: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     soft_fail: true
     retry:
       automatic:
diff --git a/.buildkite/synapse_sytest.sh b/.buildkite/synapse_sytest.sh
deleted file mode 100644
index 3011b88bb79927dbaf0adbb99573c65fc060a8e2..0000000000000000000000000000000000000000
--- a/.buildkite/synapse_sytest.sh
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/bin/bash
-#
-# Fetch sytest, and then run the tests for synapse. The entrypoint for the
-# sytest-synapse docker images.
-
-set -ex
-
-if [ -n "$BUILDKITE" ]
-then
-    SYNAPSE_DIR=`pwd`
-else
-    SYNAPSE_DIR="/src"
-fi
-
-# Attempt to find a sytest to use.
-# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
-if [ -d "/sytest" ]; then
-    # If the user has mounted in a SyTest checkout, use that.
-    echo "Using local sytests..."
-
-    # create ourselves a working directory and dos2unix some scripts therein
-    mkdir -p /work/jenkins
-    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
-        dos2unix -n "/sytest/$i" "/work/$i"
-    done
-    ln -sf /sytest/tests /work
-    ln -sf /sytest/keys /work
-    SYTEST_LIB="/sytest/lib"
-else
-    if [ -n "BUILDKITE_BRANCH" ]
-    then
-        branch_name=$BUILDKITE_BRANCH
-    else
-        # Otherwise, try and find out what the branch that the Synapse checkout is using. Fall back to develop if it's not a branch.
-        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
-    fi
-
-    # Try and fetch the branch
-    echo "Trying to get same-named sytest branch..."
-    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
-        # Probably a 404, fall back to develop
-        echo "Using develop instead..."
-        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
-    }
-
-    mkdir -p /work
-    tar -C /work --strip-components=1 -xf sytest.tar.gz
-    SYTEST_LIB="/work/lib"
-fi
-
-cd /work
-
-# PostgreSQL setup
-if [ -n "$POSTGRES" ]
-then
-    export PGUSER=postgres
-    export POSTGRES_DB_1=pg1
-    export POSTGRES_DB_2=pg2
-
-    # Start the database
-    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
-
-    # Use the Jenkins script to write out the configuration for a PostgreSQL using Synapse
-    jenkins/prep_sytest_for_postgres.sh
-
-    # Make the test databases for the two Synapse servers that will be spun up
-    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
-    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
-
-fi
-
-if [ -n "$OFFLINE" ]; then
-    # if we're in offline mode, just put synapse into the virtualenv, and
-    # hope that the deps are up-to-date.
-    #
-    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
-    # so we just run setup.py explicitly.)
-    #
-    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
-else
-    # We've already created the virtualenv, but lets double check we have all
-    # deps.
-    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
-    /venv/bin/pip install -q --upgrade --no-cache-dir \
-        lxml psycopg2 coverage codecov tap.py
-
-    # Make sure all Perl deps are installed -- this is done in the docker build
-    # so will only install packages added since the last Docker build
-    ./install-deps.pl
-fi
-
-
-# Run the tests
->&2 echo "+++ Running tests"
-
-RUN_TESTS=(
-    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
-)
-
-TEST_STATUS=0
-
-if [ -n "$WORKERS" ]; then
-    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
-else
-    RUN_TESTS+=(-I Synapse)
-fi
-
-"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
-
-if [ $TEST_STATUS -ne 0 ]; then
-    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
-else
-    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
-fi
-
->&2 echo "--- Copying assets"
-
-# Copy out the logs
-mkdir -p /logs
-cp results.tap /logs/results.tap
-rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
-
-# Upload coverage to codecov and upload files, if running on Buildkite
-if [ -n "$BUILDKITE" ]
-then
-    /venv/bin/coverage combine || true
-    /venv/bin/coverage xml || true
-    /venv/bin/codecov -X gcov -f coverage.xml
-
-    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
-    tar xvf buildkite.tar.gz
-    chmod +x ./buildkite-agent
-
-    # Upload the files
-    ./buildkite-agent artifact upload "/logs/**/*.log*"
-    ./buildkite-agent artifact upload "/logs/results.tap"
-
-    if [ $TEST_STATUS -ne 0 ]; then
-        # Annotate, if failure
-        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
-    fi
-fi
-
-
-exit $TEST_STATUS
diff --git a/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md b/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
index 77581596c4886e79dfd109ebc001eeeb292281b3..64c06fe3ce1879095df8ff3c54269ec947640a28 100644
--- a/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
+++ b/.github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md
@@ -4,6 +4,7 @@ about: I need support for Synapse
 ---

-# Please ask for support in [**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org)
+Please don't file github issues asking for support.

-## Don't file an issue as a support request.
+Instead, please join [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org)
+(from a matrix.org account if necessary), and ask there.

diff --git a/.github/SUPPORT.md b/.github/SUPPORT.md
index 7a4244f67394d3dbc3dc3e77fce7b06954e51c77..fad2dd2efe26aa05fb6ff726ce9fc9d50727658a 100644
--- a/.github/SUPPORT.md
+++ b/.github/SUPPORT.md
@@ -1,3 +1,3 @@
-[**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org) is the official support room for Matrix, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html
-
-It can also be access via IRC bridge at irc://irc.freenode.net/matrix or on the web here: https://webchat.freenode.net/?channels=matrix
+[**#synapse:matrix.org**](https://matrix.to/#/#synapse:matrix.org) is the official support room for
+Synapse, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html.
+Please ask for support there, rather than filing github issues.
diff --git a/CHANGES.md b/CHANGES.md
index 284e89b266e8d1dd3c03f1d0d376bffdf1c1f731..dc8c74fe580254e0c1c2cbbb52218a3f070c952f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,137 @@
+Synapse 1.1.0 (2019-07-04)
+==========================
+
+As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
+See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
+
+This release also deprecates the use of environment variables to configure the
+docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
+for more details.
+
+No changes since 1.1.0rc2.
+
+
+Synapse 1.1.0rc2 (2019-07-03)
+=============================
+
+Bugfixes
+--------
+
+- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
+- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
+- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
+
+
+Internal Changes
+----------------
+
+- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
+
+
+Synapse 1.1.0rc1 (2019-07-02)
+=============================
+
+As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
+See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
+
+Features
+--------
+
+- Added possibility to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092))
+- Add monthly active users to phonehome stats. ([\#5252](https://github.com/matrix-org/synapse/issues/5252))
+- Allow expired users to trigger renewal email sending manually. ([\#5363](https://github.com/matrix-org/synapse/issues/5363))
+- Statistics on forward extremities per room are now exposed via Prometheus. ([\#5384](https://github.com/matrix-org/synapse/issues/5384), [\#5458](https://github.com/matrix-org/synapse/issues/5458), [\#5461](https://github.com/matrix-org/synapse/issues/5461))
+- Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste. ([\#5412](https://github.com/matrix-org/synapse/issues/5412), [\#5587](https://github.com/matrix-org/synapse/issues/5587))
+- Fully support SAML2 authentication. Contributed by [Alexander Trost](https://github.com/galexrt) - thank you! ([\#5422](https://github.com/matrix-org/synapse/issues/5422))
+- Allow server admins to define implementations of extra rules for allowing or denying incoming events. ([\#5440](https://github.com/matrix-org/synapse/issues/5440), [\#5474](https://github.com/matrix-org/synapse/issues/5474), [\#5477](https://github.com/matrix-org/synapse/issues/5477))
+- Add support for handling pagination APIs on client reader worker. ([\#5505](https://github.com/matrix-org/synapse/issues/5505), [\#5513](https://github.com/matrix-org/synapse/issues/5513), [\#5531](https://github.com/matrix-org/synapse/issues/5531))
+- Improve help and cmdline option names for --generate-config options. ([\#5512](https://github.com/matrix-org/synapse/issues/5512))
+- Allow configuration of the path used for ACME account keys. ([\#5516](https://github.com/matrix-org/synapse/issues/5516), [\#5521](https://github.com/matrix-org/synapse/issues/5521), [\#5522](https://github.com/matrix-org/synapse/issues/5522))
+- Add --data-dir and --open-private-ports options. ([\#5524](https://github.com/matrix-org/synapse/issues/5524))
+- Split public rooms directory auth config in two settings, in order to manage client auth independently from the federation part of it. Obsoletes the "restrict_public_rooms_to_local_users" configuration setting. If "restrict_public_rooms_to_local_users" is set in the config, Synapse will act as if both new options are enabled, i.e. require authentication through the client API and deny federation requests. ([\#5534](https://github.com/matrix-org/synapse/issues/5534))
+- The minimum TLS version used for outgoing federation requests can now be set with `federation_client_minimum_tls_version`. ([\#5550](https://github.com/matrix-org/synapse/issues/5550))
+- Optimise devices changed query to not pull unnecessary rows from the database, reducing database load. ([\#5559](https://github.com/matrix-org/synapse/issues/5559))
+- Add new metrics for number of forward extremities being persisted and number of state groups involved in resolution. ([\#5476](https://github.com/matrix-org/synapse/issues/5476))
+
+Bugfixes
+--------
+
+- Fix bug processing incoming events over federation if call to `/get_missing_events` fails. ([\#5042](https://github.com/matrix-org/synapse/issues/5042))
+- Prevent more than one room upgrade happening simultaneously on the same room. ([\#5051](https://github.com/matrix-org/synapse/issues/5051))
+- Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean. ([\#5325](https://github.com/matrix-org/synapse/issues/5325))
+- Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset. ([\#5387](https://github.com/matrix-org/synapse/issues/5387))
+- Fix email notifications for unnamed rooms with multiple people. ([\#5388](https://github.com/matrix-org/synapse/issues/5388))
+- Fix exceptions in federation reader worker caused by attempting to renew attestations, which should only happen on master worker. ([\#5389](https://github.com/matrix-org/synapse/issues/5389))
+- Fix handling of failures fetching remote content to not log failures as exceptions. ([\#5390](https://github.com/matrix-org/synapse/issues/5390))
+- Fix a bug where deactivated users could receive renewal emails if the account validity feature is on. ([\#5394](https://github.com/matrix-org/synapse/issues/5394))
+- Fix missing invite state after exchanging 3PID invites over federation. ([\#5464](https://github.com/matrix-org/synapse/issues/5464))
+- Fix intermittent exceptions on Apple hardware. Also fix bug that caused database activity times to be under-reported in log lines. ([\#5498](https://github.com/matrix-org/synapse/issues/5498))
+- Fix logging error when a tampered event is detected. ([\#5500](https://github.com/matrix-org/synapse/issues/5500))
+- Fix bug where clients could tight loop calling `/sync` for a period. ([\#5507](https://github.com/matrix-org/synapse/issues/5507))
+- Fix bug with `jinja2` preventing Synapse from starting. Users who had this problem should now simply need to run `pip install matrix-synapse`. ([\#5514](https://github.com/matrix-org/synapse/issues/5514))
+- Fix a regression where homeservers on private IP addresses were incorrectly blacklisted. ([\#5523](https://github.com/matrix-org/synapse/issues/5523))
+- Fixed m.login.jwt using unregistered user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586))
+- Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited. ([\#5576](https://github.com/matrix-org/synapse/issues/5576))
+
+
+Updates to the Docker image
+---------------------------
+- Add ability to change Docker containers' [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) with the `TZ` variable. ([\#5383](https://github.com/matrix-org/synapse/issues/5383))
+- Update docker image to use Python 3.7. ([\#5546](https://github.com/matrix-org/synapse/issues/5546))
+- Deprecate the use of environment variables for configuration, and make the use of a static configuration the default. ([\#5561](https://github.com/matrix-org/synapse/issues/5561), [\#5562](https://github.com/matrix-org/synapse/issues/5562), [\#5566](https://github.com/matrix-org/synapse/issues/5566), [\#5567](https://github.com/matrix-org/synapse/issues/5567))
+- Increase default log level for docker image to INFO. It can still be changed by editing the generated log.config file. ([\#5547](https://github.com/matrix-org/synapse/issues/5547))
+- Send synapse logs to the docker logging system, by default. ([\#5565](https://github.com/matrix-org/synapse/issues/5565))
+- Open the non-TLS port by default. ([\#5568](https://github.com/matrix-org/synapse/issues/5568))
+- Fix failure to start under docker with SAML support enabled. ([\#5490](https://github.com/matrix-org/synapse/issues/5490))
+- Use a sensible location for data files when generating a config file. ([\#5563](https://github.com/matrix-org/synapse/issues/5563))
+
+
+Deprecations and Removals
+-------------------------
+
+- Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run. ([\#5425](https://github.com/matrix-org/synapse/issues/5425))
+- PostgreSQL 9.4 is no longer supported. Synapse requires Postgres 9.5 or above for Postgres support. ([\#5448](https://github.com/matrix-org/synapse/issues/5448))
+- Remove support for cpu_affinity setting. ([\#5525](https://github.com/matrix-org/synapse/issues/5525))
+
+
+Improved Documentation
+----------------------
+- Improve README section on performance troubleshooting. ([\#4276](https://github.com/matrix-org/synapse/issues/4276))
+- Add information about how to install and run `black` on the codebase to code_style.rst. ([\#5537](https://github.com/matrix-org/synapse/issues/5537))
+- Improve install docs on choosing server_name. ([\#5558](https://github.com/matrix-org/synapse/issues/5558))
+
+
+Internal Changes
+----------------
+
+- Add logging to 3pid invite signature verification. ([\#5015](https://github.com/matrix-org/synapse/issues/5015))
+- Update example haproxy config to a more compatible setup. ([\#5313](https://github.com/matrix-org/synapse/issues/5313))
+- Track deactivated accounts in the database. ([\#5378](https://github.com/matrix-org/synapse/issues/5378), [\#5465](https://github.com/matrix-org/synapse/issues/5465), [\#5493](https://github.com/matrix-org/synapse/issues/5493))
+- Clean up code for sending federation EDUs. ([\#5381](https://github.com/matrix-org/synapse/issues/5381))
+- Add a sponsor button to the repo. ([\#5382](https://github.com/matrix-org/synapse/issues/5382), [\#5386](https://github.com/matrix-org/synapse/issues/5386))
+- Don't log non-200 responses from federation queries as exceptions. ([\#5383](https://github.com/matrix-org/synapse/issues/5383))
+- Update Python syntax in contrib/ to Python 3. ([\#5446](https://github.com/matrix-org/synapse/issues/5446))
+- Update federation_client dev script to support `.well-known` and work with python3. ([\#5447](https://github.com/matrix-org/synapse/issues/5447))
+- SyTest has been moved to Buildkite. ([\#5459](https://github.com/matrix-org/synapse/issues/5459))
+- Demo script now uses python3. ([\#5460](https://github.com/matrix-org/synapse/issues/5460))
+- Synapse can now handle RestServlets that return coroutines. ([\#5475](https://github.com/matrix-org/synapse/issues/5475), [\#5585](https://github.com/matrix-org/synapse/issues/5585))
+- The demo servers talk to each other again. ([\#5478](https://github.com/matrix-org/synapse/issues/5478))
+- Add an EXPERIMENTAL config option to try and periodically clean up extremities by sending dummy events. ([\#5480](https://github.com/matrix-org/synapse/issues/5480))
+- Synapse's codebase is now formatted by `black`. ([\#5482](https://github.com/matrix-org/synapse/issues/5482))
+- Some cleanups and sanity-checking in the CPU and database metrics. ([\#5499](https://github.com/matrix-org/synapse/issues/5499))
+- Improve email notification logging. ([\#5502](https://github.com/matrix-org/synapse/issues/5502))
+- Fix "Unexpected entry in 'full_schemas'" log warning. ([\#5509](https://github.com/matrix-org/synapse/issues/5509))
+- Improve logging when generating config files. ([\#5510](https://github.com/matrix-org/synapse/issues/5510))
+- Refactor and clean up Config parser for maintainability. ([\#5511](https://github.com/matrix-org/synapse/issues/5511))
+- Make the config clearer in that email.template_dir is relative to Synapse's root directory, not the `synapse/` folder within it. ([\#5543](https://github.com/matrix-org/synapse/issues/5543))
+- Update v1.0.0 release changelog to include more information about changes to password resets. ([\#5545](https://github.com/matrix-org/synapse/issues/5545))
+- Remove non-functioning check_event_hash.py dev script. ([\#5548](https://github.com/matrix-org/synapse/issues/5548))
+- Synapse will now only allow TLS v1.2 connections when serving federation, if it terminates TLS. As Synapse's allowed ciphers were only able to be used in TLSv1.2 before, this does not change behaviour. ([\#5550](https://github.com/matrix-org/synapse/issues/5550))
+- Logging when running GC collection on generation 0 is now at the DEBUG level, not INFO. ([\#5557](https://github.com/matrix-org/synapse/issues/5557))
+- Reduce the amount of stuff we send in the docker context. ([\#5564](https://github.com/matrix-org/synapse/issues/5564))
+- Point the reverse links in the Purge History contrib scripts at the intended location. ([\#5570](https://github.com/matrix-org/synapse/issues/5570))
+
+
 Synapse 1.0.0 (2019-06-11)
 ==========================
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 2c44422a0e84f2950bd8b24ebda8f9c8afabb3c3..94dc6504855fefa785ea196e1d59933fe6f0dea7 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,11 +30,10 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
-<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
-Buildkite builds need to be authorised by a maintainer. If your change breaks
-the build, this will be shown in GitHub, so please keep an eye on the pull
-request for feedback.
+We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
+continuous integration. Buildkite builds need to be authorised by a
+maintainer. If your change breaks the build, this will be shown in GitHub, so
+please keep an eye on the pull request for feedback.

 To run unit tests in a local development environment, you can use:

@@ -70,13 +69,21 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
 entry. These are managed by Towncrier
 (https://github.com/hawkowl/towncrier).

-To create a changelog entry, make a new file in the ``changelog.d``
-file named in the format of ``PRnumber.type``. The type can be
-one of ``feature``, ``bugfix``, ``removal`` (also used for
-deprecations), or ``misc`` (for internal-only changes).
-
-The content of the file is your changelog entry, which can contain Markdown
-formatting. The entry should end with a full stop ('.') for consistency.
+To create a changelog entry, make a new file in the ``changelog.d`` directory
+named in the format of ``PRnumber.type``. The type can be one of the following:
+
+* ``feature``.
+* ``bugfix``.
+* ``docker`` (for updates to the Docker image).
+* ``doc`` (for updates to the documentation).
+* ``removal`` (also used for deprecations).
+* ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which should be a short
+description of your change in the same style as the rest of our `changelog
+<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
+contain Markdown formatting, and should end with a full stop ('.') for
+consistency.

 Adding credits to the changelog is encouraged, we value your contributions and
 would like to have you shouted out in the release notes!
diff --git a/MANIFEST.in b/MANIFEST.in
index 2c59c7bdc29aa3773c79f74c2f2937ffb83313b7..834ddfad39c67a7fefe0701e5675c74363274846 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,6 +7,7 @@ include demo/README
 include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
+include sytest-blacklist

 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.sql.postgres
diff --git a/README.rst b/README.rst
index 13e11a57736e9b9bb6d0338443c407401ca7b1d5..bbff8de5ab992ecab277787e802f49e689ef0b42 100644
--- a/README.rst
+++ b/README.rst
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
     virtualenv -p python3 env
     source env/bin/activate

-    python -m pip install --no-pep-517 -e .[all]
+    python -m pip install --no-use-pep517 -e .[all]

 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 1fb109a21837a2d9425cf7e5f6df244ce17f0c4d..72064accf3e274b130a1b91232dd3164fac6ccd9 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -49,16 +49,16 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

-Upgrading to v1.1
-=================
+Upgrading to v1.1.0
+===================

-Synapse 1.1 removes support for older Python and PostgreSQL versions, as
+Synapse v1.1.0 removes support for older Python and PostgreSQL versions, as
 outlined in `our deprecation notice <https://matrix.org/blog/2019/04/08/synapse-deprecating-postgres-9-4-and-python-2-x>`_.

 Minimum Python Version
 ----------------------

-Synapse v1.1 has a minimum Python requirement of Python 3.5. Python 3.6 or
+Synapse v1.1.0 has a minimum Python requirement of Python 3.5. Python 3.6 or
 Python 3.7 are recommended as they have improved internal string handling,
 significantly reducing memory usage.
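
The CONTRIBUTING.rst hunk above and the long run of changelog.d deletions below are two halves of the same Towncrier workflow: every PR adds a one-line `changelog.d/PRnumber.type` file, and the release commit folds those files into CHANGES.md and deletes them. A minimal sketch of that workflow, assuming a hypothetical PR number 9999 (the entry text, version, and exact towncrier flags are placeholders, not taken from this diff):

    # Each PR adds a one-line newsfragment; the file body is the changelog
    # entry itself, ending with a full stop. "9999" is a placeholder PR number.
    echo "Improve widget-frobbing logging." > changelog.d/9999.misc

    # At release time, towncrier collects changelog.d/* into CHANGES.md and
    # removes the fragment files -- which is what produces the deletions
    # below. (Option spelling may vary between towncrier versions.)
    towncrier --version=1.1.0
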
diff --git a/changelog.d/4276.misc b/changelog.d/4276.misc
deleted file mode 100644
index 285939a4b8e14898355dc0cca7bb8edae0c5297e..0000000000000000000000000000000000000000
--- a/changelog.d/4276.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve README section on performance troubleshooting.
diff --git a/changelog.d/5015.misc b/changelog.d/5015.misc
deleted file mode 100644
index eeec85b92c5695a8e72564cae39d3b08c952e848..0000000000000000000000000000000000000000
--- a/changelog.d/5015.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add logging to 3pid invite signature verification.
diff --git a/changelog.d/5042.bugfix b/changelog.d/5042.bugfix
deleted file mode 100644
index 736b07c7906bafd9d58655d840592319f4f71171..0000000000000000000000000000000000000000
--- a/changelog.d/5042.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug processing incoming events over federation if call to `/get_missing_events` fails.
diff --git a/changelog.d/5051.bugfix b/changelog.d/5051.bugfix
deleted file mode 100644
index bfa22cc7596164ae4d58b19553bb02cbd36aa7fa..0000000000000000000000000000000000000000
--- a/changelog.d/5051.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Prevent >1 room upgrades happening simultaneously on the same room.
diff --git a/changelog.d/5092.feature b/changelog.d/5092.feature
deleted file mode 100644
index c22f586d080088ca0bc2f104d7fd44d707160245..0000000000000000000000000000000000000000
--- a/changelog.d/5092.feature
+++ /dev/null
@@ -1 +0,0 @@
-Added possibilty to disable local password authentication. Contributed by Daniel Hoffend.
diff --git a/changelog.d/5252.feature b/changelog.d/5252.feature
deleted file mode 100644
index 44115b0382ef21d460d054e87d39645e01810661..0000000000000000000000000000000000000000
--- a/changelog.d/5252.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add monthly active users to phonehome stats.
diff --git a/changelog.d/5313.misc b/changelog.d/5313.misc
deleted file mode 100644
index 2ea01cb9d3cd9d20592cb4eab4e07be54b2fac8c..0000000000000000000000000000000000000000
--- a/changelog.d/5313.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update example haproxy config to a more compatible setup.
diff --git a/changelog.d/5325.bugfix b/changelog.d/5325.bugfix
deleted file mode 100644
index b9413388f5bb3f1c8489106e59416a9c725bb03c..0000000000000000000000000000000000000000
--- a/changelog.d/5325.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean.
diff --git a/changelog.d/5363.feature b/changelog.d/5363.feature
deleted file mode 100644
index 803fe3fc37930cb85d2a5ff02e5656ce38429d37..0000000000000000000000000000000000000000
--- a/changelog.d/5363.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow expired user to trigger renewal email sending manually.
diff --git a/changelog.d/5378.misc b/changelog.d/5378.misc
deleted file mode 100644
index 365e49d63402c8de9294d071de1d25fd64258eb0..0000000000000000000000000000000000000000
--- a/changelog.d/5378.misc
+++ /dev/null
@@ -1 +0,0 @@
-Track deactivated accounts in the database.
diff --git a/changelog.d/5381.misc b/changelog.d/5381.misc
deleted file mode 100644
index bbf70a044577cf27126e2c85bc14739fe5cc876d..0000000000000000000000000000000000000000
--- a/changelog.d/5381.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean up code for sending federation EDUs.
diff --git a/changelog.d/5382.misc b/changelog.d/5382.misc
deleted file mode 100644
index 060cbba2a94446ed371ca96c0d6b370435befbd5..0000000000000000000000000000000000000000
--- a/changelog.d/5382.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a sponsor button to the repo.
diff --git a/changelog.d/5383.misc b/changelog.d/5383.misc
deleted file mode 100644
index 9dd5d1df93c1518ad0f41a358498ca9d96f92020..0000000000000000000000000000000000000000
--- a/changelog.d/5383.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't log non-200 responses from federation queries as exceptions.
diff --git a/changelog.d/5384.feature b/changelog.d/5384.feature
deleted file mode 100644
index 9497f521c832581b4b41144dd4433296a243608f..0000000000000000000000000000000000000000
--- a/changelog.d/5384.feature
+++ /dev/null
@@ -1 +0,0 @@
-Statistics on forward extremities per room are now exposed via Prometheus.
diff --git a/changelog.d/5386.misc b/changelog.d/5386.misc
deleted file mode 100644
index 060cbba2a94446ed371ca96c0d6b370435befbd5..0000000000000000000000000000000000000000
--- a/changelog.d/5386.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a sponsor button to the repo.
diff --git a/changelog.d/5387.bugfix b/changelog.d/5387.bugfix
deleted file mode 100644
index 2c6c94efc46337e47dbb8f9f6e71af854839dcbb..0000000000000000000000000000000000000000
--- a/changelog.d/5387.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset.
diff --git a/changelog.d/5388.bugfix b/changelog.d/5388.bugfix
deleted file mode 100644
index 503e8309151d0fedc3e79d28bd7a2fa1151f6efc..0000000000000000000000000000000000000000
--- a/changelog.d/5388.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix email notifications for unnamed rooms with multiple people.
diff --git a/changelog.d/5389.bugfix b/changelog.d/5389.bugfix
deleted file mode 100644
index dd648e26c837960a7e69beeb4613b0e6aa657298..0000000000000000000000000000000000000000
--- a/changelog.d/5389.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix exceptions in federation reader worker caused by attempting to renew attestations, which should only happen on master worker.
diff --git a/changelog.d/5390.bugfix b/changelog.d/5390.bugfix
deleted file mode 100644
index e7b7483cf26b19a44c764696aef823fac8dbf0b5..0000000000000000000000000000000000000000
--- a/changelog.d/5390.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix handling of failures fetching remote content to not log failures as exceptions.
diff --git a/changelog.d/5394.bugfix b/changelog.d/5394.bugfix
deleted file mode 100644
index 2ad9fbe82ce7f24334919d36c764b7edcff79e65..0000000000000000000000000000000000000000
--- a/changelog.d/5394.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where deactivated users could receive renewal emails if the account validity feature is on.
diff --git a/changelog.d/5397.doc b/changelog.d/5397.doc
new file mode 100644
index 0000000000000000000000000000000000000000..c2b500b482d8cff9bbdd41a0d8dffab72c15e738
--- /dev/null
+++ b/changelog.d/5397.doc
@@ -0,0 +1 @@
+Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks!
diff --git a/changelog.d/5412.feature b/changelog.d/5412.feature
deleted file mode 100644
index ec1503860a08496974e7de56f1dc4cde1e452fc5..0000000000000000000000000000000000000000
--- a/changelog.d/5412.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste.
\ No newline at end of file
diff --git a/changelog.d/5425.removal b/changelog.d/5425.removal
deleted file mode 100644
index 30022ee63d5ccf11d445680b8149f7c98614bac1..0000000000000000000000000000000000000000
--- a/changelog.d/5425.removal
+++ /dev/null
@@ -1 +0,0 @@
-Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run.
diff --git a/changelog.d/5440.feature b/changelog.d/5440.feature
deleted file mode 100644
index 63d9b58734be4f19de9e27f9d0066e245be2b70e..0000000000000000000000000000000000000000
--- a/changelog.d/5440.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow server admins to define implementations of extra rules for allowing or denying incoming events.
diff --git a/changelog.d/5446.misc b/changelog.d/5446.misc
deleted file mode 100644
index e5209be0a67d2d0e305201ca1f2d2ab7f62a52d8..0000000000000000000000000000000000000000
--- a/changelog.d/5446.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update Python syntax in contrib/ to Python 3.
diff --git a/changelog.d/5447.misc b/changelog.d/5447.misc
deleted file mode 100644
index dd520684044aa0de7e3f6b71b07f5ea659070b44..0000000000000000000000000000000000000000
--- a/changelog.d/5447.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update federation_client dev script to support `.well-known` and work with python3.
diff --git a/changelog.d/5448.removal b/changelog.d/5448.removal
deleted file mode 100644
index 33b9859daedcd05a312cfdcffc6d170a51c007e4..0000000000000000000000000000000000000000
--- a/changelog.d/5448.removal
+++ /dev/null
@@ -1 +0,0 @@
-PostgreSQL 9.4 is no longer supported. Synapse requires Postgres 9.5+ or above for Postgres support.
diff --git a/changelog.d/5458.feature b/changelog.d/5458.feature
deleted file mode 100644
index 9497f521c832581b4b41144dd4433296a243608f..0000000000000000000000000000000000000000
--- a/changelog.d/5458.feature
+++ /dev/null
@@ -1 +0,0 @@
-Statistics on forward extremities per room are now exposed via Prometheus.
diff --git a/changelog.d/5459.misc b/changelog.d/5459.misc
deleted file mode 100644
index 904e45f66b043fa103ccf688adc468ae80a41f92..0000000000000000000000000000000000000000
--- a/changelog.d/5459.misc
+++ /dev/null
@@ -1 +0,0 @@
-SyTest has been moved to Buildkite.
diff --git a/changelog.d/5460.misc b/changelog.d/5460.misc
deleted file mode 100644
index badc8bb79ac65039cd03868c3cca5b4ed76d1aca..0000000000000000000000000000000000000000
--- a/changelog.d/5460.misc
+++ /dev/null
@@ -1 +0,0 @@
-Demo script now uses python3.
diff --git a/changelog.d/5461.feature b/changelog.d/5461.feature
deleted file mode 100644
index 9497f521c832581b4b41144dd4433296a243608f..0000000000000000000000000000000000000000
--- a/changelog.d/5461.feature
+++ /dev/null
@@ -1 +0,0 @@
-Statistics on forward extremities per room are now exposed via Prometheus.
diff --git a/changelog.d/5464.bugfix b/changelog.d/5464.bugfix
deleted file mode 100644
index 8278d1bce9f013e7f7a2576900ed6a262c1e45a8..0000000000000000000000000000000000000000
--- a/changelog.d/5464.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix missing invite state after exchanging 3PID invites over federaton.
diff --git a/changelog.d/5465.misc b/changelog.d/5465.misc
deleted file mode 100644
index af5f0f8f457617c54cd78345f71e4d6737bcced0..0000000000000000000000000000000000000000
--- a/changelog.d/5465.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Track deactivated accounts in the database.
-
diff --git a/changelog.d/5474.feature b/changelog.d/5474.feature
deleted file mode 100644
index 63d9b58734be4f19de9e27f9d0066e245be2b70e..0000000000000000000000000000000000000000
--- a/changelog.d/5474.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow server admins to define implementations of extra rules for allowing or denying incoming events.
diff --git a/changelog.d/5475.misc b/changelog.d/5475.misc
deleted file mode 100644
index 6be06d4d0bb93b7504ba3ddd5eea2ee7b7369b8e..0000000000000000000000000000000000000000
--- a/changelog.d/5475.misc
+++ /dev/null
@@ -1 +0,0 @@
-Synapse can now handle RestServlets that return coroutines.
diff --git a/changelog.d/5476.misc b/changelog.d/5476.misc
deleted file mode 100644
index 7955c147447c2a64fd470dfe082086a4fd653a92..0000000000000000000000000000000000000000
--- a/changelog.d/5476.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add new metrics for number of forward extremities being persisted and number of state groups involved in resolution.
diff --git a/changelog.d/5477.feature b/changelog.d/5477.feature
deleted file mode 100644
index 63d9b58734be4f19de9e27f9d0066e245be2b70e..0000000000000000000000000000000000000000
--- a/changelog.d/5477.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow server admins to define implementations of extra rules for allowing or denying incoming events.
diff --git a/changelog.d/5478.misc b/changelog.d/5478.misc
deleted file mode 100644
index 829bb1e521d8dda0d80556d93eb61ae0f1808328..0000000000000000000000000000000000000000
--- a/changelog.d/5478.misc
+++ /dev/null
@@ -1 +0,0 @@
-The demo servers talk to each other again.
diff --git a/changelog.d/5480.misc b/changelog.d/5480.misc
deleted file mode 100644
index 3001bcc1fee21ef60fa57a162a0b15b4351c131e..0000000000000000000000000000000000000000
--- a/changelog.d/5480.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add an EXPERIMENTAL config option to try and periodically clean up extremities by sending dummy events.
diff --git a/changelog.d/5482.misc b/changelog.d/5482.misc
deleted file mode 100644
index 0332d1133bdcd969890446efa6d712c7bfdd90e5..0000000000000000000000000000000000000000
--- a/changelog.d/5482.misc
+++ /dev/null
@@ -1 +0,0 @@
-Synapse's codebase is now formatted by `black`.
diff --git a/changelog.d/5490.bugfix b/changelog.d/5490.bugfix
deleted file mode 100644
index 4242254c53b882b1be085501cf8b19e2278754b2..0000000000000000000000000000000000000000
--- a/changelog.d/5490.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix failure to start under docker with SAML support enabled.
\ No newline at end of file
diff --git a/changelog.d/5493.misc b/changelog.d/5493.misc
deleted file mode 100644
index 365e49d63402c8de9294d071de1d25fd64258eb0..0000000000000000000000000000000000000000
--- a/changelog.d/5493.misc
+++ /dev/null
@@ -1 +0,0 @@
-Track deactivated accounts in the database.
diff --git a/changelog.d/5498.bugfix b/changelog.d/5498.bugfix
deleted file mode 100644
index 6ef298d20c738b8af89f9919ca312e9ac913d610..0000000000000000000000000000000000000000
--- a/changelog.d/5498.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix intermittent exceptions on Apple hardware. Also fix bug that caused database activity times to be under-reported in log lines.
diff --git a/changelog.d/5499.misc b/changelog.d/5499.misc
deleted file mode 100644
index 84de1f2dae2742220af46cdd4530d0a9d18be807..0000000000000000000000000000000000000000
--- a/changelog.d/5499.misc
+++ /dev/null
@@ -1 +0,0 @@
-Some cleanups and sanity-checking in the CPU and database metrics.
\ No newline at end of file
diff --git a/changelog.d/5500.bugfix b/changelog.d/5500.bugfix
deleted file mode 100644
index 624c678435cf8d85657bda52e4458514aa6de115..0000000000000000000000000000000000000000
--- a/changelog.d/5500.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix logging error when a tampered event is detected.
diff --git a/changelog.d/5502.misc b/changelog.d/5502.misc
deleted file mode 100644
index d515323eaec09891d2ba0bd38151d6b095baba6e..0000000000000000000000000000000000000000
--- a/changelog.d/5502.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve email notification logging.
diff --git a/changelog.d/5505.feature b/changelog.d/5505.feature
deleted file mode 100644
index 5c6bab2c31173c97ff0f264dde6da88554e7d96f..0000000000000000000000000000000000000000
--- a/changelog.d/5505.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for handling pagination APIs on client reader worker.
diff --git a/changelog.d/5507.bugfix b/changelog.d/5507.bugfix
deleted file mode 100644
index 70452aa14647c2e9eefb25428ed22610a87741a2..0000000000000000000000000000000000000000
--- a/changelog.d/5507.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where clients could tight loop calling `/sync` for a period.
diff --git a/changelog.d/5509.misc b/changelog.d/5509.misc
deleted file mode 100644
index cc27cf2940f709220edf797f640e4cd1756ccde8..0000000000000000000000000000000000000000
--- a/changelog.d/5509.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix "Unexpected entry in 'full_schemas'" log warning.
diff --git a/changelog.d/5510.misc b/changelog.d/5510.misc
deleted file mode 100644
index 4591a63d9d2fc48ba5578e78d5ac071dd329890d..0000000000000000000000000000000000000000
--- a/changelog.d/5510.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve logging when generating config files.
diff --git a/changelog.d/5511.misc b/changelog.d/5511.misc
deleted file mode 100644
index c1f679b28780e8918dcb4e629e602ce2634e130f..0000000000000000000000000000000000000000
--- a/changelog.d/5511.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor and clean up Config parser for maintainability.
diff --git a/changelog.d/5512.feature b/changelog.d/5512.feature
deleted file mode 100644
index 712878901bf1d50d5d97642d59e52f5be4d91c35..0000000000000000000000000000000000000000
--- a/changelog.d/5512.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve help and cmdline option names for --generate-config options.
diff --git a/changelog.d/5513.feature b/changelog.d/5513.feature
deleted file mode 100644
index 5c6bab2c31173c97ff0f264dde6da88554e7d96f..0000000000000000000000000000000000000000
--- a/changelog.d/5513.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for handling pagination APIs on client reader worker.
diff --git a/changelog.d/5514.bugfix b/changelog.d/5514.bugfix
deleted file mode 100644
index c3a76a854a2bce1f796d4d8cfa5239c13eafa4a3..0000000000000000000000000000000000000000
--- a/changelog.d/5514.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug with `jinja2` preventing Synapse from starting. Users who had this problem should now simply need to run `pip install matrix-synapse`.
diff --git a/changelog.d/5516.feature b/changelog.d/5516.feature
deleted file mode 100644
index fdf91c35e48976f8dd94f29c25b9532f99a9aa33..0000000000000000000000000000000000000000
--- a/changelog.d/5516.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow configuration of the path used for ACME account keys.
diff --git a/changelog.d/5521.feature b/changelog.d/5521.feature
deleted file mode 100644
index fdf91c35e48976f8dd94f29c25b9532f99a9aa33..0000000000000000000000000000000000000000
--- a/changelog.d/5521.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow configuration of the path used for ACME account keys.
diff --git a/changelog.d/5522.feature b/changelog.d/5522.feature
deleted file mode 100644
index fdf91c35e48976f8dd94f29c25b9532f99a9aa33..0000000000000000000000000000000000000000
--- a/changelog.d/5522.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow configuration of the path used for ACME account keys.
diff --git a/changelog.d/5523.bugfix b/changelog.d/5523.bugfix
deleted file mode 100644
index 26acd367a850c766edb76d6499493ed64a12a8cb..0000000000000000000000000000000000000000
--- a/changelog.d/5523.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a regression where homeservers on private IP addresses were incorrectly blacklisted.
\ No newline at end of file
diff --git a/changelog.d/5524.feature b/changelog.d/5524.feature
deleted file mode 100644
index 6ba211c3cc037421ee3f41d6f4190e9125d5c873..0000000000000000000000000000000000000000
--- a/changelog.d/5524.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add --data-dir and --open-private-ports options.
\ No newline at end of file
diff --git a/changelog.d/5525.removal b/changelog.d/5525.removal
deleted file mode 100644
index af71560f36ad4182a3c0028010df0fccb8c7cd47..0000000000000000000000000000000000000000
--- a/changelog.d/5525.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove support for cpu_affinity setting.
diff --git a/changelog.d/5531.feature b/changelog.d/5531.feature
deleted file mode 100644
index 5c6bab2c31173c97ff0f264dde6da88554e7d96f..0000000000000000000000000000000000000000
--- a/changelog.d/5531.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for handling pagination APIs on client reader worker.
diff --git a/changelog.d/5534.feature b/changelog.d/5534.feature
deleted file mode 100644
index 2e279c9b77ec67a25abdcbd4733f96ace3e2c2ac..0000000000000000000000000000000000000000
--- a/changelog.d/5534.feature
+++ /dev/null
@@ -1 +0,0 @@
-Split public rooms directory auth config in two settings, in order to manage client auth independently from the federation part of it. Obsoletes the "restrict_public_rooms_to_local_users" configuration setting. If "restrict_public_rooms_to_local_users" is set in the config, Synapse will act as if both new options are enabled, i.e. require authentication through the client API and deny federation requests.
diff --git a/changelog.d/5537.misc b/changelog.d/5537.misc
deleted file mode 100644
index 870a5ff18b27407cce98d7b04174ed28cfe286bd..0000000000000000000000000000000000000000
--- a/changelog.d/5537.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add information about how to install and run `black` on the codebase to code_style.rst.
diff --git a/changelog.d/5543.misc b/changelog.d/5543.misc
deleted file mode 100644
index 793620a731200ebb707f966b50c246425ac9ecf4..0000000000000000000000000000000000000000
--- a/changelog.d/5543.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make the config clearer in that email.template_dir is relative to the Synapse's root directory, not the `synapse/` folder within it.
diff --git a/changelog.d/5544.misc b/changelog.d/5544.misc
new file mode 100644
index 0000000000000000000000000000000000000000..81d6f74c31c83a25d10f731d26c06d119ad6a260
--- /dev/null
+++ b/changelog.d/5544.misc
@@ -0,0 +1 @@
+Added opentracing support and configuration options.
diff --git a/changelog.d/5545.misc b/changelog.d/5545.misc
deleted file mode 100644
index b738eef4bd4c765788e116e91938eccede06bf87..0000000000000000000000000000000000000000
--- a/changelog.d/5545.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update v1.0.0 release changelog to include more information about changes to password resets.
diff --git a/changelog.d/5546.feature b/changelog.d/5546.feature
deleted file mode 100644
index 16952b62fc6e5077b9a27cb61eedc06743f123c6..0000000000000000000000000000000000000000
--- a/changelog.d/5546.feature
+++ /dev/null
@@ -1 +0,0 @@
-Update docker image to use Python 3.7.
diff --git a/changelog.d/5547.feature b/changelog.d/5547.feature
deleted file mode 100644
index 509e36c7ea009579229ecb34f52710d6fb8158ad..0000000000000000000000000000000000000000
--- a/changelog.d/5547.feature
+++ /dev/null
@@ -1 +0,0 @@
-Increase default log level for docker image to INFO. It can still be changed by editing the generated log.config file.
diff --git a/changelog.d/5548.misc b/changelog.d/5548.misc
deleted file mode 100644
index f35939cfe9abb715b331339b5534fb0aec838129..0000000000000000000000000000000000000000
--- a/changelog.d/5548.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove non-functioning check_event_hash.py dev script.
diff --git a/changelog.d/5550.feature b/changelog.d/5550.feature
deleted file mode 100644
index 79ecedf3b8bbb5684ff1e34fdcc80918601ece56..0000000000000000000000000000000000000000
--- a/changelog.d/5550.feature
+++ /dev/null
@@ -1 +0,0 @@
-The minimum TLS version used for outgoing federation requests can now be set with `federation_client_minimum_tls_version`.
diff --git a/changelog.d/5550.misc b/changelog.d/5550.misc
deleted file mode 100644
index ad5693338e171db752bd407ae23ab1ab1e7d5d0a..0000000000000000000000000000000000000000
--- a/changelog.d/5550.misc
+++ /dev/null
@@ -1 +0,0 @@
-Synapse will now only allow TLS v1.2 connections when serving federation, if it terminates TLS. As Synapse's allowed ciphers were only able to be used in TLSv1.2 before, this does not change behaviour.
diff --git a/changelog.d/5555.bugfix b/changelog.d/5555.bugfix
deleted file mode 100644
index c0b1ecf81aa0699238e46363ac9ce7c01da868f6..0000000000000000000000000000000000000000
--- a/changelog.d/5555.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed m.login.jwt using unregistred user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill.
diff --git a/changelog.d/5557.misc b/changelog.d/5557.misc
deleted file mode 100644
index 0c90f498719f36441d1574a35462275da743026e..0000000000000000000000000000000000000000
--- a/changelog.d/5557.misc
+++ /dev/null
@@ -1 +0,0 @@
-Logging when running GC collection on generation 0 is now at the DEBUG level, not INFO.
diff --git a/changelog.d/5558.misc b/changelog.d/5558.misc
deleted file mode 100644
index 9ce3555d4559b53fa648c62eab9fffaed4a3d25b..0000000000000000000000000000000000000000
--- a/changelog.d/5558.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve install docs on choosing server_name.
diff --git a/changelog.d/5559.feature b/changelog.d/5559.feature
deleted file mode 100644
index b77b383459cbfa6d00bd22802881077ebf7a5147..0000000000000000000000000000000000000000
--- a/changelog.d/5559.feature
+++ /dev/null
@@ -1 +0,0 @@
-Optimise devices changed query to not pull unnecessary rows from the database, reducing database load.
diff --git a/changelog.d/5561.feature b/changelog.d/5561.feature
deleted file mode 100644
index 85380bc5173688b23f3e18bbba2c1a9ba6a856dd..0000000000000000000000000000000000000000
--- a/changelog.d/5561.feature
+++ /dev/null
@@ -1 +0,0 @@
-Update Docker image to deprecate the use of environment variables for configuration, and make the use of a static configuration the default.
diff --git a/changelog.d/5562.feature b/changelog.d/5562.feature
deleted file mode 100644
index 85380bc5173688b23f3e18bbba2c1a9ba6a856dd..0000000000000000000000000000000000000000
--- a/changelog.d/5562.feature
+++ /dev/null
@@ -1 +0,0 @@
-Update Docker image to deprecate the use of environment variables for configuration, and make the use of a static configuration the default.
diff --git a/changelog.d/5563.bugfix b/changelog.d/5563.bugfix
deleted file mode 100644
index 09c4381a23c70397d252929defd6c46f4c83e356..0000000000000000000000000000000000000000
--- a/changelog.d/5563.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Docker: Use a sensible location for data files when generating a config file.
\ No newline at end of file
diff --git a/changelog.d/5564.misc b/changelog.d/5564.misc
deleted file mode 100644
index e209cdcc29b5adf7a609afffe055a0ae818b8fa4..0000000000000000000000000000000000000000
--- a/changelog.d/5564.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce the amount of stuff we send in the docker context.
diff --git a/changelog.d/5565.feature b/changelog.d/5565.feature
deleted file mode 100644
index 4b0665af03380d0bea0a57a51dad5949f1bdd205..0000000000000000000000000000000000000000
--- a/changelog.d/5565.feature
+++ /dev/null
@@ -1 +0,0 @@
-Docker: Send synapse logs to the docker logging system, by default.
diff --git a/changelog.d/5566.feature b/changelog.d/5566.feature
deleted file mode 100644
index 85380bc5173688b23f3e18bbba2c1a9ba6a856dd..0000000000000000000000000000000000000000
--- a/changelog.d/5566.feature
+++ /dev/null
@@ -1 +0,0 @@
-Update Docker image to deprecate the use of environment variables for configuration, and make the use of a static configuration the default.
diff --git a/changelog.d/5567.feature b/changelog.d/5567.feature
deleted file mode 100644
index 85380bc5173688b23f3e18bbba2c1a9ba6a856dd..0000000000000000000000000000000000000000
--- a/changelog.d/5567.feature
+++ /dev/null
@@ -1 +0,0 @@
-Update Docker image to deprecate the use of environment variables for configuration, and make the use of a static configuration the default.
diff --git a/changelog.d/5568.feature b/changelog.d/5568.feature
deleted file mode 100644
index 59b9e5f96dc9b17eb1b161e3bae21456fffe1a13..0000000000000000000000000000000000000000
--- a/changelog.d/5568.feature
+++ /dev/null
@@ -1 +0,0 @@
-Docker image: open the non-TLS port by default.
diff --git a/changelog.d/5570.misc b/changelog.d/5570.misc
deleted file mode 100644
index dfb1d7e58b5003eebde94e081a796d09f0aaaade..0000000000000000000000000000000000000000
--- a/changelog.d/5570.misc
+++ /dev/null
@@ -1 +0,0 @@
-Point the reverse links in the Purge History contrib scripts at the intended location.
diff --git a/changelog.d/5576.bugfix b/changelog.d/5576.bugfix
deleted file mode 100644
index c1ba5581f2a4ccabbe030b05f8fe2d7b5adb1a26..0000000000000000000000000000000000000000
--- a/changelog.d/5576.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited.
diff --git a/changelog.d/5585.misc b/changelog.d/5585.misc
deleted file mode 100644
index 6be06d4d0bb93b7504ba3ddd5eea2ee7b7369b8e..0000000000000000000000000000000000000000
--- a/changelog.d/5585.misc
+++ /dev/null
@@ -1 +0,0 @@
-Synapse can now handle RestServlets that return coroutines.
diff --git a/changelog.d/5606.misc b/changelog.d/5606.misc
new file mode 100644
index 0000000000000000000000000000000000000000..bb3c028167896d34bf2819adcfeb7a4e8c49316a
--- /dev/null
+++ b/changelog.d/5606.misc
@@ -0,0 +1 @@
+Move logging code out of `synapse.util` and into `synapse.logging`.
diff --git a/changelog.d/5609.bugfix b/changelog.d/5609.bugfix
new file mode 100644
index 0000000000000000000000000000000000000000..534ee22a1b3f1fb75b6bb12c0d047f09c2289675
--- /dev/null
+++ b/changelog.d/5609.bugfix
@@ -0,0 +1 @@
+Fix 'utime went backwards' errors on daemonization.
diff --git a/changelog.d/5611.misc b/changelog.d/5611.misc
new file mode 100644
index 0000000000000000000000000000000000000000..a2d1695139126ccab904dd8235519ede20435c8c
--- /dev/null
+++ b/changelog.d/5611.misc
@@ -0,0 +1 @@
+Add a blacklist file to the repo to blacklist certain sytests from failing CI.
diff --git a/changelog.d/5613.feature b/changelog.d/5613.feature
new file mode 100644
index 0000000000000000000000000000000000000000..4b7bb2745c19f805bfc97fd0cd41c829446d31ba
--- /dev/null
+++ b/changelog.d/5613.feature
@@ -0,0 +1 @@
+Add `sender` and `origin_server_ts` fields to `m.replace`.
diff --git a/changelog.d/5616.misc b/changelog.d/5616.misc
new file mode 100644
index 0000000000000000000000000000000000000000..9f94be6778c676543f5894580b33ecaebdd46208
--- /dev/null
+++ b/changelog.d/5616.misc
@@ -0,0 +1 @@
+Make runtime errors surrounding password reset emails much clearer.
diff --git a/changelog.d/5617.misc b/changelog.d/5617.misc
new file mode 100644
index 0000000000000000000000000000000000000000..bb3c028167896d34bf2819adcfeb7a4e8c49316a
--- /dev/null
+++ b/changelog.d/5617.misc
@@ -0,0 +1 @@
+Move logging code out of `synapse.util` and into `synapse.logging`.
diff --git a/changelog.d/5619.docker b/changelog.d/5619.docker
new file mode 100644
index 0000000000000000000000000000000000000000..b69e5cc57cb5a6573d1e5b1614c09ecfed5bc4a2
--- /dev/null
+++ b/changelog.d/5619.docker
@@ -0,0 +1 @@
+Base Docker image on a newer Alpine Linux version (3.8 -> 3.10).
diff --git a/changelog.d/5620.docker b/changelog.d/5620.docker
new file mode 100644
index 0000000000000000000000000000000000000000..cbb5a75d6ad595e5d6ff81a4bc17cb6da59d3ba9
--- /dev/null
+++ b/changelog.d/5620.docker
@@ -0,0 +1 @@
+Add missing space in default logging file format generated by the Docker image.
diff --git a/changelog.d/5621.bugfix b/changelog.d/5621.bugfix
new file mode 100644
index 0000000000000000000000000000000000000000..f1a2851f45ef434a5a18de50a64b2e08271fb173
--- /dev/null
+++ b/changelog.d/5621.bugfix
@@ -0,0 +1 @@
+Various minor fixes to the federation request rate limiter.
diff --git a/changelog.d/5622.misc b/changelog.d/5622.misc
new file mode 100644
index 0000000000000000000000000000000000000000..9f0a87311c271691567b479503a454d1e353d548
--- /dev/null
+++ b/changelog.d/5622.misc
@@ -0,0 +1 @@
+Remove dead code for persisting outgoing federation transactions.
diff --git a/changelog.d/5623.feature b/changelog.d/5623.feature
new file mode 100644
index 0000000000000000000000000000000000000000..b73080e88d2933e8d5ab5ae0e539982224325237
--- /dev/null
+++ b/changelog.d/5623.feature
@@ -0,0 +1 @@
+Add default push rule to ignore reactions.
diff --git a/changelog.d/5625.removal b/changelog.d/5625.removal
new file mode 100644
index 0000000000000000000000000000000000000000..d33a778d695d3a161a27b32f6c887b39b7793962
--- /dev/null
+++ b/changelog.d/5625.removal
@@ -0,0 +1 @@
+Remove support for the `invite_3pid_guest` configuration setting.
diff --git a/changelog.d/5626.feature b/changelog.d/5626.feature
new file mode 100644
index 0000000000000000000000000000000000000000..5ef793b943a0ce967e6a41413997413011417cfc
--- /dev/null
+++ b/changelog.d/5626.feature
@@ -0,0 +1 @@
+Include the original event when asking for its relations.
diff --git a/changelog.d/5627.misc b/changelog.d/5627.misc
new file mode 100644
index 0000000000000000000000000000000000000000..730721b5ef19b8cdec880eeeccf2f4db0beb8e4f
--- /dev/null
+++ b/changelog.d/5627.misc
@@ -0,0 +1 @@
+Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI.
diff --git a/changelog.d/5628.misc b/changelog.d/5628.misc
new file mode 100644
index 0000000000000000000000000000000000000000..fec8446793a026fa5ba44077b89a72bc4a6279cd
--- /dev/null
+++ b/changelog.d/5628.misc
@@ -0,0 +1 @@
+Move RegistrationHandler.get_or_create_user to test code.
diff --git a/changelog.d/5630.misc b/changelog.d/5630.misc
new file mode 100644
index 0000000000000000000000000000000000000000..f112d873eb8066211acbfa47111cc098eb5da08d
--- /dev/null
+++ b/changelog.d/5630.misc
@@ -0,0 +1 @@
+Add some more common python virtual-environment paths to the black exclusion list.
diff --git a/changelog.d/5637.misc b/changelog.d/5637.misc
new file mode 100644
index 0000000000000000000000000000000000000000..f18d6197e51aa479ba1cfc0c9191c59e085ae4e8
--- /dev/null
+++ b/changelog.d/5637.misc
@@ -0,0 +1 @@
+Unblacklist some user_directory sytests.
diff --git a/changelog.d/5638.bugfix b/changelog.d/5638.bugfix
new file mode 100644
index 0000000000000000000000000000000000000000..66781ad9e687b2f6c2afe495478d2c0e92a6925e
--- /dev/null
+++ b/changelog.d/5638.bugfix
@@ -0,0 +1 @@
+Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format.
diff --git a/changelog.d/5639.misc b/changelog.d/5639.misc
new file mode 100644
index 0000000000000000000000000000000000000000..413b13128c6fb9f4f90ac5cde04754bdb12f3fcc
--- /dev/null
+++ b/changelog.d/5639.misc
@@ -0,0 +1 @@
+Factor out some redundant code in the login implementation.
diff --git a/changelog.d/5640.misc b/changelog.d/5640.misc
new file mode 100644
index 0000000000000000000000000000000000000000..7d69a1b3b640689060d2e56997314ae049313753
--- /dev/null
+++ b/changelog.d/5640.misc
@@ -0,0 +1 @@
+Update ModuleApi to avoid register(generate_token=True).
diff --git a/changelog.d/5641.misc b/changelog.d/5641.misc
new file mode 100644
index 0000000000000000000000000000000000000000..1899bc963d4ee961c1cbddccf73aa6364a5f92ef
--- /dev/null
+++ b/changelog.d/5641.misc
@@ -0,0 +1 @@
+Remove access-token support from RegistrationHandler.register, and rename it.
diff --git a/changelog.d/5642.misc b/changelog.d/5642.misc
new file mode 100644
index 0000000000000000000000000000000000000000..e7f8e214a4e6fbbe20fa7eba2526521fb638003b
--- /dev/null
+++ b/changelog.d/5642.misc
@@ -0,0 +1 @@
+Remove access-token support from `RegistrationStore.register`, and rename it.
diff --git a/changelog.d/5643.misc b/changelog.d/5643.misc new file mode 100644 index 0000000000000000000000000000000000000000..2b2316469e039ccdc0d9ecde6ee5c01ba585fe38 --- /dev/null +++ b/changelog.d/5643.misc @@ -0,0 +1 @@ +Improve logging for auto-join when a new user is created. diff --git a/changelog.d/5644.bugfix b/changelog.d/5644.bugfix new file mode 100644 index 0000000000000000000000000000000000000000..f6302fd08dd37c524155bfa0ff10af44eac1549b --- /dev/null +++ b/changelog.d/5644.bugfix @@ -0,0 +1 @@ +Fix newly-registered users not being able to look up their own profile without joining a room. diff --git a/changelog.d/5645.misc b/changelog.d/5645.misc new file mode 100644 index 0000000000000000000000000000000000000000..4fa9699e4faee781e230913d6f5ca7d3619abadc --- /dev/null +++ b/changelog.d/5645.misc @@ -0,0 +1 @@ +Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. \ No newline at end of file diff --git a/changelog.d/5651.doc b/changelog.d/5651.doc new file mode 100644 index 0000000000000000000000000000000000000000..e2d5a8dc8ad760abe2b52f4c1aaaa87c41b0e181 --- /dev/null +++ b/changelog.d/5651.doc @@ -0,0 +1 @@ +--no-pep517 should be --no-use-pep517 in the documentation to set up the development environment. diff --git a/changelog.d/5654.bugfix b/changelog.d/5654.bugfix new file mode 100644 index 0000000000000000000000000000000000000000..5f76b041cd1ce26c9b802a30781d24166b5c44f6 --- /dev/null +++ b/changelog.d/5654.bugfix @@ -0,0 +1 @@ +Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. \ No newline at end of file diff --git a/changelog.d/5655.misc b/changelog.d/5655.misc new file mode 100644 index 0000000000000000000000000000000000000000..acab6aee92bfe9dd3c59cd65a7c1dfbe00bef229 --- /dev/null +++ b/changelog.d/5655.misc @@ -0,0 +1 @@ +Fix a small typo in a code comment. \ No newline at end of file diff --git a/changelog.d/5656.misc b/changelog.d/5656.misc new file mode 100644 index 0000000000000000000000000000000000000000..a8de20a7d01e3a6c4820ed900aab9161308f813d --- /dev/null +++ b/changelog.d/5656.misc @@ -0,0 +1 @@ +Clean up exception handling around client access tokens. diff --git a/changelog.d/5657.misc b/changelog.d/5657.misc new file mode 100644 index 0000000000000000000000000000000000000000..bdec9ae4c0b67442866c6987daf8d7dcb8cc2b85 --- /dev/null +++ b/changelog.d/5657.misc @@ -0,0 +1 @@ +Add a mechanism for per-test homeserver configuration in the unit tests. diff --git a/changelog.d/5658.bugfix b/changelog.d/5658.bugfix new file mode 100644 index 0000000000000000000000000000000000000000..f6ae906a9ad625b287fc06df3ab2e69542e33534 --- /dev/null +++ b/changelog.d/5658.bugfix @@ -0,0 +1 @@ +Fix 3PID bind requests being sent to identity servers as `application/x-www-form-urlencoded` data, which is deprecated. diff --git a/changelog.d/5659.misc b/changelog.d/5659.misc new file mode 100644 index 0000000000000000000000000000000000000000..686001295c33a27dc6b2e7e6589243d06032605c --- /dev/null +++ b/changelog.d/5659.misc @@ -0,0 +1 @@ +Inline issue_access_token. diff --git a/changelog.d/5660.feature b/changelog.d/5660.feature new file mode 100644 index 0000000000000000000000000000000000000000..82889fdaf16b2191403ff749cd9dcfa87f184e20 --- /dev/null +++ b/changelog.d/5660.feature @@ -0,0 +1 @@ +Implement `session_lifetime` configuration option, after which access tokens will expire.
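The `session_lifetime` option introduced in changelog 5660 is enforced by the `synapse/api/auth.py` hunk later in this diff. A minimal standalone sketch of that expiry check, assuming a token row shaped like the one the database lookup returns; the stand-in class and helper names here are hypothetical:

.. code:: python

    import time

    class InvalidClientTokenError(Exception):
        # Stand-in for synapse.api.errors.InvalidClientTokenError (added below).
        def __init__(self, msg, soft_logout=False):
            super().__init__(msg)
            self.soft_logout = soft_logout

    def check_token_expiry(token_row, now_ms=None):
        # A stored token whose valid_until_ms lies in the past triggers a
        # soft-logout rather than a plain "unrecognised token" error.
        if now_ms is None:
            now_ms = int(time.time() * 1000)
        valid_until_ms = token_row.get("valid_until_ms")
        if valid_until_ms is not None and valid_until_ms < now_ms:
            raise InvalidClientTokenError("Access token has expired", soft_logout=True)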
diff --git a/changelog.d/5661.doc b/changelog.d/5661.doc new file mode 100644 index 0000000000000000000000000000000000000000..c70e62014e40feda08c00004ae3210491433cc16 --- /dev/null +++ b/changelog.d/5661.doc @@ -0,0 +1 @@ +Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! diff --git a/changelog.d/5664.misc b/changelog.d/5664.misc new file mode 100644 index 0000000000000000000000000000000000000000..0ca7a0fbd0c7db824c48a35327df48205e67c25c --- /dev/null +++ b/changelog.d/5664.misc @@ -0,0 +1 @@ +Update the sytest BuildKite configuration to check out Synapse in `/src`. diff --git a/changelog.d/5673.misc b/changelog.d/5673.misc new file mode 100644 index 0000000000000000000000000000000000000000..1942256358343933fd556b52317c572ee24e2d81 --- /dev/null +++ b/changelog.d/5673.misc @@ -0,0 +1 @@ +Add a `docker` type to the towncrier configuration. diff --git a/changelog.d/5674.feature b/changelog.d/5674.feature new file mode 100644 index 0000000000000000000000000000000000000000..04bdfa4ad5eb942d62c10b0068c92cd551b513f3 --- /dev/null +++ b/changelog.d/5674.feature @@ -0,0 +1 @@ +Return "This account has been deactivated" when a deactivated user tries to log in. diff --git a/contrib/example_log_config.yaml b/contrib/example_log_config.yaml index 06592963daeee7b1cbac607b978ae541186b3d9a..3a76a7a33fa61e3974aaf1692d179fa628768c49 100644 --- a/contrib/example_log_config.yaml +++ b/contrib/example_log_config.yaml @@ -1,7 +1,7 @@ -# Example log_config file for synapse. To enable, point `log_config` to it in +# Example log_config file for synapse. To enable, point `log_config` to it in # `homeserver.yaml`, and restart synapse. # -# This configuration will produce similar results to the defaults within +# This configuration will produce similar results to the defaults within # synapse, but can be edited to give more flexibility.
version: 1 @@ -12,7 +12,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: @@ -35,7 +35,7 @@ handlers: root: level: INFO handlers: [console] # to use file handler instead, switch to [file] - + loggers: synapse: level: INFO diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py index c7e55d8aa7fd91d9dd9b40b000c14a9c74b9a54a..5ef140ae48a7cb677ea957712c0177fd262cec9a 100644 --- a/contrib/experiments/test_messaging.py +++ b/contrib/experiments/test_messaging.py @@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid from synapse.app.homeserver import SynapseHomeServer -# from synapse.util.logutils import log_function +# from synapse.logging.utils import log_function from twisted.internet import reactor, defer from twisted.python import log diff --git a/contrib/systemd/log_config.yaml b/contrib/systemd/log_config.yaml index d85bdd1208f9a79dbe6236fdd1a1fe14523fdbe0..22f67a50ce0fcbc11a62edc2b2d5ebd6cf311b1c 100644 --- a/contrib/systemd/log_config.yaml +++ b/contrib/systemd/log_config.yaml @@ -8,7 +8,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/debian/changelog b/debian/changelog index 91653e7243564d66e29b2cd7239c7a7dd5b1245c..b964cf90a2b8304d323f71897d2e39cf145c228d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,19 @@ -matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium + + [ Amber Brown ] + * Update logging config defaults to match API changes in Synapse. + + -- Erik Johnston <erikj@rae> Thu, 04 Jul 2019 13:59:02 +0100 + +matrix-synapse-py3 (1.1.0) stable; urgency=medium [ Silke Hofstra ] * Include systemd-python to allow logging to the systemd journal. - -- Silke Hofstra <silke@slxh.eu> Wed, 29 May 2019 09:45:29 +0200 + [ Synapse Packaging team ] + * New synapse release 1.1.0. 
+ + -- Synapse Packaging team <packages@matrix.org> Thu, 04 Jul 2019 11:43:41 +0100 matrix-synapse-py3 (1.0.0) stable; urgency=medium diff --git a/debian/log.yaml b/debian/log.yaml index 206b65f1acb0bd585a413c63399fcd6eea8f3ab4..95b655dd35f74ebfed10c0e88b8c81f1feea4604 100644 --- a/debian/log.yaml +++ b/debian/log.yaml @@ -7,7 +7,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/docker/Dockerfile b/docker/Dockerfile index 0939cadf399dc6c040612bd67050d392a2afd623..e5a0d6d5f662dcba8383fc263e5eaf2398dc9e5f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7 ### ### Stage 0: builder ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder +FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder # install the OS build deps @@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \ ### Stage 1: runtime ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 +FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 # xmlsec is required for saml support RUN apk add --no-cache --virtual .runtime_deps \ @@ -66,6 +66,7 @@ RUN apk add --no-cache --virtual .runtime_deps \ libpq \ zlib \ su-exec \ + tzdata \ xmlsec COPY --from=builder /install /usr/local diff --git a/docker/README.md b/docker/README.md index b62417c281bed3f5798089da36d5b3807e160138..46bb9d2d998aa3b6ab4755359fdac91910f184e6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -63,7 +63,6 @@ The following environment variables are supported in `generate` mode: * `UID`, `GID`: the user id and group id to use for creating the data directories. Defaults to `991`, `991`. - ## Running synapse Once you have a valid configuration file, you can start synapse as follows: @@ -91,6 +90,7 @@ The following environment variables are supported in run mode: * `SYNAPSE_CONFIG_PATH`: path to the config file. Defaults to `<SYNAPSE_CONFIG_DIR>/homeserver.yaml`. * `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`. +* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`. ## TLS support diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index b0267b1c602b2ce0cf78c9f4c86cc01818bb956a..c1110f0f538e2f732ea31a75409921b9ca56f684 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -207,22 +207,3 @@ perspectives: password_config: enabled: true - -{% if SYNAPSE_SMTP_HOST %} -email: - enable_notifs: false - smtp_host: "{{ SYNAPSE_SMTP_HOST }}" - smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }} - smtp_user: "{{ SYNAPSE_SMTP_USER }}" - smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}" - require_transport_security: False - notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}" - app_name: Matrix - # if template_dir is unset, uses the example templates that are part of - # the Synapse distribution. 
- #template_dir: res/templates - notif_template_html: notif_mail.html - notif_template_text: notif_mail.txt - notif_for_new_users: True - riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}" -{% endif %} diff --git a/docker/conf/log.config b/docker/conf/log.config index 895e45d20b7a5c1abbdd4dafec717a3a2b4c3af5..db35e475a4706b96b3e38fb19c72ca931996200d 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -2,11 +2,11 @@ version: 1 formatters: precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s' + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index 27cde11cf7627a121d33d37bff84a8a479cca681..f5cd5de8abaca38f53bf40a589e78b7a74fba531 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -1,4 +1,4 @@ -Log contexts +Log Contexts ============ .. contents:: @@ -12,7 +12,7 @@ record. Logcontexts are also used for CPU and database accounting, so that we can track which requests were responsible for high CPU use or database activity. -The ``synapse.util.logcontext`` module provides a facilities for managing the +The ``synapse.logging.context`` module provides facilities for managing the current log context (as well as providing the ``LoggingContextFilter`` class). Deferreds make the whole thing complicated, so this document describes how it @@ -27,19 +27,19 @@ found them: .. code:: python - from synapse.util import logcontext # omitted from future snippets + from synapse.logging import context # omitted from future snippets def handle_request(request_id): - request_context = logcontext.LoggingContext() + request_context = context.LoggingContext() - calling_context = logcontext.LoggingContext.current_context() - logcontext.LoggingContext.set_current_context(request_context) + calling_context = context.LoggingContext.current_context() + context.LoggingContext.set_current_context(request_context) try: request_context.request = request_id do_request_handling() logger.debug("finished") finally: - logcontext.LoggingContext.set_current_context(calling_context) + context.LoggingContext.set_current_context(calling_context) def do_request_handling(): logger.debug("phew") # this will be logged against request_id @@ -51,7 +51,7 @@ written much more succinctly as: .. code:: python def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id do_request_handling() logger.debug("finished") @@ -74,7 +74,7 @@ blocking operation, and returns a deferred: @defer.inlineCallbacks def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id yield do_request_handling() logger.debug("finished") @@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from external code. We need to make it follow our rules. The easy way to do it is with a combination of ``defer.inlineCallbacks``, and -``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``, +``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``, which returns a deferred which will run its callbacks after a given number of seconds.
That might look like: @@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with This technique works equally for external functions which return deferreds, or deferreds we have made ourselves. -You can also use ``logcontext.make_deferred_yieldable``, which just does the +You can also use ``context.make_deferred_yieldable``, which just does the boilerplate for you, so the above could be written: .. code:: python def sleep(seconds): - return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds)) + return context.make_deferred_yieldable(get_sleep_deferred(seconds)) Fire-and-forget @@ -279,7 +279,7 @@ Obviously that option means that the operations done in that might be fixed by setting a different logcontext via a ``with LoggingContext(...)`` in ``background_operation``). -The second option is to use ``logcontext.run_in_background``, which wraps a +The second option is to use ``context.run_in_background``, which wraps a function so that it doesn't reset the logcontext even when it returns an incomplete deferred, and adds a callback to the returned deferred to reset the logcontext. In other words, it turns a function that follows the Synapse rules @@ -293,7 +293,7 @@ It can be used like this: def do_request_handling(): yield foreground_operation() - logcontext.run_in_background(background_operation) + context.run_in_background(background_operation) # this will now be logged against the request context logger.debug("Request handling complete") @@ -332,7 +332,7 @@ gathered: result = yield defer.gatherResults([d1, d2]) In this case particularly, though, option two, of using -``logcontext.preserve_fn`` almost certainly makes more sense, so that +``context.preserve_fn`` almost certainly makes more sense, so that ``operation1`` and ``operation2`` are both logged against the original logcontext. This looks like: @@ -340,8 +340,8 @@ logcontext. This looks like: @defer.inlineCallbacks def do_request_handling(): - d1 = logcontext.preserve_fn(operation1)() - d2 = logcontext.preserve_fn(operation2)() + d1 = context.preserve_fn(operation1)() + d2 = context.preserve_fn(operation2)() with PreserveLoggingContext(): result = yield defer.gatherResults([d1, d2]) @@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it: .. code:: python def handle_request(request_id): - with logcontext.LoggingContext() as request_context: + with context.LoggingContext() as request_context: request_context.request = request_id d = do_request_handling() @@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy. The business of a Deferred which runs its callbacks in the original logcontext isn't hard to achieve — we have it today, in the shape of -``logcontext._PreservingContextDeferred``: +``context._PreservingContextDeferred``: .. code:: python diff --git a/docs/postgres.rst b/docs/postgres.rst index 33f58e3acea33f67a2849bc77f67d084b7dae0d8..0ae52ccbd8140cb1f522593e8a999d7c1990da0f 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -34,9 +34,14 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user su - postgres createuser --pwprompt synapse_user -The PostgreSQL database used *must* have the correct encoding set, otherwise it -would not be able to store UTF8 strings. To create a database with the correct -encoding use, e.g.:: +Before you can authenticate with the ``synapse_user``, you must create a +database that it can access. 
To create a database, first connect to the database +with your database user:: + + su - postgres + psql + +and then run:: CREATE DATABASE synapse ENCODING 'UTF8' @@ -46,7 +51,13 @@ encoding use, e.g.:: OWNER synapse_user; This would create an appropriate database named ``synapse`` owned by the -``synapse_user`` user (which must already exist). +``synapse_user`` user (which must already have been created as above). + +Note that the PostgreSQL database *must* have the correct encoding set (as +shown above), otherwise it will not be able to store UTF8 strings. + +You may need to enable password authentication so ``synapse_user`` can connect +to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html. Tuning Postgres =============== diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst index e4b870411c0240e7312c1bc9c24c8b63ada15e9a..4b640ffc4f6fd646fb2dc21f0094d562ca169c4a 100644 --- a/docs/reverse_proxy.rst +++ b/docs/reverse_proxy.rst @@ -48,6 +48,8 @@ Let's assume that we expect clients to connect to our server at proxy_set_header X-Forwarded-For $remote_addr; } } + + Do not add a ``/`` after the port in ``proxy_pass``, otherwise nginx will canonicalise/normalise the URI. * Caddy:: diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bf9cd88b15b5e5595a43377612090552234b808e..663ff31622f42cb0a4c98c8723d1afd73c2114f5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -786,6 +786,17 @@ uploads_path: "DATADIR/uploads" # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# Time that a user's session remains valid for, after they log in. +# +# Note that this is not currently compatible with guest logins. +# +# Note also that this is calculated at login time: changes are not applied +# retrospectively to users who have already logged in. +# +# By default, this is infinite. +# +#session_lifetime: 24h + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: @@ -997,6 +1008,12 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # so it is not normally necessary to specify them unless you need to # override them. # +# Once SAML support is enabled, a metadata file will be exposed at +# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to +# use to configure your SAML IdP with. Alternatively, you can manually configure +# the IdP to use an ACS location of +# https://<server>:<port>/_matrix/saml2/authn_response. +# #saml2_config: # sp_config: # # point this to the IdP's metadata. You can use either a local file or @@ -1006,7 +1023,15 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # remote: # - url: https://our_idp/metadata.xml # -# # The rest of sp_config is just used to generate our metadata xml, and you +# # By default, the user has to go to our login page first. If you'd like to +# # allow IdP-initiated login, set 'allow_unsolicited: True' in a +# # 'service.sp' section: +# # +# #service: +# # sp: +# # allow_unsolicited: True +# +# # The examples below are just used to generate our metadata xml, and you # # may well not need it, depending on your setup. Alternatively you # # may need a whole lot more detail - see the pysaml2 docs! # @@ -1029,6 +1054,12 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # # separate pysaml2 configuration file: # # # config_path: "CONFDIR/sp_conf.py" +# +# # the lifetime of a SAML session. This defines how long a user has to +# # complete the authentication process, if allow_unsolicited is unset.
+# # The default is 5 minutes. +# # +# # saml_session_lifetime: 5m @@ -1375,3 +1406,20 @@ password_config: # module: "my_custom_project.SuperRulesSet" # config: # example_option: 'things' + + +## Opentracing ## +# These settings enable opentracing, which implements distributed tracing. +# This allows you to observe the causal chain of events across servers +# including requests, key lookups etc. across any server running +# synapse or any other services which support opentracing +# (specifically those implemented with jaeger). + +#opentracing: +# # Enable / disable tracer +# tracer_enabled: false +# # The list of homeservers we wish to expose our current traces to. +# # The list is a list of regexes which are matched against the +# # servername of the homeserver +# homeserver_whitelist: +# - ".*" diff --git a/pyproject.toml b/pyproject.toml index ec23258da8dbfe42af505b1a5e3d7a03b77e9bf3..db4a2e41e439435008a9749e73a5dbf132cf75d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,11 @@ name = "Bugfixes" showcontent = true + [[tool.towncrier.type]] + directory = "docker" + name = "Updates to the Docker image" + showcontent = true + [[tool.towncrier.type]] directory = "doc" name = "Improved Documentation" @@ -39,6 +44,8 @@ exclude = ''' | \.git # root of the project | \.tox | \.venv + | \.env + | env | _build | _trial_temp.* | build diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh new file mode 100755 index 0000000000000000000000000000000000000000..ebb4d69f86897181d729d0484ed15bc5c81785bf --- /dev/null +++ b/scripts-dev/lint.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# Runs linting scripts over the local Synapse checkout +# isort - sorts import statements +# flake8 - lints and finds mistakes +# black - opinionated code formatter + +set -e + +isort -y -rc synapse tests scripts-dev scripts +flake8 synapse tests +python3 -m black synapse tests scripts-dev scripts diff --git a/synapse/__init__.py b/synapse/__init__.py index 119359be68f45fd3025239c8c389dbb2c75e3d49..cf22fabd6118de740fed55302de51430a22d28d4 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ try: except ImportError: pass -__version__ = "1.0.0" +__version__ = "1.1.0" diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 86f145649cfdf56e56af573b1aee2628a8a7afaa..d9e943c39c83318e7d11e10dc702e099477cf0dd 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -25,7 +25,13 @@ from twisted.internet import defer import synapse.types from synapse import event_auth from synapse.api.constants import EventTypes, JoinRules, Membership -from synapse.api.errors import AuthError, Codes, ResourceLimitError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientTokenError, + MissingClientTokenError, + ResourceLimitError, +) from synapse.config.server import is_threepid_reserved from synapse.types import UserID from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache @@ -63,7 +69,6 @@ class Auth(object): self.clock = hs.get_clock() self.store = hs.get_datastore() self.state = hs.get_state_handler() - self.TOKEN_NOT_FOUND_HTTP_STATUS = 401 self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000) register_cache("cache", "token_cache", self.token_cache) @@ -189,18 +194,17 @@ class Auth(object): Returns: defer.Deferred: resolves to a ``synapse.types.Requester`` object Raises: - AuthError if no user by that token exists or the token is invalid. + InvalidClientCredentialsError if no user by that token exists or the token + is invalid.
+ AuthError if access is denied for the user in the access token """ - # Can optionally look elsewhere in the request (e.g. headers) try: ip_addr = self.hs.get_ip_from_request(request) user_agent = request.requestHeaders.getRawHeaders( b"User-Agent", default=[b""] )[0].decode("ascii", "surrogateescape") - access_token = self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) + access_token = self.get_access_token_from_request(request) user_id, app_service = yield self._get_appservice_user_id(request) if user_id: @@ -264,18 +268,12 @@ class Auth(object): ) ) except KeyError: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Missing access token.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError() @defer.inlineCallbacks def _get_appservice_user_id(self, request): app_service = self.store.get_app_service_by_token( - self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) + self.get_access_token_from_request(request) ) if app_service is None: defer.returnValue((None, None)) @@ -313,13 +311,25 @@ class Auth(object): `token_id` (int|None): access token id. May be None if guest `device_id` (str|None): device corresponding to access token Raises: - AuthError if no user by that token exists or the token is invalid. + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. """ if rights == "access": # first look in the database r = yield self._look_up_user_by_access_token(token) if r: + valid_until_ms = r["valid_until_ms"] + if ( + valid_until_ms is not None + and valid_until_ms < self.clock.time_msec() + ): + # there was a valid access token, but it has expired. + # soft-logout the user. + raise InvalidClientTokenError( + msg="Access token has expired", soft_logout=True + ) + defer.returnValue(r) # otherwise it needs to be a valid macaroon @@ -331,11 +341,7 @@ class Auth(object): if not guest: # non-guest access tokens must be in the database logger.warning("Unrecognised access token - not in store.") - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unrecognised access token.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError() # Guest access tokens are not stored in the database (there can # only be one access token per guest, anyway). @@ -350,16 +356,10 @@ class Auth(object): # guest tokens. stored_user = yield self.store.get_user_by_id(user_id) if not stored_user: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unknown user_id %s" % user_id, - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Unknown user_id %s" % user_id) if not stored_user["is_guest"]: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Guest access token used for regular user", - errcode=Codes.UNKNOWN_TOKEN, + raise InvalidClientTokenError( + "Guest access token used for regular user" ) ret = { "user": user, @@ -386,11 +386,7 @@ class Auth(object): ValueError, ) as e: logger.warning("Invalid macaroon in auth: %s %s", type(e), e) - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Invalid macaroon passed.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Invalid macaroon passed.") def _parse_and_validate_macaroon(self, token, rights="access"): """Takes a macaroon and tries to parse and validate it. 
This is cached @@ -430,11 +426,7 @@ class Auth(object): macaroon, rights, self.hs.config.expire_access_token, user_id=user_id ) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Invalid macaroon passed.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Invalid macaroon passed.") if not has_expiry and rights == "access": self.token_cache[token] = (user_id, guest) @@ -453,17 +445,14 @@ class Auth(object): (str) user id Raises: - AuthError if there is no user_id caveat in the macaroon + InvalidClientCredentialsError if there is no user_id caveat in the + macaroon """ user_prefix = "user_id = " for caveat in macaroon.caveats: if caveat.caveat_id.startswith(user_prefix): return caveat.caveat_id[len(user_prefix) :] - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "No user caveat in macaroon", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("No user caveat in macaroon") def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): """ @@ -527,26 +516,18 @@ class Auth(object): "token_id": ret.get("token_id", None), "is_guest": False, "device_id": ret.get("device_id"), + "valid_until_ms": ret.get("valid_until_ms"), } defer.returnValue(user_info) def get_appservice_by_req(self, request): - try: - token = self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) - service = self.store.get_app_service_by_token(token) - if not service: - logger.warn("Unrecognised appservice access token.") - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unrecognised access token.", - errcode=Codes.UNKNOWN_TOKEN, - ) - request.authenticated_entity = service.sender - return defer.succeed(service) - except KeyError: - raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.") + token = self.get_access_token_from_request(request) + service = self.store.get_app_service_by_token(token) + if not service: + logger.warn("Unrecognised appservice access token.") + raise InvalidClientTokenError() + request.authenticated_entity = service.sender + return defer.succeed(service) def is_server_admin(self, user): """ Check if the given user is a local server admin. @@ -692,20 +673,16 @@ class Auth(object): return bool(query_params) or bool(auth_headers) @staticmethod - def get_access_token_from_request(request, token_not_found_http_status=401): + def get_access_token_from_request(request): """Extracts the access_token from the request. Args: request: The http request. - token_not_found_http_status(int): The HTTP status code to set in the - AuthError if the token isn't found. This is used in some of the - legacy APIs to change the status code to 403 from the default of - 401 since some of the old clients depended on auth errors returning - 403. Returns: unicode: The access_token Raises: - AuthError: If there isn't an access_token in the request. + MissingClientTokenError: If there isn't a single access_token in the + request """ auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") @@ -714,34 +691,20 @@ class Auth(object): # Try the get the access_token from a "Authorization: Bearer" # header if query_params is not None: - raise AuthError( - token_not_found_http_status, - "Mixing Authorization headers and access_token query parameters.", - errcode=Codes.MISSING_TOKEN, + raise MissingClientTokenError( + "Mixing Authorization headers and access_token query parameters." 
) if len(auth_headers) > 1: - raise AuthError( - token_not_found_http_status, - "Too many Authorization headers.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError("Too many Authorization headers.") parts = auth_headers[0].split(b" ") if parts[0] == b"Bearer" and len(parts) == 2: return parts[1].decode("ascii") else: - raise AuthError( - token_not_found_http_status, - "Invalid Authorization header.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError("Invalid Authorization header.") else: # Try to get the access_token from the query params. if not query_params: - raise AuthError( - token_not_found_http_status, - "Missing access token.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError() return query_params[0].decode("ascii") diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 28b5c2af9b395eee98d152f5b8751ccb3cc52e57..ad3e262041dfae0edf54d7c89dd358f8d94cfbd2 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -139,6 +139,22 @@ class ConsentNotGivenError(SynapseError): return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri) +class UserDeactivatedError(SynapseError): + """The error returned to the client when the user attempted to access an + authenticated endpoint, but the account has been deactivated. + """ + + def __init__(self, msg): + """Constructs a UserDeactivatedError + + Args: + msg (str): The human-readable error message + """ + super(UserDeactivatedError, self).__init__( + code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN + ) + + class RegistrationError(SynapseError): """An error raised when a registration event fails.""" @@ -210,7 +226,9 @@ class NotFoundError(SynapseError): class AuthError(SynapseError): - """An error raised when there was a problem authorising an event.""" + """An error raised when there was a problem authorising an event, and at various + other poorly-defined times. + """ def __init__(self, *args, **kwargs): if "errcode" not in kwargs: @@ -218,6 +236,41 @@ class AuthError(SynapseError): super(AuthError, self).__init__(*args, **kwargs) +class InvalidClientCredentialsError(SynapseError): + """An error raised when there was a problem with the authorisation credentials + in a client request. + + https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens: + + When credentials are required but missing or invalid, the HTTP call will + return with a status of 401 and the error code, M_MISSING_TOKEN or + M_UNKNOWN_TOKEN respectively. + """ + + def __init__(self, msg, errcode): + super().__init__(code=401, msg=msg, errcode=errcode) + + +class MissingClientTokenError(InvalidClientCredentialsError): + """Raised when we couldn't find the access token in a request""" + + def __init__(self, msg="Missing access token"): + super().__init__(msg=msg, errcode="M_MISSING_TOKEN") + + +class InvalidClientTokenError(InvalidClientCredentialsError): + """Raised when we didn't understand the access token in a request""" + + def __init__(self, msg="Unrecognised access token", soft_logout=False): + super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") + self._soft_logout = soft_logout + + def error_dict(self): + d = super().error_dict() + d["soft_logout"] = self._soft_logout + return d + + class ResourceLimitError(SynapseError): """ Any error raised when there is a problem with resource usage. 
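The exception classes added to `synapse/api/errors.py` above change what missing or expired credentials look like on the wire. A rough standalone reconstruction, assuming `SynapseError.error_dict` produces the usual `{"errcode": ..., "error": ...}` body; the helper below is hypothetical, not part of the diff:

.. code:: python

    # Hypothetical model of the wire format produced by the new classes; the
    # real implementations live in the errors.py hunk above.
    def error_dict(errcode, msg, soft_logout=None):
        d = {"errcode": errcode, "error": msg}
        if soft_logout is not None:
            d["soft_logout"] = soft_logout
        return d

    # MissingClientTokenError -> 401 with M_MISSING_TOKEN
    print(error_dict("M_MISSING_TOKEN", "Missing access token"))

    # InvalidClientTokenError(soft_logout=True) -> 401 with M_UNKNOWN_TOKEN,
    # plus the new soft_logout flag
    print(error_dict("M_UNKNOWN_TOKEN", "Unrecognised access token", soft_logout=True))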
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 200978a58f51b4d4c5cf8a243e3024376d08defe..807f320b46e1cc35f7fd9034f18b196a4809e9ca 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse from synapse.app import check_bind_error from synapse.crypto import context_factory -from synapse.util import PreserveLoggingContext +from synapse.logging.context import PreserveLoggingContext from synapse.util.async_helpers import Linearizer from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string @@ -103,34 +103,36 @@ def start_reactor( install_dns_limiter(reactor) def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - - change_resource_limit(soft_file_limit) - if gc_thresholds: - gc.set_threshold(*gc_thresholds) - - run_command() - - if daemonize: - if print_pidfile: - print(pid_file) - - daemon = Daemonize( - app=appname, - pid=pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + logger.info("Running") + change_resource_limit(soft_file_limit) + if gc_thresholds: + gc.set_threshold(*gc_thresholds) + run_command() + + # make sure that we run the reactor with the sentinel log context, + # otherwise other PreserveLoggingContext instances will get confused + # and complain when they see the logcontext arbitrarily swapping + # between the sentinel and `run` logcontexts. + # + # We also need to drop the logcontext before forking if we're daemonizing, + # otherwise the cputime metrics get confused about the per-thread resource usage + # appearing to go backwards. + with PreserveLoggingContext(): + if daemonize: + if print_pidfile: + print(pid_file) + + daemon = Daemonize( + app=appname, + pid=pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() def quit_with_error(error_string): @@ -251,6 +253,9 @@ def start(hs, listeners=None): # Load the certificate from disk. refresh_certificate(hs) + # Start the tracer + synapse.logging.opentracing.init_tracer(hs.config) + # It is now safe to start your Synapse. 
hs.start_listening(listeners) hs.get_datastore().start_profiling() diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 9120bdb1438c60f569c7d9ee09aae1c2f25abef9..be44249ed6f18a91c0dc0efd35a606683ac8f71a 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -26,6 +26,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -36,7 +37,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 90bc79cdda87d74dbf189d2049f4df163cf056ea..ff11beca82a727dd91c8e66a0fbf85dd59028a07 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -64,7 +65,6 @@ from synapse.rest.client.versions import VersionsRestServlet from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index ff522e4499f3b7984cf92e7c89f47431c7631622..cacad25eacd5158088bc323fde1c5de68cb5753e 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -59,7 +60,6 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 94214209301aea4531b5b1bd3ac0375f31423930..11e80dbae02cb2913cd73e5f5dece1287766995e 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ 
-28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.federation.transport.server import TransportLayerServer from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -48,7 +49,6 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 969be58d0b04810a3e41290353adb123b6e5a8b3..97da7bdcbf4a759169204cdfeb6c404f1cfa2da4 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.federation import send_queue from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.resource import METRICS_PREFIX, MetricsResource @@ -44,7 +45,6 @@ from synapse.storage.engines import create_engine from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 2fd7d57ebf1c871d55308627a05d410405eb4525..417a10bbd23c618e8e0ac00175611488c0c795da 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -29,6 +29,7 @@ from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -41,7 +42,6 @@ from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 49da105cf62e27826f9fc3d7a1d4cf7a45658d2b..639b1429c05a3f1a71c772fa41b8eebe12ec1505 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -54,6 +54,7 @@ from synapse.federation.transport.server import TransportLayerServer from synapse.http.additional_resource import AdditionalResource from synapse.http.server import RootRedirect from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from 
synapse.metrics import RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.metrics.resource import METRICS_PREFIX, MetricsResource @@ -72,7 +73,6 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index cf0e2036c3fdf79e478c95d44f4a9c39812632ad..f23b9b6eda8926c5c5fb8e24c9270eed01af155f 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -27,6 +27,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -40,7 +41,6 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.media_repository import MediaRepositoryStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index df29ea5ecbea6035627cc89d9f5a68bce3e1c7a4..4f929edf8660c6b7e4f42d681e9ec066d3401a16 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -26,6 +26,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import __func__ @@ -38,7 +39,6 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 858949910d0dbb8ad9ac5f03fbc077c9b5d423a3..de4797fddcd8cd11f3f8d06b9516e33002f42af8 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -31,6 +31,7 @@ from synapse.config.logger import setup_logging from synapse.handlers.presence import PresenceHandler, get_interested_parties from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ @@ -57,7 +58,6 @@ from synapse.server 
import HomeServer from synapse.storage.engines import create_engine from synapse.storage.presence import UserPresenceState from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 2d9d2e1bbc02dd881ebf322ca4357d4a4bf7a804..1177ddd72e9210b1cfad9b58a2a964fccda03fd1 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.http.server import JsonResource from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore @@ -46,7 +47,6 @@ from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, run_in_background from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index b54bf5411f4624c193d148502a8e697e3f8d2004..e5b36494f57f54bfa5c837e47f61ee8838d52015 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -53,8 +53,8 @@ import logging from twisted.internet import defer from synapse.appservice import ApplicationServiceState +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index fcd55d3e3d76b15840897b71d9dfec1309b9847e..8381b8eb296dfdedc4515d61404b018fe3696678 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -112,13 +112,17 @@ class EmailConfig(Config): missing = [] for k in required: if k not in email_config: - missing.append(k) + missing.append("email." + k) + + if config.get("public_baseurl") is None: + missing.append("public_baseurl") if len(missing) > 0: raise RuntimeError( - "email.password_reset_behaviour is set to 'local' " - "but required keys are missing: %s" - % (", ".join(["email." + k for k in missing]),) + "Password reset emails are configured to be sent from " + "this homeserver due to a partial 'email' block. " + "However, the following required keys are missing: %s" + % (", ".join(missing),) ) # Templates for password reset emails @@ -156,13 +160,6 @@ class EmailConfig(Config): filepath, "email.password_reset_template_success_html" ) - if config.get("public_baseurl") is None: - raise RuntimeError( - "email.password_reset_behaviour is set to 'local' but no " - "public_baseurl is set.
This is necessary to generate password " - "reset links" - ) - if self.email_enable_notifs: required = [ "smtp_host", diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index acadef4fd31698c1532951addb66c667dc5c14d8..72acad4f18959692d3ff52c58aa70c81555c6ac9 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -40,6 +40,7 @@ from .spam_checker import SpamCheckerConfig from .stats import StatsConfig from .third_party_event_rules import ThirdPartyRulesConfig from .tls import TlsConfig +from .tracer import TracerConfig from .user_directory import UserDirectoryConfig from .voip import VoipConfig from .workers import WorkerConfig @@ -75,5 +76,6 @@ class HomeServerConfig( ServerNoticesConfig, RoomDirectoryConfig, ThirdPartyRulesConfig, + TracerConfig, ): pass diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 52cf691227c70fade8c9d37e1f9dba8a5bf33ea2..40502a57987808ffc0905c222a38646aca901b43 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner import synapse from synapse.app import _base as appbase -from synapse.util.logcontext import LoggingContextFilter +from synapse.logging.context import LoggingContextFilter from synapse.util.versionstring import get_version_string from ._base import Config @@ -40,7 +40,7 @@ formatters: filters: context: - (): synapse.util.logcontext.LoggingContextFilter + (): synapse.logging.context.LoggingContextFilter request: "" handlers: diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 8c587f3fd2f8b8eab5d6cf6c0ebda010bc0a066f..33f31cf21303f1e5cae7b84dc216c56142769724 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -23,7 +23,7 @@ class RateLimitConfig(object): class FederationRateLimitConfig(object): _items_and_default = { - "window_size": 10000, + "window_size": 1000, "sleep_limit": 10, "sleep_delay": 500, "reject_limit": 50, @@ -54,7 +54,7 @@ class RatelimitConfig(Config): # Load the new-style federation config, if it exists. Otherwise, fall # back to the old method. 
- if "federation_rc" in config: + if "rc_federation" in config: self.rc_federation = FederationRateLimitConfig(**config["rc_federation"]) else: self.rc_federation = FederationRateLimitConfig( diff --git a/synapse/config/registration.py b/synapse/config/registration.py index ee5885251588c59775377c14849871164897e6d1..c3de7a4e32aeb135cf90f7a02cfe08851087125d 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -71,9 +71,8 @@ class RegistrationConfig(Config): self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) - self.invite_3pid_guest = self.allow_guest_access and config.get( - "invite_3pid_guest", False - ) + if config.get("invite_3pid_guest", False): + raise ConfigError("invite_3pid_guest is no longer supported") self.auto_join_rooms = config.get("auto_join_rooms", []) for room_alias in self.auto_join_rooms: @@ -85,6 +84,11 @@ class RegistrationConfig(Config): "disable_msisdn_registration", False ) + session_lifetime = config.get("session_lifetime") + if session_lifetime is not None: + session_lifetime = self.parse_duration(session_lifetime) + self.session_lifetime = session_lifetime + def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( @@ -142,6 +146,17 @@ class RegistrationConfig(Config): # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # Time that a user's session remains valid for, after they log in. + # + # Note that this is not currently compatible with guest logins. + # + # Note also that this is calculated at login time: changes are not applied + # retrospectively to users who have already logged in. + # + # By default, this is infinite. + # + #session_lifetime: 24h + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 872a1ba934bbcf0aa52ce29fd89978ac43563a47..6a8161547a8e9cafc6e86e5b7859d14466820f90 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from synapse.python_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError @@ -25,6 +26,11 @@ class SAML2Config(Config): if not saml2_config or not saml2_config.get("enabled", True): return + try: + check_requirements("saml2") + except DependencyException as e: + raise ConfigError(e.message) + self.saml2_enabled = True import saml2.config @@ -37,6 +43,11 @@ class SAML2Config(Config): if config_path is not None: self.saml2_sp_config.load_file(config_path) + # session lifetime: in milliseconds + self.saml2_session_lifetime = self.parse_duration( + saml2_config.get("saml_session_lifetime", "5m") + ) + def _default_saml_config_dict(self): import saml2 @@ -72,6 +83,12 @@ class SAML2Config(Config): # so it is not normally necessary to specify them unless you need to # override them. # + # Once SAML support is enabled, a metadata file will be exposed at + # https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to + # use to configure your SAML IdP with. Alternatively, you can manually configure + # the IdP to use an ACS location of + # https://<server>:<port>/_matrix/saml2/authn_response. 
+ # #saml2_config: # sp_config: # # point this to the IdP's metadata. You can use either a local file or @@ -81,7 +98,15 @@ # remote: # - url: https://our_idp/metadata.xml # - # # The rest of sp_config is just used to generate our metadata xml, and you + # # By default, the user has to go to our login page first. If you'd like to + # # allow IdP-initiated login, set 'allow_unsolicited: True' in a + # # 'service.sp' section: + # # + # #service: + # # sp: + # # allow_unsolicited: True + # + # # The examples below are just used to generate our metadata xml, and you # # may well not need it, depending on your setup. Alternatively you # # may need a whole lot more detail - see the pysaml2 docs! # @@ -104,6 +129,12 @@ # # separate pysaml2 configuration file: # # # config_path: "%(config_dir_path)s/sp_conf.py" + # + # # the lifetime of a SAML session. This defines how long a user has to + # # complete the authentication process, if allow_unsolicited is unset. + # # The default is 5 minutes. + # # + # # saml_session_lifetime: 5m """ % { "config_dir_path": config_dir_path } diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..63a637984aa84535e058a0a4a3dafe5aa68d3479 --- /dev/null +++ b/synapse/config/tracer.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config, ConfigError + + +class TracerConfig(Config): + def read_config(self, config, **kwargs): + self.tracer_config = config.get("opentracing") + + self.tracer_config = config.get("opentracing", {"tracer_enabled": False}) + + if self.tracer_config.get("tracer_enabled", False): + # The tracer is enabled so sanitize the config + # If no whitelist is given, default to an empty list + self.tracer_config.setdefault("homeserver_whitelist", []) + + if not isinstance(self.tracer_config.get("homeserver_whitelist"), list): + raise ConfigError("Tracer homeserver_whitelist config is malformed") + + def generate_config_section(cls, **kwargs): + return """\ + ## Opentracing ## + # These settings enable opentracing, which implements distributed tracing. + # This allows you to observe the causal chain of events across servers + # including requests, key lookups etc. across any server running + # synapse or any other services which support opentracing + # (specifically those implemented with jaeger). + + #opentracing: + # # Enable / disable tracer + # tracer_enabled: false + # # The list of homeservers we wish to expose our current traces to.
+ # # The list is a list of regexes which are matched against the + # # servername of the homeserver + # homeserver_whitelist: + # - ".*" + """ diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 10c2eb7f0fa7dc1d69b8ef599664961919236aea..341c863152730891cd16c7f801295a3835c60d13 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -44,15 +44,16 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) -from synapse.storage.keys import FetchKeyResult -from synapse.util import logcontext, unwrapFirstError -from synapse.util.async_helpers import yieldable_gather_results -from synapse.util.logcontext import ( +from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, + make_deferred_yieldable, preserve_fn, run_in_background, ) +from synapse.storage.keys import FetchKeyResult +from synapse.util import unwrapFirstError +from synapse.util.async_helpers import yieldable_gather_results from synapse.util.metrics import Measure from synapse.util.retryutils import NotRetryingDestination @@ -140,7 +141,7 @@ class Keyring(object): """ req = VerifyJsonRequest(server_name, json_object, validity_time, request_name) requests = (req,) - return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0]) + return make_deferred_yieldable(self._verify_objects(requests)[0]) def verify_json_objects_for_server(self, server_and_json): """Bulk verifies signatures of json objects, bulk fetching keys as @@ -557,7 +558,7 @@ class BaseV2KeyFetcher(object): signed_key_json_bytes = encode_canonical_json(signed_key_json) - yield logcontext.make_deferred_yieldable( + yield make_deferred_yieldable( defer.gatherResults( [ run_in_background( @@ -612,7 +613,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): defer.returnValue({}) - results = yield logcontext.make_deferred_yieldable( + results = yield make_deferred_yieldable( defer.gatherResults( [run_in_background(get_key, server) for server in self.key_servers], consumeErrors=True, diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a96cdada3d35efadcd4f1c6d024759c5d3beee7b..a9545e6c1b17406caab5134e5cfef3d25bc7ee8d 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,7 +19,7 @@ from frozendict import frozendict from twisted.internet import defer -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background class EventContext(object): diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f24f0c16f0010f14405cf6baa32ac2cd61623a7c..987de5cab7a354437dea3c34e58d6da35fb7d530 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -392,7 +392,11 @@ class EventClientSerializer(object): serialized_event["content"].pop("m.relates_to", None) r = serialized_event["unsigned"].setdefault("m.relations", {}) - r[RelationTypes.REPLACE] = {"event_id": edit.event_id} + r[RelationTypes.REPLACE] = { + "event_id": edit.event_id, + "origin_server_ts": edit.origin_server_ts, + "sender": edit.sender, + } defer.returnValue(serialized_event) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 1e925b19e7b3509cd96e09c6e32469a869c56f71..f7bb806ae724cf77bc8a2c58926fa1b109d2aea9 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -27,8 +27,14 @@ from synapse.crypto.event_signing import check_event_content_hash from synapse.events import 
event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
+from synapse.logging.context import (
+    LoggingContext,
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+    preserve_fn,
+)
 from synapse.types import get_domain_from_id
-from synapse.util import logcontext, unwrapFirstError
+from synapse.util import unwrapFirstError

 logger = logging.getLogger(__name__)

@@ -73,7 +79,7 @@ class FederationBase(object):
         @defer.inlineCallbacks
         def handle_check_result(pdu, deferred):
             try:
-                res = yield logcontext.make_deferred_yieldable(deferred)
+                res = yield make_deferred_yieldable(deferred)
             except SynapseError:
                 res = None
@@ -102,10 +108,10 @@ class FederationBase(object):

             defer.returnValue(res)

-        handle = logcontext.preserve_fn(handle_check_result)
+        handle = preserve_fn(handle_check_result)
         deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]

-        valid_pdus = yield logcontext.make_deferred_yieldable(
+        valid_pdus = yield make_deferred_yieldable(
             defer.gatherResults(deferreds2, consumeErrors=True)
         ).addErrback(unwrapFirstError)
@@ -115,7 +121,7 @@ class FederationBase(object):
         defer.returnValue([p for p in valid_pdus if p])

     def _check_sigs_and_hash(self, room_version, pdu):
-        return logcontext.make_deferred_yieldable(
+        return make_deferred_yieldable(
             self._check_sigs_and_hashes(room_version, [pdu])[0]
         )
@@ -133,14 +139,14 @@ class FederationBase(object):
           * returns a redacted version of the event (if the signature matched but the hash did not)
           * throws a SynapseError if the signature check failed.

         The deferreds run their callbacks in the sentinel logcontext.
         """
         deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)

-        ctx = logcontext.LoggingContext.current_context()
+        ctx = LoggingContext.current_context()

         def callback(_, pdu):
-            with logcontext.PreserveLoggingContext(ctx):
+            with PreserveLoggingContext(ctx):
                 if not check_event_content_hash(pdu):
                     # let's try to distinguish between failures because the event was
                     # redacted (which are somewhat expected) vs actual ball-tampering
@@ -178,7 +184,7 @@ class FederationBase(object):
         def errback(failure, pdu):
             failure.trap(SynapseError)

-            with logcontext.PreserveLoggingContext(ctx):
+            with PreserveLoggingContext(ctx):
                 logger.warn(
                     "Signature check failed for %s: %s",
                     pdu.event_id,
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 3883eb525eedd245e94180aba62a9e91acecf8b3..3cb4b944208594e6a16640cfa3642fe5c788782f 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -39,10 +39,10 @@ from synapse.api.room_versions import (
 )
 from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.util import logcontext, unwrapFirstError
+from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.utils import log_function
+from synapse.util import unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logcontext import make_deferred_yieldable, run_in_background
-from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination

 logger = logging.getLogger(__name__)

@@ -207,7 +207,7 @@ class FederationClient(FederationBase):
         ]

         # FIXME: We should handle signature failures more gracefully.
- pdus[:] = yield logcontext.make_deferred_yieldable( + pdus[:] = yield make_deferred_yieldable( defer.gatherResults( self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True ).addErrback(unwrapFirstError) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2e0cebb638c483e3ee78cd355775ca2130cd48a1..8c0a18b120393c20a96a9041b55d5edd04f1e3c2 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -42,6 +42,8 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name +from synapse.logging.context import nested_logging_context +from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, @@ -50,8 +52,6 @@ from synapse.types import get_domain_from_id from synapse.util import glob_to_regex from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import nested_logging_context -from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in # parallel, up to this limit. diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 7535f79203b31073f709f88a4e274cd72c1b6b28..44edcabed4722a0ddf5b735cb416e9402782019e 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -21,9 +21,7 @@ These actions are mostly only used by the :py:mod:`.replication` module. import logging -from twisted.internet import defer - -from synapse.util.logutils import log_function +from synapse.logging.utils import log_function logger = logging.getLogger(__name__) @@ -63,33 +61,3 @@ class TransactionActions(object): return self.store.set_received_txn_response( transaction.transaction_id, origin, code, response ) - - @defer.inlineCallbacks - @log_function - def prepare_to_send(self, transaction): - """ Persists the `Transaction` we are about to send and works out the - correct value for the `prev_ids` key. - - Returns: - Deferred - """ - transaction.prev_ids = yield self.store.prep_send_transaction( - transaction.transaction_id, - transaction.destination, - transaction.origin_server_ts, - ) - - @log_function - def delivered(self, transaction, response_code, response_dict): - """ Marks the given `Transaction` as having been successfully - delivered to the remote homeserver, and what the response was. 
- - Returns: - Deferred - """ - return self.store.delivered_txn( - transaction.transaction_id, - transaction.destination, - response_code, - response_dict, - ) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 766c5a37cd7aea3f13fced7c9015fb5e645b450d..d46f4aaeb1a373d3ea6960bf30f15a35ad948889 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -26,6 +26,11 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu from synapse.handlers.presence import get_interested_remotes +from synapse.logging.context import ( + make_deferred_yieldable, + preserve_fn, + run_in_background, +) from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -33,7 +38,6 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import logcontext from synapse.util.metrics import measure_func logger = logging.getLogger(__name__) @@ -210,10 +214,10 @@ class FederationSender(object): for event in events: events_by_room.setdefault(event.room_id, []).append(event) - yield logcontext.make_deferred_yieldable( + yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background(handle_room_events, evs) + run_in_background(handle_room_events, evs) for evs in itervalues(events_by_room) ], consumeErrors=True, @@ -360,7 +364,7 @@ class FederationSender(object): for queue in queues: queue.flush_read_receipts_for_room(room_id) - @logcontext.preserve_fn # the caller should not yield on this + @preserve_fn # the caller should not yield on this @defer.inlineCallbacks def send_presence(self, states): """Send the new presence states to the appropriate destinations. 
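The `sender/__init__.py` hunk above is representative of the mechanical change running through this whole diff: call sites move from `logcontext.run_in_background` and `logcontext.make_deferred_yieldable` to the bare names imported from `synapse.logging.context`, while the fan-out pattern itself is untouched. For reference, here is that pattern in isolation, as a sketch assuming Twisted and the Synapse helpers named in the diff:

```python
from twisted.internet import defer

from synapse.logging.context import make_deferred_yieldable, run_in_background


@defer.inlineCallbacks
def handle_rooms(handle_room_events, events_by_room):
    # Kick off one background task per room; run_in_background starts each
    # task in its own logcontext so its log lines stay attributable.
    deferreds = [
        run_in_background(handle_room_events, evs)
        for evs in events_by_room.values()
    ]

    # gatherResults collects the results; make_deferred_yieldable restores
    # the caller's logcontext when we resume after the yield.
    results = yield make_deferred_yieldable(
        defer.gatherResults(deferreds, consumeErrors=True)
    )
    defer.returnValue(results)
```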
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index c987bb9a0d3cb05574215eb8efa3b376029389b7..0460a8c4acc6f3dd51537ab379e90165cb7cd723 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -63,8 +63,6 @@ class TransactionManager(object): len(edus), ) - logger.debug("TX [%s] Persisting transaction...", destination) - transaction = Transaction.create_new( origin_server_ts=int(self.clock.time_msec()), transaction_id=txn_id, @@ -76,9 +74,6 @@ class TransactionManager(object): self._next_txn_id += 1 - yield self._transaction_actions.prepare_to_send(transaction) - - logger.debug("TX [%s] Persisted transaction", destination) logger.info( "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", destination, @@ -118,10 +113,6 @@ class TransactionManager(object): logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) - yield self._transaction_actions.delivered(transaction, code, response) - - logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id) - if code == 200: for e_id, r in response.get("pdus", {}).items(): if "error" in r: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index aecd1423097ba6902507f9abb59ad9c9ee2326fb..1aae9ec9e74dbf106444d321a7b8cad6a5438ea6 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -22,7 +22,7 @@ from twisted.internet import defer from synapse.api.constants import Membership from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX -from synapse.util.logutils import log_function +from synapse.logging.utils import log_function logger = logging.getLogger(__name__) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 955f0f4308252d15f2ec6accb3f75991d5fe7b39..c45d458d946d4335244fa1bd36cf5c061a9d9ed9 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -21,6 +21,7 @@ import re from twisted.internet import defer import synapse +import synapse.logging.opentracing as opentracing from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.room_versions import RoomVersions from synapse.api.urls import ( @@ -36,8 +37,8 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string_from_args, ) +from synapse.logging.context import run_in_background from synapse.types import ThirdPartyInstanceID, get_domain_from_id -from synapse.util.logcontext import run_in_background from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string @@ -288,14 +289,29 @@ class BaseFederationServlet(object): logger.warn("authenticate_request failed: %s", e) raise - if origin: - with ratelimiter.ratelimit(origin) as d: - yield d + # Start an opentracing span + with opentracing.start_active_span_from_context( + request.requestHeaders, + "incoming-federation-request", + tags={ + "request_id": request.get_request_id(), + opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER, + opentracing.tags.HTTP_METHOD: request.get_method(), + opentracing.tags.HTTP_URL: request.get_redacted_uri(), + opentracing.tags.PEER_HOST_IPV6: request.getClientIP(), + "authenticated_entity": origin, + }, + ): + if origin: + with ratelimiter.ratelimit(origin) as d: + yield d + response = yield func( + origin, content, request.args, *args, 
**kwargs + ) + else: response = yield func( origin, content, request.args, *args, **kwargs ) - else: - response = yield func(origin, content, request.args, *args, **kwargs) defer.returnValue(response) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e73757570cf74df52a5e63029767d16cb93b4a82..f497711133e9610711333beb1537575b2f16dff8 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -43,9 +43,9 @@ from signedjson.sign import sign_json from twisted.internet import defer from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id -from synapse.util.logcontext import run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 0719da3ab7e093741c6913284ce7adb966b67259..1f1708ba7d5d8bf3a9f1c73da672c952dbad96a8 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -22,9 +22,10 @@ from email.mime.text import MIMEText from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.context import make_deferred_yieldable +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID from synapse.util import stringutils -from synapse.util.logcontext import make_deferred_yieldable try: from synapse.push.mailer import load_jinja2_templates @@ -67,7 +68,14 @@ class AccountValidityHandler(object): ) # Check the renewal emails to send and send them every 30min. - self.clock.looping_call(self.send_renewal_emails, 30 * 60 * 1000) + def send_emails(): + # run as a background process to make sure that the database transactions + # have a logcontext to report to + return run_as_background_process( + "send_renewals", self.send_renewal_emails + ) + + self.clock.looping_call(send_emails, 30 * 60 * 1000) @defer.inlineCallbacks def send_renewal_emails(self): diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e424fc46bddb4bb487708d797be25685958a2fae..e8a651e231b886fc5f17241648912d35a489a20a 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,10 +14,6 @@ # limitations under the License. import logging -import os -import tempfile - -from canonicaljson import json from twisted.internet import defer @@ -99,15 +95,16 @@ class AdminHandler(BaseHandler): defer.returnValue(ret) @defer.inlineCallbacks - def exfiltrate_user_data(self, user_id, writer): - """Write all data we have of the user to the specified directory. + def export_user_data(self, user_id, writer): + """Write all data we have on the user to the given writer. Args: user_id (str) writer (ExfiltrationWriter) Returns: - defer.Deferred + defer.Deferred: Resolves when all data for a user has been written. + The returned value is that returned by `writer.finished()`. 
""" # Get all rooms the user is in or has been in rooms = yield self.store.get_rooms_for_user_where_membership_is( @@ -134,7 +131,7 @@ class AdminHandler(BaseHandler): forgotten = yield self.store.did_forget(user_id, room_id) if forgotten: - logger.info("[%s] User forgot room %d, ignoring", room_id) + logger.info("[%s] User forgot room %d, ignoring", user_id, room_id) continue if room_id not in rooms_user_has_been_in: @@ -172,13 +169,14 @@ class AdminHandler(BaseHandler): # dict[str, set[str]]. event_to_unseen_prevs = {} - # The reverse mapping to above, i.e. map from unseen event to parent - # events. dict[str, set[str]] - unseen_event_to_parents = {} + # The reverse mapping to above, i.e. map from unseen event to events + # that have the unseen event in their prev_events, i.e. the unseen + # events "children". dict[str, set[str]] + unseen_to_child_events = {} # We fetch events in the room the user could see by fetching *all* # events that we have and then filtering, this isn't the most - # efficient method perhaps but it does guarentee we get everything. + # efficient method perhaps but it does guarantee we get everything. while True: events, _ = yield self.store.paginate_room_events( room_id, from_key, to_key, limit=100, direction="f" @@ -200,16 +198,14 @@ class AdminHandler(BaseHandler): if unseen_events: event_to_unseen_prevs[event.event_id] = unseen_events for unseen in unseen_events: - unseen_event_to_parents.setdefault(unseen, set()).add( + unseen_to_child_events.setdefault(unseen, set()).add( event.event_id ) # Now check if this event is an unseen prev event, if so # then we remove this event from the appropriate dicts. - for event_id in unseen_event_to_parents.pop(event.event_id, []): - event_to_unseen_prevs.get(event_id, set()).discard( - event.event_id - ) + for child_id in unseen_to_child_events.pop(event.event_id, []): + event_to_unseen_prevs[child_id].discard(event.event_id) written_events.add(event.event_id) @@ -233,7 +229,7 @@ class AdminHandler(BaseHandler): class ExfiltrationWriter(object): - """Interfaced used to specify how to write exfilrated data. + """Interface used to specify how to write exported data. """ def write_events(self, room_id, events): @@ -254,7 +250,7 @@ class ExfiltrationWriter(object): Args: room_id (str) event_id (str) - state (list[FrozenEvent]) + state (dict[tuple[str, str], FrozenEvent]) """ pass @@ -263,76 +259,16 @@ class ExfiltrationWriter(object): Args: room_id (str) - invite (FrozenEvent) - state (list[dict]): A subset of the state at the invite, with a - subset of the event keys (type, state_key, content and sender) + event (FrozenEvent) + state (dict[tuple[str, str], dict]): A subset of the state at the + invite, with a subset of the event keys (type, state_key + content and sender) """ def finished(self): - """Called when exfiltration is complete, and the return valus is passed - to the requester. + """Called when all data has succesfully been exported and written. + + This functions return value is passed to the caller of + `export_user_data`. """ pass - - -class FileExfiltrationWriter(ExfiltrationWriter): - """An ExfiltrationWriter that writes the users data to a directory. - - Returns the directory location on completion. - - Args: - user_id (str): The user whose data is being exfiltrated. - directory (str|None): The directory to write the data to, if None then - will write to a temporary directory. 
- """ - - def __init__(self, user_id, directory=None): - self.user_id = user_id - - if directory: - self.base_directory = directory - else: - self.base_directory = tempfile.mkdtemp( - prefix="synapse-exfiltrate__%s__" % (user_id,) - ) - - os.makedirs(self.base_directory, exist_ok=True) - if list(os.listdir(self.base_directory)): - raise Exception("Directory must be empty") - - def write_events(self, room_id, events): - room_directory = os.path.join(self.base_directory, "rooms", room_id) - os.makedirs(room_directory, exist_ok=True) - events_file = os.path.join(room_directory, "events") - - with open(events_file, "a") as f: - for event in events: - print(json.dumps(event.get_pdu_json()), file=f) - - def write_state(self, room_id, event_id, state): - room_directory = os.path.join(self.base_directory, "rooms", room_id) - state_directory = os.path.join(room_directory, "state") - os.makedirs(state_directory, exist_ok=True) - - event_file = os.path.join(state_directory, event_id) - - with open(event_file, "a") as f: - for event in state.values(): - print(json.dumps(event.get_pdu_json()), file=f) - - def write_invite(self, room_id, event, state): - self.write_events(room_id, [event]) - - # We write the invite state somewhere else as they aren't full events - # and are only a subset of the state at the event. - room_directory = os.path.join(self.base_directory, "rooms", room_id) - os.makedirs(room_directory, exist_ok=True) - - invite_state = os.path.join(room_directory, "invite_state") - - with open(invite_state, "a") as f: - for event in state.values(): - print(json.dumps(event), file=f) - - def finished(self): - return self.base_directory diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5cc89d43f6a6677926996c18c2b70587675baa36..8f089f0e3355cc28e7a28a7b2876fe95e1443e34 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -23,13 +23,13 @@ from twisted.internet import defer import synapse from synapse.api.constants import EventTypes +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import ( event_processing_loop_counter, event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util import log_failure -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index c8c1ed324656ecd67b57a4c90f02b78160fe1b4f..d4d65749754eb1dd8e4a6676d04b989df68abc3b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +import time import unicodedata import attr @@ -34,11 +35,12 @@ from synapse.api.errors import ( LoginError, StoreError, SynapseError, + UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.logging.context import defer_to_thread from synapse.module_api import ModuleApi from synapse.types import UserID -from synapse.util import logcontext from synapse.util.caches.expiringcache import ExpiringCache from ._base import BaseHandler @@ -558,7 +560,7 @@ class AuthHandler(BaseHandler): return self.sessions[session_id] @defer.inlineCallbacks - def get_access_token_for_user_id(self, user_id, device_id=None): + def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms): """ Creates a new access token for the user with the given user ID. 
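The hunk that follows teaches `get_access_token_for_user_id` about token expiry and logs a human-readable expiry time alongside the login line. The formatting trick on its own (the standalone helper name `format_expiry` is invented for illustration):

```python
import time


def format_expiry(valid_until_ms):
    # Mirrors the log-line formatting in the hunk below: an expiry timestamp in
    # milliseconds renders as " until YYYY-MM-DD HH:MM:SS", or "" for no expiry.
    # The exact rendered time depends on the local timezone.
    if valid_until_ms is None:
        return ""
    return time.strftime(
        " until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0)
    )


# e.g. "Logging in user @alice:example.com on device DEV until 2020-09-13 ..."
print("Logging in user @alice:example.com on device DEV" + format_expiry(1600000000000))
```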
@@ -572,15 +574,27 @@ class AuthHandler(BaseHandler):
             device_id (str|None): the device ID to associate with the tokens.
                 None to leave the tokens unassociated with a device (deprecated:
                 we should always have a device ID)
+            valid_until_ms (int|None): the timestamp, in milliseconds, when the
+                token expires. None for no expiry.
         Returns:
             The access token for the user's session.
         Raises:
             StoreError if there was a problem storing the token.
         """
-        logger.info("Logging in user %s on device %s", user_id, device_id)
-        access_token = yield self.issue_access_token(user_id, device_id)
+        fmt_expiry = ""
+        if valid_until_ms is not None:
+            fmt_expiry = time.strftime(
+                " until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0)
+            )
+        logger.info("Logging in user %s on device %s%s", user_id, device_id, fmt_expiry)
+
+        yield self.auth.check_auth_blocking(user_id)
+
+        access_token = self.macaroon_gen.generate_access_token(user_id)
+        yield self.store.add_access_token_to_user(
+            user_id, access_token, device_id, valid_until_ms
+        )
+
         # the device *should* have been registered before we got here; however,
         # it's possible we raced against a DELETE operation. The thing we
         # really don't want is active access_tokens without a record of the
@@ -610,6 +624,7 @@ class AuthHandler(BaseHandler):
         Raises:
             LimitExceededError if the ratelimiter's login requests count for this
                 user is too high to proceed.
+            UserDeactivatedError if a user is found but is deactivated.
         """
         self.ratelimit_login_per_account(user_id)
         res = yield self._find_user_id_and_pwd_hash(user_id)
@@ -825,18 +840,19 @@ class AuthHandler(BaseHandler):
         if not lookupres:
             defer.returnValue(None)
         (user_id, password_hash) = lookupres
+
+        # If the password hash is None, the account has likely been deactivated
+        if not password_hash:
+            deactivated = yield self.store.get_user_deactivated_status(user_id)
+            if deactivated:
+                raise UserDeactivatedError("This account has been deactivated")
+
         result = yield self.validate_hash(password, password_hash)
         if not result:
             logger.warn("Failed password login for user %s", user_id)
             defer.returnValue(None)
         defer.returnValue(user_id)

-    @defer.inlineCallbacks
-    def issue_access_token(self, user_id, device_id=None):
-        access_token = self.macaroon_gen.generate_access_token(user_id)
-        yield self.store.add_access_token_to_user(user_id, access_token, device_id)
-        defer.returnValue(access_token)
-
     @defer.inlineCallbacks
     def validate_short_term_login_token_and_get_user_id(self, login_token):
         auth_api = self.hs.get_auth()
@@ -987,7 +1003,7 @@ class AuthHandler(BaseHandler):
             bcrypt.gensalt(self.bcrypt_rounds),
         ).decode("ascii")

-        return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)
+        return defer_to_thread(self.hs.get_reactor(), _do_hash)

     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
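Both `hash` and `validate_hash` push bcrypt work onto a worker thread with `defer_to_thread`, which this diff now imports from `synapse.logging.context` instead of `synapse.util.logcontext`. A standalone sketch of the same pattern, assuming `bcrypt` is installed and `hs` is a `HomeServer`:

```python
import bcrypt

from synapse.logging.context import defer_to_thread


def hash_password(hs, password):
    """Hash a password on a worker thread, returning a Deferred[str].

    bcrypt is deliberately slow, so running it on the reactor thread would
    stall every other request; defer_to_thread keeps the reactor responsive
    and preserves the caller's logcontext across the thread hop.
    """

    def _do_hash():
        return bcrypt.hashpw(
            password.encode("utf8"), bcrypt.gensalt()
        ).decode("ascii")

    return defer_to_thread(hs.get_reactor(), _do_hash)
```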
@@ -1013,7 +1029,7 @@ class AuthHandler(BaseHandler): if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode("ascii") - return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash) + return defer_to_thread(self.hs.get_reactor(), _do_validate_hash) else: return defer.succeed(False) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 807900fe52ac4c589e1b621e529c2fc0cb14293f..fdfe8611b6ca513dae205c5d8fb8aef900420195 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -22,9 +22,9 @@ from canonicaljson import encode_canonical_json, json from twisted.internet import defer -from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError +from synapse.api.errors import CodeMessageException, SynapseError +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID, get_domain_from_id -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @@ -350,9 +350,6 @@ def _exception_to_failure(e): if isinstance(e, NotRetryingDestination): return {"status": 503, "message": "Not ready for retry"} - if isinstance(e, FederationDeniedError): - return {"status": 403, "message": "Federation Denied"} - # include ConnectionRefused and other errors # # Note that some Exceptions (notably twisted's ResponseFailed etc) don't diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 5836d3c639bdcb16d7526f2bd273ae1e252bcda1..6a38328af3166f375065a9ff548070c705c1ffe6 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -21,8 +21,8 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase +from synapse.logging.utils import log_function from synapse.types import UserID -from synapse.util.logutils import log_function from synapse.visibility import filter_events_for_client from ._base import BaseHandler diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 02d397c498e4d6b72b1ae3142568091dcc5a9242..57be968c67c8f21ca5f95cd1bd38824f0ec38107 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -45,6 +45,13 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event from synapse.events.validator import EventValidator +from synapse.logging.context import ( + make_deferred_yieldable, + nested_logging_context, + preserve_fn, + run_in_background, +) +from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, @@ -52,10 +59,9 @@ from synapse.replication.http.federation import ( from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.types import UserID, get_domain_from_id -from synapse.util import logcontext, unwrapFirstError +from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room -from synapse.util.logutils import log_function from synapse.util.retryutils import NotRetryingDestination 
from synapse.visibility import filter_events_for_server @@ -338,7 +344,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(room_id) - with logcontext.nested_logging_context(p): + with nested_logging_context(p): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. @@ -532,7 +538,7 @@ class FederationHandler(BaseHandler): event_id, ev.event_id, ) - with logcontext.nested_logging_context(ev.event_id): + with nested_logging_context(ev.event_id): try: yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: @@ -725,10 +731,10 @@ class FederationHandler(BaseHandler): missing_auth - failed_to_fetch, ) - results = yield logcontext.make_deferred_yieldable( + results = yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background( + run_in_background( self.federation_client.get_pdu, [dest], event_id, @@ -994,10 +1000,8 @@ class FederationHandler(BaseHandler): event_ids = list(extremities.keys()) logger.debug("calling resolve_state_groups in _maybe_backfill") - resolve = logcontext.preserve_fn( - self.state_handler.resolve_state_groups_for_events - ) - states = yield logcontext.make_deferred_yieldable( + resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) + states = yield make_deferred_yieldable( defer.gatherResults( [resolve(room_id, [e]) for e in event_ids], consumeErrors=True ) @@ -1171,7 +1175,7 @@ class FederationHandler(BaseHandler): # lots of requests for missing prev_events which we do actually # have. Hence we fire off the deferred, but don't wait for it. - logcontext.run_in_background(self._handle_queued_pdus, room_queue) + run_in_background(self._handle_queued_pdus, room_queue) defer.returnValue(True) @@ -1191,7 +1195,7 @@ class FederationHandler(BaseHandler): p.event_id, p.room_id, ) - with logcontext.nested_logging_context(p.event_id): + with nested_logging_context(p.event_id): yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( @@ -1610,7 +1614,7 @@ class FederationHandler(BaseHandler): success = True finally: if not success: - logcontext.run_in_background( + run_in_background( self.store.remove_push_actions_from_staging, event.event_id ) @@ -1629,7 +1633,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def prep(ev_info): event = ev_info["event"] - with logcontext.nested_logging_context(suffix=event.event_id): + with nested_logging_context(suffix=event.event_id): res = yield self._prep_event( origin, event, @@ -1639,12 +1643,9 @@ class FederationHandler(BaseHandler): ) defer.returnValue(res) - contexts = yield logcontext.make_deferred_yieldable( + contexts = yield make_deferred_yieldable( defer.gatherResults( - [ - logcontext.run_in_background(prep, ev_info) - for ev_info in event_infos - ], + [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) @@ -2106,10 +2107,10 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - different_events = yield logcontext.make_deferred_yieldable( + different_events = yield make_deferred_yieldable( defer.gatherResults( [ - logcontext.run_in_background( + run_in_background( self.store.get_event, d, allow_none=True, allow_rejected=False ) for d in different_auth diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 
c82b1933f2160bc85b2e899ed902444853a1ea77..546d6169e9fa811785b71b84185a2c9783dc5403 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -118,7 +118,7 @@ class IdentityHandler(BaseHandler): raise SynapseError(400, "No client_secret in creds") try: - data = yield self.http_client.post_urlencoded_get_json( + data = yield self.http_client.post_json_get_json( "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"), {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid}, ) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index a1fe9d116faffa67ea505559cafad3524f4a655c..54c966c8a67b7a3b67eacfdb8f4ed93b13678455 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -21,12 +21,12 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, SynapseError from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.streams.config import PaginationConfig from synapse.types import StreamToken, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.visibility import filter_events_for_client from ._base import BaseHandler diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 683da6bf3204c0479cca08786d2904d4301bb5f1..eaeda7a5cbb2b16b1653b1bcd0ff12614a1224a1 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -34,13 +34,13 @@ from synapse.api.errors import ( from synapse.api.room_versions import RoomVersions from synapse.api.urls import ConsentURIBuilder from synapse.events.validator import EventValidator +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID, create_requester from synapse.util.async_helpers import Linearizer from synapse.util.frozenutils import frozendict_json_encoder -from synapse.util.logcontext import run_in_background from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 76ee97ddd32359de7b0a8ce940a94deb1cfac4b1..20bcfed334f90873cf79bfc733401f69919f4099 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -20,10 +20,10 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError +from synapse.logging.context import run_in_background from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock -from synapse.util.logcontext import run_in_background from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c80dc2eba0b07e66ebf3abaed1163c68a9a54b5c..6f3537e435455d46bf63e48bb9ed4c8039f6258b 100644 --- a/synapse/handlers/presence.py +++ 
b/synapse/handlers/presence.py @@ -34,14 +34,14 @@ from twisted.internet import defer import synapse.metrics from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError +from synapse.logging.context import run_in_background +from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.presence import UserPresenceState from synapse.types import UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.logcontext import run_in_background -from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index d8462b75eca549a81c711267cc38c3ada901a884..a2388a7091601cbbcd338f91a8683b8bcea52439 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -303,6 +303,10 @@ class BaseProfileHandler(BaseHandler): if not self.hs.config.require_auth_for_profile_requests or not requester: return + # Always allow the user to query their own profile. + if target_user.to_string() == requester.to_string(): + return + try: requester_rooms = yield self.store.get_rooms_for_user(requester.to_string()) target_user_rooms = yield self.store.get_rooms_for_user( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e487b90c0821010981b7582fe3735a84c4d96277..bb7cfd71b91d576426c130f8d859d7a669604180 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -84,6 +84,8 @@ class RegistrationHandler(BaseHandler): self.device_handler = hs.get_device_handler() self.pusher_pool = hs.get_pusherpool() + self.session_lifetime = hs.config.session_lifetime + @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, assigned_user_id=None): if types.contains_invalid_mxid_characters(localpart): @@ -138,11 +140,10 @@ class RegistrationHandler(BaseHandler): ) @defer.inlineCallbacks - def register( + def register_user( self, localpart=None, password=None, - generate_token=True, guest_access_token=None, make_guest=False, admin=False, @@ -160,11 +161,6 @@ class RegistrationHandler(BaseHandler): password (unicode) : The password to assign to this user so they can login again. This can be None which means they cannot login again via a password (e.g. the user is an application service user). - generate_token (bool): Whether a new access token should be - generated. Having this be True should be considered deprecated, - since it offers no means of associating a device_id with the - access_token. Instead you should call auth_handler.issue_access_token - after registration. user_type (str|None): type of user. One of the values from api.constants.UserTypes, or None for a normal user. default_display_name (unicode|None): if set, the new user's displayname @@ -172,7 +168,7 @@ class RegistrationHandler(BaseHandler): address (str|None): the IP address used to perform the registration. bind_emails (List[str]): list of emails to bind to this account. Returns: - A tuple of (user_id, access_token). + Deferred[str]: user_id Raises: RegistrationError if there was a problem registering. 
""" @@ -206,12 +202,8 @@ class RegistrationHandler(BaseHandler): elif default_display_name is None: default_display_name = localpart - token = None - if generate_token: - token = self.macaroon_gen.generate_access_token(user_id) yield self.register_with_store( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, @@ -230,21 +222,17 @@ class RegistrationHandler(BaseHandler): else: # autogen a sequential user ID attempts = 0 - token = None user = None while not user: localpart = yield self._generate_user_id(attempts > 0) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() yield self.check_user_id_not_appservice_exclusive(user_id) - if generate_token: - token = self.macaroon_gen.generate_access_token(user_id) if default_display_name is None: default_display_name = localpart try: yield self.register_with_store( user_id=user_id, - token=token, password_hash=password_hash, make_guest=make_guest, create_profile_with_displayname=default_display_name, @@ -254,10 +242,15 @@ class RegistrationHandler(BaseHandler): # if user id is taken, just generate another user = None user_id = None - token = None attempts += 1 + if not self.hs.config.user_consent_at_registration: yield self._auto_join_rooms(user_id) + else: + logger.info( + "Skipping auto-join for %s because consent is required at registration", + user_id, + ) # Bind any specified emails to this account current_time = self.hs.get_clock().time_msec() @@ -272,7 +265,7 @@ class RegistrationHandler(BaseHandler): # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None, False) - defer.returnValue((user_id, token)) + defer.returnValue(user_id) @defer.inlineCallbacks def _auto_join_rooms(self, user_id): @@ -298,6 +291,7 @@ class RegistrationHandler(BaseHandler): count = yield self.store.count_all_users() should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: + logger.info("Auto-joining %s to %s", user_id, r) try: if should_auto_create_rooms: room_alias = RoomAlias.from_string(r) @@ -505,87 +499,6 @@ class RegistrationHandler(BaseHandler): ) defer.returnValue(data) - @defer.inlineCallbacks - def get_or_create_user(self, requester, localpart, displayname, password_hash=None): - """Creates a new user if the user does not exist, - else revokes all previous access tokens and generates a new one. - - Args: - localpart : The local part of the user ID to register. If None, - one will be randomly generated. - Returns: - A tuple of (user_id, access_token). - Raises: - RegistrationError if there was a problem registering. - - NB this is only used in tests. TODO: move it to the test package! 
- """ - if localpart is None: - raise SynapseError(400, "Request must include user id") - yield self.auth.check_auth_blocking() - need_register = True - - try: - yield self.check_username(localpart) - except SynapseError as e: - if e.errcode == Codes.USER_IN_USE: - need_register = False - else: - raise - - user = UserID(localpart, self.hs.hostname) - user_id = user.to_string() - token = self.macaroon_gen.generate_access_token(user_id) - - if need_register: - yield self.register_with_store( - user_id=user_id, - token=token, - password_hash=password_hash, - create_profile_with_displayname=user.localpart, - ) - else: - yield self._auth_handler.delete_access_tokens_for_user(user_id) - yield self.store.add_access_token_to_user(user_id=user_id, token=token) - - if displayname is not None: - logger.info("setting user display name: %s -> %s", user_id, displayname) - yield self.profile_handler.set_displayname( - user, requester, displayname, by_admin=True - ) - - defer.returnValue((user_id, token)) - - @defer.inlineCallbacks - def get_or_register_3pid_guest(self, medium, address, inviter_user_id): - """Get a guest access token for a 3PID, creating a guest account if - one doesn't already exist. - - Args: - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - - Returns: - Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the - 3PID guest account. - """ - access_token = yield self.store.get_3pid_guest_access_token(medium, address) - if access_token: - user_info = yield self.auth.get_user_by_access_token(access_token) - - defer.returnValue((user_info["user"].to_string(), access_token)) - - user_id, access_token = yield self.register( - generate_token=True, make_guest=True - ) - access_token = yield self.store.save_or_get_3pid_guest_access_token( - medium, address, access_token, inviter_user_id - ) - - defer.returnValue((user_id, access_token)) - @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): room_id = None @@ -615,7 +528,6 @@ class RegistrationHandler(BaseHandler): def register_with_store( self, user_id, - token=None, password_hash=None, was_guest=False, make_guest=False, @@ -629,9 +541,6 @@ class RegistrationHandler(BaseHandler): Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str|None): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. @@ -667,7 +576,6 @@ class RegistrationHandler(BaseHandler): if self.hs.config.worker_app: return self._register_client( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, @@ -678,9 +586,8 @@ class RegistrationHandler(BaseHandler): address=address, ) else: - return self.store.register( + return self.store.register_user( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, @@ -694,6 +601,8 @@ class RegistrationHandler(BaseHandler): def register_device(self, user_id, device_id, initial_display_name, is_guest=False): """Register a device for a user and generate an access token. + The access token will be limited by the homeserver's session_lifetime config. 
+ Args: user_id (str): full canonical @user:id device_id (str|None): The device ID to check, or None to generate @@ -714,20 +623,29 @@ class RegistrationHandler(BaseHandler): is_guest=is_guest, ) defer.returnValue((r["device_id"], r["access_token"])) - else: - device_id = yield self.device_handler.check_device_registered( - user_id, device_id, initial_display_name - ) + + valid_until_ms = None + if self.session_lifetime is not None: if is_guest: - access_token = self.macaroon_gen.generate_access_token( - user_id, ["guest = true"] - ) - else: - access_token = yield self._auth_handler.get_access_token_for_user_id( - user_id, device_id=device_id + raise Exception( + "session_lifetime is not currently implemented for guest access" ) + valid_until_ms = self.clock.time_msec() + self.session_lifetime + + device_id = yield self.device_handler.check_device_registered( + user_id, device_id, initial_display_name + ) + if is_guest: + assert valid_until_ms is None + access_token = self.macaroon_gen.generate_access_token( + user_id, ["guest = true"] + ) + else: + access_token = yield self._auth_handler.get_access_token_for_user_id( + user_id, device_id=device_id, valid_until_ms=valid_until_ms + ) - defer.returnValue((device_id, access_token)) + defer.returnValue((device_id, access_token)) @defer.inlineCallbacks def post_registration_actions( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 66b05b4732ba3ba01927701281bb2bd6037a4487..e0196ef83e6d21331a01cf84bb4df8fe071305c9 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -29,7 +29,7 @@ from twisted.internet import defer import synapse.server import synapse.types from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, SynapseError +from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room @@ -118,24 +118,6 @@ class RoomMemberHandler(object): """ raise NotImplementedError() - @abc.abstractmethod - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Get a guest access token for a 3PID, creating a guest account if - one doesn't already exist. - - Args: - requester (Requester) - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - - Returns: - Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the - 3PID guest account. - """ - raise NotImplementedError() - @abc.abstractmethod def _user_joined_room(self, target, room_id): """Notifies distributor on master process that the user has joined the @@ -890,24 +872,23 @@ class RoomMemberHandler(object): "sender_avatar_url": inviter_avatar_url, } - if self.config.invite_3pid_guest: - guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest( - requester=requester, - medium=medium, - address=address, - inviter_user_id=inviter_user_id, + try: + data = yield self.simple_http_client.post_json_get_json( + is_url, invite_config ) - - invite_config.update( - { - "guest_access_token": guest_access_token, - "guest_user_id": guest_user_id, - } + except HttpResponseException as e: + # Some identity servers may only support application/x-www-form-urlencoded + # types. 
This is especially true with old instances of Sydent, see + # https://github.com/matrix-org/sydent/pull/170 + logger.info( + "Failed to POST %s with JSON, falling back to urlencoded form: %s", + is_url, + e, + ) + data = yield self.simple_http_client.post_urlencoded_get_json( + is_url, invite_config ) - data = yield self.simple_http_client.post_urlencoded_get_json( - is_url, invite_config - ) # TODO: Check for success token = data["token"] public_keys = data.get("public_keys", []) @@ -1010,12 +991,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): yield self.store.locally_reject_invite(target.to_string(), room_id) defer.returnValue({}) - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Implements RoomMemberHandler.get_or_register_3pid_guest - """ - rg = self.registration_handler - return rg.get_or_register_3pid_guest(medium, address, inviter_user_id) - def _user_joined_room(self, target, room_id): """Implements RoomMemberHandler._user_joined_room """ diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index da501f38c04e05d6293a637cb4b42e3901a61952..fc873a3ba651544d73f910b6b2cf3be8f56693ad 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -20,7 +20,6 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.handlers.room_member import RoomMemberHandler from synapse.replication.http.membership import ( - ReplicationRegister3PIDGuestRestServlet as Repl3PID, ReplicationRemoteJoinRestServlet as ReplRemoteJoin, ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite, ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft, @@ -33,7 +32,6 @@ class RoomMemberWorkerHandler(RoomMemberHandler): def __init__(self, hs): super(RoomMemberWorkerHandler, self).__init__(hs) - self._get_register_3pid_client = Repl3PID.make_client(hs) self._remote_join_client = ReplRemoteJoin.make_client(hs) self._remote_reject_client = ReplRejectInvite.make_client(hs) self._notify_change_client = ReplJoinedLeft.make_client(hs) @@ -80,13 +78,3 @@ class RoomMemberWorkerHandler(RoomMemberHandler): return self._notify_change_client( user_id=target.to_string(), room_id=room_id, change="left" ) - - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Implements RoomMemberHandler.get_or_register_3pid_guest - """ - return self._get_register_3pid_client( - requester=requester, - medium=medium, - address=address, - inviter_user_id=inviter_user_id, - ) diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ce6929cfeebd598e2b531fcf1680d67985f214 --- /dev/null +++ b/synapse/handlers/saml_handler.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
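The handler below keeps a dict of outstanding SAML sessions and discards any that are older than `saml2_session_lifetime` milliseconds before validating an authn response. A toy illustration of that expiry rule (the 5-minute figure matches the `saml_session_lifetime` default added to the config earlier in this diff; everything else is invented for the example):

```python
# Toy illustration of the expiry rule applied by expire_sessions() below.
FIVE_MINUTES_MS = 5 * 60 * 1000


def is_expired(creation_time_ms, now_ms, lifetime_ms=FIVE_MINUTES_MS):
    # A session is dropped once its creation time falls before the cutoff.
    return creation_time_ms < now_ms - lifetime_ms


assert is_expired(0, 6 * 60 * 1000)       # six minutes old: dropped
assert not is_expired(0, 4 * 60 * 1000)   # four minutes old: still valid
```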
+import logging
+
+import attr
+import saml2
+from saml2.client import Saml2Client
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import parse_string
+from synapse.rest.client.v1.login import SSOAuthHandler
+
+logger = logging.getLogger(__name__)
+
+
+class SamlHandler:
+    def __init__(self, hs):
+        self._saml_client = Saml2Client(hs.config.saml2_sp_config)
+        self._sso_auth_handler = SSOAuthHandler(hs)
+
+        # a map from saml session id to Saml2SessionData object
+        self._outstanding_requests_dict = {}
+
+        self._clock = hs.get_clock()
+        self._saml2_session_lifetime = hs.config.saml2_session_lifetime
+
+    def handle_redirect_request(self, client_redirect_url):
+        """Handle an incoming request to /login/sso/redirect
+
+        Args:
+            client_redirect_url (bytes): the URL that we should redirect the
+                client to when everything is done
+
+        Returns:
+            bytes: URL to redirect to
+        """
+        reqid, info = self._saml_client.prepare_for_authenticate(
+            relay_state=client_redirect_url
+        )
+
+        now = self._clock.time_msec()
+        self._outstanding_requests_dict[reqid] = Saml2SessionData(creation_time=now)
+
+        for key, value in info["headers"]:
+            if key == "Location":
+                return value
+
+        # this shouldn't happen!
+        raise Exception("prepare_for_authenticate didn't return a Location header")
+
+    def handle_saml_response(self, request):
+        """Handle an incoming request to /_matrix/saml2/authn_response
+
+        Args:
+            request (SynapseRequest): the incoming request from the browser. We'll
+                respond to it with a redirect.
+
+        Returns:
+            Deferred[None]: Completes once we have handled the request.
+        """
+        resp_bytes = parse_string(request, "SAMLResponse", required=True)
+        relay_state = parse_string(request, "RelayState", required=True)
+
+        # expire outstanding sessions before parse_authn_request_response checks
+        # the dict.
+ self.expire_sessions() + + try: + saml2_auth = self._saml_client.parse_authn_request_response( + resp_bytes, + saml2.BINDING_HTTP_POST, + outstanding=self._outstanding_requests_dict, + ) + except Exception as e: + logger.warning("Exception parsing SAML2 response: %s", e) + raise SynapseError(400, "Unable to parse SAML2 response: %s" % (e,)) + + if saml2_auth.not_signed: + logger.warning("SAML2 response was not signed") + raise SynapseError(400, "SAML2 response was not signed") + + if "uid" not in saml2_auth.ava: + logger.warning("SAML2 response lacks a 'uid' attestation") + raise SynapseError(400, "uid not in SAML2 response") + + self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) + + username = saml2_auth.ava["uid"][0] + displayName = saml2_auth.ava.get("displayName", [None])[0] + + return self._sso_auth_handler.on_successful_auth( + username, request, relay_state, user_display_name=displayName + ) + + def expire_sessions(self): + expire_before = self._clock.time_msec() - self._saml2_session_lifetime + to_expire = set() + for reqid, data in self._outstanding_requests_dict.items(): + if data.creation_time < expire_before: + to_expire.add(reqid) + for reqid in to_expire: + logger.debug("Expiring session id %s", reqid) + del self._outstanding_requests_dict[reqid] + + +@attr.s +class Saml2SessionData: + """Data we track about SAML2 sessions""" + + # time the session was created, in milliseconds + creation_time = attr.ib() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a3f550554f05d53d4f53b54a5f6a7aff5d38b7c2..cd1ac0a27ab76c87881d5429f784188d9be3b4b7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -25,6 +25,7 @@ from prometheus_client import Counter from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.logging.context import LoggingContext from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter @@ -33,7 +34,6 @@ from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.response_cache import ResponseCache -from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure, measure_func from synapse.visibility import filter_events_for_client diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index f8062c867133c4e25df3c356acdd2174d5a03fe6..c3e0c8fc7e37bd1aadb95908e9d0d4db5eaa5fe2 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -19,9 +19,9 @@ from collections import namedtuple from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError +from synapse.logging.context import run_in_background from synapse.types import UserID, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer diff --git a/synapse/http/client.py b/synapse/http/client.py index 9bc7035c8dae5361f6d95109e6b6e58419d9d196..45d50109529903795a847a33027d9c014453613c 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -45,9 +45,9 @@ from synapse.http import ( cancelled_to_request_timed_out_error, redact_uri, ) +from synapse.logging.context import make_deferred_yieldable from 
synapse.util.async_helpers import timeout_deferred from synapse.util.caches import CACHE_SIZE_FACTOR -from synapse.util.logcontext import make_deferred_yieldable logger = logging.getLogger(__name__) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 414cde0777762371129f7b8418667a1a00d62cca..054c321a2015743e5d29ac351981d47db9fc3bb7 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -30,9 +30,9 @@ from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list +from synapse.logging.context import make_deferred_yieldable from synapse.util import Clock from synapse.util.caches.ttlcache import TTLCache -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure # period to cache .well-known results for by default diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index 1f22f78a755f2d25f030ec5dfa3c5e2c83baf430..ecc88f9b9667f07093970f4c2a72c399b8a99595 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -25,7 +25,7 @@ from twisted.internet.error import ConnectError from twisted.names import client, dns from twisted.names.error import DNSNameError, DomainError -from synapse.util.logcontext import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable logger = logging.getLogger(__name__) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 5ef8bb60a3ab07601bcb093ba5874e5f8d62c3a3..e60334547e96e58f90e7808d2eb6896d0c45c987 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -36,6 +36,7 @@ from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone from twisted.web.http_headers import Headers +import synapse.logging.opentracing as opentracing import synapse.metrics import synapse.util.retryutils from synapse.api.errors import ( @@ -48,8 +49,8 @@ from synapse.api.errors import ( from synapse.http import QuieterFileBodyProducer from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.logging.context import make_deferred_yieldable from synapse.util.async_helpers import timeout_deferred -from synapse.util.logcontext import make_deferred_yieldable from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -339,9 +340,25 @@ class MatrixFederationHttpClient(object): else: query_bytes = b"" - headers_dict = {b"User-Agent": [self.version_string_bytes]} + # Retreive current span + scope = opentracing.start_active_span( + "outgoing-federation-request", + tags={ + opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT, + opentracing.tags.PEER_ADDRESS: request.destination, + opentracing.tags.HTTP_METHOD: request.method, + opentracing.tags.HTTP_URL: request.path, + }, + finish_on_close=True, + ) + + # Inject the span into the headers + headers_dict = {} + opentracing.inject_active_span_byte_dict(headers_dict, request.destination) - with limiter: + headers_dict[b"User-Agent"] = [self.version_string_bytes] + + with limiter, scope: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable 
transactions in place) if long_retries: @@ -419,6 +436,10 @@ class MatrixFederationHttpClient(object): response.phrase.decode("ascii", errors="replace"), ) + opentracing.set_tag( + opentracing.tags.HTTP_STATUS_CODE, response.code + ) + if 200 <= response.code < 300: pass else: @@ -499,8 +520,7 @@ class MatrixFederationHttpClient(object): _flatten_response_never_received(e), ) raise - - defer.returnValue(response) + defer.returnValue(response) def build_auth_headers( self, destination, method, url_bytes, content=None, destination_is=None diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 62045a918bf5319178b13f7e186fdabde18c5a00..46af27c8f65149a161edfcbc1970c85922a02c7c 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -19,8 +19,8 @@ import threading from prometheus_client.core import Counter, Histogram +from synapse.logging.context import LoggingContext from synapse.metrics import LaterGauge -from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/http/server.py b/synapse/http/server.py index f067c163c13194ded8f0a35ae3afe21ad24563f0..72a3d67eb69d2631d8dfa0fc7bd03f6efb60f24c 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -39,8 +39,8 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.logging.context import preserve_fn from synapse.util.caches import intern_dict -from synapse.util.logcontext import preserve_fn logger = logging.getLogger(__name__) @@ -65,8 +65,8 @@ def wrap_json_request_handler(h): The handler method must have a signature of "handle_foo(self, request)", where "request" must be a SynapseRequest. - The handler must return a deferred. If the deferred succeeds we assume that - a response has been sent. If the deferred fails with a SynapseError we use + The handler must return a deferred or a coroutine. If the deferred succeeds + we assume that a response has been sent. If the deferred fails with a SynapseError we use it to send a JSON response with the appropriate HTTP reponse code. If the deferred fails with any other type of error we send a 500 reponse. """ @@ -353,16 +353,22 @@ class DirectServeResource(resource.Resource): """ Render the request, using an asynchronous render handler if it exists. """ - render_callback_name = "_async_render_" + request.method.decode("ascii") + async_render_callback_name = "_async_render_" + request.method.decode("ascii") - if hasattr(self, render_callback_name): - # Call the handler - callback = getattr(self, render_callback_name) - defer.ensureDeferred(callback(request)) + # Try and get the async renderer + callback = getattr(self, async_render_callback_name, None) - return NOT_DONE_YET - else: - super().render(request) + # No async renderer for this request method. 
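+        # Fall back to twisted.web's synchronous Resource.render, which
+        # dispatches to render_GET / render_POST etc.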
+ if not callback: + return super().render(request) + + resp = callback(request) + + # If it's a coroutine, turn it into a Deferred + if isinstance(resp, types.CoroutineType): + defer.ensureDeferred(resp) + + return NOT_DONE_YET def _options_handler(request): diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index cd8415acd5ab168e19e18de940670208c9b9e0f0..889038ff25858026df43edd99283494eda2f1300 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -20,6 +20,7 @@ import logging from canonicaljson import json from synapse.api.errors import Codes, SynapseError +from synapse.logging.opentracing import trace_servlet logger = logging.getLogger(__name__) @@ -290,7 +291,11 @@ class RestServlet(object): for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"): if hasattr(self, "on_%s" % (method,)): method_handler = getattr(self, "on_%s" % (method,)) - http_server.register_paths(method, patterns, method_handler) + http_server.register_paths( + method, + patterns, + trace_servlet(self.__class__.__name__, method_handler), + ) else: raise NotImplementedError("RestServlet must register something.") diff --git a/synapse/http/site.py b/synapse/http/site.py index 93f679ea4845fde71dcfd56eb81180ea51168220..df5274c17766c5e6b7b67f9ca9e0240fad320324 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -19,7 +19,7 @@ from twisted.web.server import Request, Site from synapse.http import redact_uri from synapse.http.request_metrics import RequestMetrics, requests_counter -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.logging.context import LoggingContext, PreserveLoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/synapse/logging/context.py b/synapse/logging/context.py new file mode 100644 index 0000000000000000000000000000000000000000..b456c31f70715ab10f148702454411132b383d12 --- /dev/null +++ b/synapse/logging/context.py @@ -0,0 +1,697 @@ +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Thread-local-alike tracking of log contexts within synapse + +This module provides objects and utilities for tracking contexts through +synapse code, so that log lines can include a request identifier, and so that +CPU and database activity can be accounted for against the request that caused +them. + +See doc/log_contexts.rst for details on how this works. +""" + +import logging +import threading +import types + +from twisted.internet import defer, threads + +logger = logging.getLogger(__name__) + +try: + import resource + + # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined + # to be 1 on linux so we hard code it. + RUSAGE_THREAD = 1 + + # If the system doesn't support RUSAGE_THREAD then this should throw an + # exception. 
+ resource.getrusage(RUSAGE_THREAD) + + def get_thread_resource_usage(): + return resource.getrusage(RUSAGE_THREAD) + + +except Exception: + # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we + # won't track resource usage by returning None. + def get_thread_resource_usage(): + return None + + +# get an id for the current thread. +# +# threading.get_ident doesn't actually return an OS-level tid, and annoyingly, +# on Linux it actually returns the same value either side of a fork() call. However +# we only fork in one place, so it's not worth the hoop-jumping to get a real tid. +# +get_thread_id = threading.get_ident + + +class ContextResourceUsage(object): + """Object for tracking the resources used by a log context + + Attributes: + ru_utime (float): user CPU time (in seconds) + ru_stime (float): system CPU time (in seconds) + db_txn_count (int): number of database transactions done + db_sched_duration_sec (float): amount of time spent waiting for a + database connection + db_txn_duration_sec (float): amount of time spent doing database + transactions (excluding scheduling time) + evt_db_fetch_count (int): number of events requested from the database + """ + + __slots__ = [ + "ru_stime", + "ru_utime", + "db_txn_count", + "db_txn_duration_sec", + "db_sched_duration_sec", + "evt_db_fetch_count", + ] + + def __init__(self, copy_from=None): + """Create a new ContextResourceUsage + + Args: + copy_from (ContextResourceUsage|None): if not None, an object to + copy stats from + """ + if copy_from is None: + self.reset() + else: + self.ru_utime = copy_from.ru_utime + self.ru_stime = copy_from.ru_stime + self.db_txn_count = copy_from.db_txn_count + + self.db_txn_duration_sec = copy_from.db_txn_duration_sec + self.db_sched_duration_sec = copy_from.db_sched_duration_sec + self.evt_db_fetch_count = copy_from.evt_db_fetch_count + + def copy(self): + return ContextResourceUsage(copy_from=self) + + def reset(self): + self.ru_stime = 0.0 + self.ru_utime = 0.0 + self.db_txn_count = 0 + + self.db_txn_duration_sec = 0 + self.db_sched_duration_sec = 0 + self.evt_db_fetch_count = 0 + + def __repr__(self): + return ( + "<ContextResourceUsage ru_stime='%r', ru_utime='%r', " + "db_txn_count='%r', db_txn_duration_sec='%r', " + "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>" + ) % ( + self.ru_stime, + self.ru_utime, + self.db_txn_count, + self.db_txn_duration_sec, + self.db_sched_duration_sec, + self.evt_db_fetch_count, + ) + + def __iadd__(self, other): + """Add another ContextResourceUsage's stats to this one's. + + Args: + other (ContextResourceUsage): the other resource usage object + """ + self.ru_utime += other.ru_utime + self.ru_stime += other.ru_stime + self.db_txn_count += other.db_txn_count + self.db_txn_duration_sec += other.db_txn_duration_sec + self.db_sched_duration_sec += other.db_sched_duration_sec + self.evt_db_fetch_count += other.evt_db_fetch_count + return self + + def __isub__(self, other): + self.ru_utime -= other.ru_utime + self.ru_stime -= other.ru_stime + self.db_txn_count -= other.db_txn_count + self.db_txn_duration_sec -= other.db_txn_duration_sec + self.db_sched_duration_sec -= other.db_sched_duration_sec + self.evt_db_fetch_count -= other.evt_db_fetch_count + return self + + def __add__(self, other): + res = ContextResourceUsage(copy_from=self) + res += other + return res + + def __sub__(self, other): + res = ContextResourceUsage(copy_from=self) + res -= other + return res + + +class LoggingContext(object): + """Additional context for log formatting. 
Contexts are scoped within a + "with" block. + + If a parent is given when creating a new context, then: + - logging fields are copied from the parent to the new context on entry + - when the new context exits, the cpu usage stats are copied from the + child to the parent + + Args: + name (str): Name for the context for debugging. + parent_context (LoggingContext|None): The parent of the new context + """ + + __slots__ = [ + "previous_context", + "name", + "parent_context", + "_resource_usage", + "usage_start", + "main_thread", + "alive", + "request", + "tag", + "scope", + ] + + thread_local = threading.local() + + class Sentinel(object): + """Sentinel to represent the root context""" + + __slots__ = [] + + def __str__(self): + return "sentinel" + + def copy_to(self, record): + pass + + def start(self): + pass + + def stop(self): + pass + + def add_database_transaction(self, duration_sec): + pass + + def add_database_scheduled(self, sched_sec): + pass + + def record_event_fetch(self, event_count): + pass + + def __nonzero__(self): + return False + + __bool__ = __nonzero__ # python3 + + sentinel = Sentinel() + + def __init__(self, name=None, parent_context=None, request=None): + self.previous_context = LoggingContext.current_context() + self.name = name + + # track the resources used by this context so far + self._resource_usage = ContextResourceUsage() + + # If alive has the thread resource usage when the logcontext last + # became active. + self.usage_start = None + + self.main_thread = get_thread_id() + self.request = None + self.tag = "" + self.alive = True + self.scope = None + + self.parent_context = parent_context + + if self.parent_context is not None: + self.parent_context.copy_to(self) + + if request is not None: + # the request param overrides the request from the parent context + self.request = request + + def __str__(self): + if self.request: + return str(self.request) + return "%s@%x" % (self.name, id(self)) + + @classmethod + def current_context(cls): + """Get the current logging context from thread local storage + + Returns: + LoggingContext: the current logging context + """ + return getattr(cls.thread_local, "current_context", cls.sentinel) + + @classmethod + def set_current_context(cls, context): + """Set the current logging context in thread local storage + Args: + context(LoggingContext): The context to activate. + Returns: + The context that was previously active + """ + current = cls.current_context() + + if current is not context: + current.stop() + cls.thread_local.current_context = context + context.start() + return current + + def __enter__(self): + """Enters this logging context into thread local storage""" + old_context = self.set_current_context(self) + if self.previous_context != old_context: + logger.warn( + "Expected previous context %r, found %r", + self.previous_context, + old_context, + ) + self.alive = True + + return self + + def __exit__(self, type, value, traceback): + """Restore the logging context in thread local storage to the state it + was before this context was entered. + Returns: + None to avoid suppressing any exceptions that were thrown. 
+ """ + current = self.set_current_context(self.previous_context) + if current is not self: + if current is self.sentinel: + logger.warning("Expected logging context %s was lost", self) + else: + logger.warning( + "Expected logging context %s but found %s", self, current + ) + self.previous_context = None + self.alive = False + + # if we have a parent, pass our CPU usage stats on + if self.parent_context is not None and hasattr( + self.parent_context, "_resource_usage" + ): + self.parent_context._resource_usage += self._resource_usage + + # reset them in case we get entered again + self._resource_usage.reset() + + def copy_to(self, record): + """Copy logging fields from this context to a log record or + another LoggingContext + """ + + # we track the current request + record.request = self.request + + # we also track the current scope: + record.scope = self.scope + + def start(self): + if get_thread_id() != self.main_thread: + logger.warning("Started logcontext %s on different thread", self) + return + + # If we haven't already started record the thread resource usage so + # far + if not self.usage_start: + self.usage_start = get_thread_resource_usage() + + def stop(self): + if get_thread_id() != self.main_thread: + logger.warning("Stopped logcontext %s on different thread", self) + return + + # When we stop, let's record the cpu used since we started + if not self.usage_start: + logger.warning("Called stop on logcontext %s without calling start", self) + return + + utime_delta, stime_delta = self._get_cputime() + self._resource_usage.ru_utime += utime_delta + self._resource_usage.ru_stime += stime_delta + + self.usage_start = None + + def get_resource_usage(self): + """Get resources used by this logcontext so far. + + Returns: + ContextResourceUsage: a *copy* of the object tracking resource + usage so far + """ + # we always return a copy, for consistency + res = self._resource_usage.copy() + + # If we are on the correct thread and we're currently running then we + # can include resource usage so far. + is_main_thread = get_thread_id() == self.main_thread + if self.alive and self.usage_start and is_main_thread: + utime_delta, stime_delta = self._get_cputime() + res.ru_utime += utime_delta + res.ru_stime += stime_delta + + return res + + def _get_cputime(self): + """Get the cpu usage time so far + + Returns: Tuple[float, float]: seconds in user mode, seconds in system mode + """ + current = get_thread_resource_usage() + + utime_delta = current.ru_utime - self.usage_start.ru_utime + stime_delta = current.ru_stime - self.usage_start.ru_stime + + # sanity check + if utime_delta < 0: + logger.error( + "utime went backwards! %f < %f", + current.ru_utime, + self.usage_start.ru_utime, + ) + utime_delta = 0 + + if stime_delta < 0: + logger.error( + "stime went backwards! 
%f < %f", + current.ru_stime, + self.usage_start.ru_stime, + ) + stime_delta = 0 + + return utime_delta, stime_delta + + def add_database_transaction(self, duration_sec): + if duration_sec < 0: + raise ValueError("DB txn time can only be non-negative") + self._resource_usage.db_txn_count += 1 + self._resource_usage.db_txn_duration_sec += duration_sec + + def add_database_scheduled(self, sched_sec): + """Record a use of the database pool + + Args: + sched_sec (float): number of seconds it took us to get a + connection + """ + if sched_sec < 0: + raise ValueError("DB scheduling time can only be non-negative") + self._resource_usage.db_sched_duration_sec += sched_sec + + def record_event_fetch(self, event_count): + """Record a number of events being fetched from the db + + Args: + event_count (int): number of events being fetched + """ + self._resource_usage.evt_db_fetch_count += event_count + + +class LoggingContextFilter(logging.Filter): + """Logging filter that adds values from the current logging context to each + record. + Args: + **defaults: Default values to avoid formatters complaining about + missing fields + """ + + def __init__(self, **defaults): + self.defaults = defaults + + def filter(self, record): + """Add each fields from the logging contexts to the record. + Returns: + True to include the record in the log output. + """ + context = LoggingContext.current_context() + for key, value in self.defaults.items(): + setattr(record, key, value) + + # context should never be None, but if it somehow ends up being, then + # we end up in a death spiral of infinite loops, so let's check, for + # robustness' sake. + if context is not None: + context.copy_to(record) + + return True + + +class PreserveLoggingContext(object): + """Captures the current logging context and restores it when the scope is + exited. Used to restore the context after a function using + @defer.inlineCallbacks is resumed by a callback from the reactor.""" + + __slots__ = ["current_context", "new_context", "has_parent"] + + def __init__(self, new_context=None): + if new_context is None: + new_context = LoggingContext.sentinel + self.new_context = new_context + + def __enter__(self): + """Captures the current logging context""" + self.current_context = LoggingContext.set_current_context(self.new_context) + + if self.current_context: + self.has_parent = self.current_context.previous_context is not None + if not self.current_context.alive: + logger.debug("Entering dead context: %s", self.current_context) + + def __exit__(self, type, value, traceback): + """Restores the current logging context""" + context = LoggingContext.set_current_context(self.current_context) + + if context != self.new_context: + if context is LoggingContext.sentinel: + logger.warning("Expected logging context %s was lost", self.new_context) + else: + logger.warning( + "Expected logging context %s but found %s", + self.new_context, + context, + ) + + if self.current_context is not LoggingContext.sentinel: + if not self.current_context.alive: + logger.debug("Restoring dead context: %s", self.current_context) + + +def nested_logging_context(suffix, parent_context=None): + """Creates a new logging context as a child of another. + + The nested logging context will have a 'request' made up of the parent context's + request, plus the given suffix. + + CPU/db usage stats will be added to the parent context's on exit. + + Normal usage looks like: + + with nested_logging_context(suffix): + # ... 
do stuff + + Args: + suffix (str): suffix to add to the parent context's 'request'. + parent_context (LoggingContext|None): parent context. Will use the current context + if None. + + Returns: + LoggingContext: new logging context. + """ + if parent_context is None: + parent_context = LoggingContext.current_context() + return LoggingContext( + parent_context=parent_context, request=parent_context.request + "-" + suffix + ) + + +def preserve_fn(f): + """Function decorator which wraps the function with run_in_background""" + + def g(*args, **kwargs): + return run_in_background(f, *args, **kwargs) + + return g + + +def run_in_background(f, *args, **kwargs): + """Calls a function, ensuring that the current context is restored after + return from the function, and that the sentinel context is set once the + deferred returned by the function completes. + + Useful for wrapping functions that return a deferred or coroutine, which you don't + yield or await on (for instance because you want to pass it to + deferred.gatherResults()). + + Note that if you completely discard the result, you should make sure that + `f` doesn't raise any deferred exceptions, otherwise a scary-looking + CRITICAL error about an unhandled error will be logged without much + indication about where it came from. + """ + current = LoggingContext.current_context() + try: + res = f(*args, **kwargs) + except: # noqa: E722 + # the assumption here is that the caller doesn't want to be disturbed + # by synchronous exceptions, so let's turn them into Failures. + return defer.fail() + + if isinstance(res, types.CoroutineType): + res = defer.ensureDeferred(res) + + if not isinstance(res, defer.Deferred): + return res + + if res.called and not res.paused: + # The function should have maintained the logcontext, so we can + # optimise out the messing about + return res + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = LoggingContext.set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + res.addBoth(_set_context_cb, ctx) + return res + + +def make_deferred_yieldable(deferred): + """Given a deferred, make it follow the Synapse logcontext rules: + + If the deferred has completed (or is not actually a Deferred), essentially + does nothing (just returns another completed deferred with the + result/failure). + + If the deferred has not yet completed, resets the logcontext before + returning a deferred. Then, when the deferred completes, restores the + current logcontext before running callbacks/errbacks. + + (This is more-or-less the opposite operation to run_in_background.) + """ + if not isinstance(deferred, defer.Deferred): + return deferred + + if deferred.called and not deferred.paused: + # it looks like this deferred is ready to run any callbacks we give it + # immediately. We may as well optimise out the logcontext faffery. 
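+        # (Adding callbacks to a completed, unpaused deferred runs them
+        # synchronously, so the current logcontext is never left.)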
+ return deferred + + # ok, we can't be sure that a yield won't block, so let's reset the + # logcontext, and add a callback to the deferred to restore it. + prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + deferred.addBoth(_set_context_cb, prev_context) + return deferred + + +def _set_context_cb(result, context): + """A callback function which just sets the logging context""" + LoggingContext.set_current_context(context) + return result + + +def defer_to_thread(reactor, f, *args, **kwargs): + """ + Calls the function `f` using a thread from the reactor's default threadpool and + returns the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked, and whose threadpool we should use for the + function. + + Normally this will be hs.get_reactor(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. + """ + return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) + + +def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): + """ + A wrapper for twisted.internet.threads.deferToThreadpool, which handles + logcontexts correctly. + + Calls the function `f` using a thread from the given threadpool and returns + the result as a Deferred. + + Creates a new logcontext for `f`, which is created as a child of the current + logcontext (so its CPU usage metrics will get attributed to the current + logcontext). `f` should preserve the logcontext it is given. + + The result deferred follows the Synapse logcontext rules: you should `yield` + on it. + + Args: + reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread + the Deferred will be invoked. Normally this will be hs.get_reactor(). + + threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for + running `f`. Normally this will be hs.get_reactor().getThreadPool(). + + f (callable): The function to call. + + args: positional arguments to pass to f. + + kwargs: keyword arguments to pass to f. + + Returns: + Deferred: A Deferred which fires a callback with the result of `f`, or an + errback if `f` throws an exception. + """ + logcontext = LoggingContext.current_context() + + def g(): + with LoggingContext(parent_context=logcontext): + return f(*args, **kwargs) + + return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf570c756d8b984d11d7c44da46e9c66b37d1d0 --- /dev/null +++ b/synapse/logging/formatter.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+import traceback
+
+from six import StringIO
+
+
+class LogFormatter(logging.Formatter):
+    """Log formatter which gives more detail for exceptions
+
+    This is the same as the standard log formatter, except that when logging
+    exceptions [typically via log.foo("msg", exc_info=1)], it prints the
+    sequence that led up to the point at which the exception was caught.
+    (Normally only stack frames between the point the exception was raised and
+    where it was caught are logged).
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(LogFormatter, self).__init__(*args, **kwargs)
+
+    def formatException(self, ei):
+        sio = StringIO()
+        (typ, val, tb) = ei
+
+        # log the stack above the exception capture point if possible, but
+        # check that we actually have an f_back attribute to work around
+        # https://twistedmatrix.com/trac/ticket/9305
+
+        if tb and hasattr(tb.tb_frame, "f_back"):
+            sio.write("Capture point (most recent call last):\n")
+            traceback.print_stack(tb.tb_frame.f_back, None, sio)
+
+        traceback.print_exception(typ, val, tb, None, sio)
+        s = sio.getvalue()
+        sio.close()
+        if s[-1:] == "\n":
+            s = s[:-1]
+        return s
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0ceea2a642a6a16cf82c96ed8b4f6206b178b43
--- /dev/null
+++ b/synapse/logging/opentracing.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# NOTE
+# This is a small wrapper around opentracing because opentracing is not currently
+# packaged downstream (specifically debian). Since opentracing instrumentation is
+# fairly invasive it was awkward to make it optional. As a result we opted to encapsulate
+# all opentracing state in these methods, which effectively noop if opentracing is
+# not present. We should strongly consider encouraging the downstream distributors
+# to package opentracing and making opentracing a full dependency. To facilitate
+# that move, the methods here work very similarly to opentracing's, and it should
+# only be a matter of a few regexes to move over to opentracing's access patterns
+# proper.
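+#
+# Intended usage, as a sketch (assumes the wrapper API defined below; when
+# opentracing is not installed, every call quietly noops):
+#
+#     from synapse.logging import opentracing
+#
+#     def handle_request(request):
+#         with opentracing.start_active_span("handle_request"):
+#             opentracing.set_tag("foo", "bar")
+#             ...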
+
+try:
+    import opentracing
+except ImportError:
+    opentracing = None
+try:
+    from jaeger_client import Config as JaegerConfig
+    from synapse.logging.scopecontextmanager import LogContextScopeManager
+except ImportError:
+    JaegerConfig = None
+    LogContextScopeManager = None
+
+import contextlib
+import logging
+import re
+from functools import wraps
+
+from twisted.internet import defer
+
+logger = logging.getLogger(__name__)
+
+
+class _DumTagNames(object):
+    """Wrapper of opentracing's tags. We need these so that we can
+    reference them without opentracing around. Clearly they should
+    never actually show up in a trace; `init_tracer` overwrites
+    these with the correct ones."""
+
+    INVALID_TAG = "invalid-tag"
+    COMPONENT = INVALID_TAG
+    DATABASE_INSTANCE = INVALID_TAG
+    DATABASE_STATEMENT = INVALID_TAG
+    DATABASE_TYPE = INVALID_TAG
+    DATABASE_USER = INVALID_TAG
+    ERROR = INVALID_TAG
+    HTTP_METHOD = INVALID_TAG
+    HTTP_STATUS_CODE = INVALID_TAG
+    HTTP_URL = INVALID_TAG
+    MESSAGE_BUS_DESTINATION = INVALID_TAG
+    PEER_ADDRESS = INVALID_TAG
+    PEER_HOSTNAME = INVALID_TAG
+    PEER_HOST_IPV4 = INVALID_TAG
+    PEER_HOST_IPV6 = INVALID_TAG
+    PEER_PORT = INVALID_TAG
+    PEER_SERVICE = INVALID_TAG
+    SAMPLING_PRIORITY = INVALID_TAG
+    SERVICE = INVALID_TAG
+    SPAN_KIND = INVALID_TAG
+    SPAN_KIND_CONSUMER = INVALID_TAG
+    SPAN_KIND_PRODUCER = INVALID_TAG
+    SPAN_KIND_RPC_CLIENT = INVALID_TAG
+    SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
+def only_if_tracing(func):
+    """Executes the function only if we're tracing; otherwise returns None.
+    Assumes the wrapped function may therefore return None."""
+
+    @wraps(func)
+    def _only_if_tracing_inner(*args, **kwargs):
+        if opentracing:
+            return func(*args, **kwargs)
+        else:
+            return
+
+    return _only_if_tracing_inner
+
+
+# Block everything by default
+_homeserver_whitelist = None
+
+tags = _DumTagNames
+
+
+def init_tracer(config):
+    """Set the whitelists and initialise the JaegerClient tracer
+
+    Args:
+        config (Config): The config used by the homeserver. Here it's used
+            to set the service name to the homeserver's.
+    """
+    global opentracing
+    if not config.tracer_config.get("tracer_enabled", False):
+        # We don't have a tracer
+        opentracing = None
+        return
+
+    if not opentracing:
+        logger.error(
+            "The server has been configured to use opentracing but opentracing is not installed."
+        )
+        raise ModuleNotFoundError("opentracing")
+
+    if not JaegerConfig:
+        logger.error(
+            "The server has been configured to use opentracing but jaeger-client is not installed."
+        )
+        raise ModuleNotFoundError("jaeger_client")
+
+    # Include the worker name
+    name = config.worker_name if config.worker_name else "master"
+
+    set_homeserver_whitelist(config.tracer_config["homeserver_whitelist"])
+    jaeger_config = JaegerConfig(
+        config={"sampler": {"type": "const", "param": 1}, "logging": True},
+        service_name="{} {}".format(config.server_name, name),
+        scope_manager=LogContextScopeManager(config),
+    )
+    jaeger_config.initialize_tracer()
+
+    # Set up tags to be opentracing's tags
+    global tags
+    tags = opentracing.tags
+
+
+@contextlib.contextmanager
+def _noop_context_manager(*args, **kwargs):
+    """Does absolutely nothing really well. Can be entered and exited arbitrarily.
+    Good substitute for an opentracing scope."""
+    yield
+
+
+# Could use kwargs but I want these to be explicit
+def start_active_span(
+    operation_name,
+    child_of=None,
+    references=None,
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """Starts an active opentracing span.
Note, the scope doesn't become active + until it has been entered, however, the span starts from the time this + message is called. + Args: + See opentracing.tracer + Returns: + scope (Scope) or noop_context_manager + """ + if opentracing is None: + return _noop_context_manager() + else: + # We need to enter the scope here for the logcontext to become active + return opentracing.tracer.start_active_span( + operation_name, + child_of=child_of, + references=references, + tags=tags, + start_time=start_time, + ignore_active_span=ignore_active_span, + finish_on_close=finish_on_close, + ) + + +@only_if_tracing +def close_active_span(): + """Closes the active span. This will close it's logcontext if the context + was made for the span""" + opentracing.tracer.scope_manager.active.__exit__(None, None, None) + + +@only_if_tracing +def set_tag(key, value): + """Set's a tag on the active span""" + opentracing.tracer.active_span.set_tag(key, value) + + +@only_if_tracing +def log_kv(key_values, timestamp=None): + """Log to the active span""" + opentracing.tracer.active_span.log_kv(key_values, timestamp) + + +# Note: we don't have a get baggage items because we're trying to hide all +# scope and span state from synapse. I think this method may also be useless +# as a result +@only_if_tracing +def set_baggage_item(key, value): + """Attach baggage to the active span""" + opentracing.tracer.active_span.set_baggage_item(key, value) + + +@only_if_tracing +def set_operation_name(operation_name): + """Sets the operation name of the active span""" + opentracing.tracer.active_span.set_operation_name(operation_name) + + +@only_if_tracing +def set_homeserver_whitelist(homeserver_whitelist): + """Sets the whitelist + + Args: + homeserver_whitelist (iterable of strings): regex of whitelisted homeservers + """ + global _homeserver_whitelist + if homeserver_whitelist: + # Makes a single regex which accepts all passed in regexes in the list + _homeserver_whitelist = re.compile( + "({})".format(")|(".join(homeserver_whitelist)) + ) + + +@only_if_tracing +def whitelisted_homeserver(destination): + """Checks if a destination matches the whitelist + Args: + destination (String)""" + global _homeserver_whitelist + if _homeserver_whitelist: + return _homeserver_whitelist.match(destination) + return False + + +def start_active_span_from_context( + headers, + operation_name, + references=None, + tags=None, + start_time=None, + ignore_active_span=False, + finish_on_close=True, +): + """ + Extracts a span context from Twisted Headers. + args: + headers (twisted.web.http_headers.Headers) + returns: + span_context (opentracing.span.SpanContext) + """ + # Twisted encodes the values as lists whereas opentracing doesn't. + # So, we take the first item in the list. + # Also, twisted uses byte arrays while opentracing expects strings. 
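+    # (hence the k.decode() / v[0].decode() below when building header_dict)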
+ if opentracing is None: + return _noop_context_manager() + + header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()} + context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) + + return opentracing.tracer.start_active_span( + operation_name, + child_of=context, + references=references, + tags=tags, + start_time=start_time, + ignore_active_span=ignore_active_span, + finish_on_close=finish_on_close, + ) + + +@only_if_tracing +def inject_active_span_twisted_headers(headers, destination): + """ + Injects a span context into twisted headers inplace + + Args: + headers (twisted.web.http_headers.Headers) + span (opentracing.Span) + + Returns: + Inplace modification of headers + + Note: + The headers set by the tracer are custom to the tracer implementation which + should be unique enough that they don't interfere with any headers set by + synapse or twisted. If we're still using jaeger these headers would be those + here: + https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py + """ + + if not whitelisted_homeserver(destination): + return + + span = opentracing.tracer.active_span + carrier = {} + opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + + for key, value in carrier.items(): + headers.addRawHeaders(key, value) + + +@only_if_tracing +def inject_active_span_byte_dict(headers, destination): + """ + Injects a span context into a dict where the headers are encoded as byte + strings + + Args: + headers (dict) + span (opentracing.Span) + + Returns: + Inplace modification of headers + + Note: + The headers set by the tracer are custom to the tracer implementation which + should be unique enough that they don't interfere with any headers set by + synapse or twisted. If we're still using jaeger these headers would be those + here: + https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py + """ + if not whitelisted_homeserver(destination): + return + + span = opentracing.tracer.active_span + + carrier = {} + opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + + for key, value in carrier.items(): + headers[key.encode()] = [value.encode()] + + +def trace_servlet(servlet_name, func): + """Decorator which traces a serlet. It starts a span with some servlet specific + tags such as the servlet_name and request information""" + + @wraps(func) + @defer.inlineCallbacks + def _trace_servlet_inner(request, *args, **kwargs): + with start_active_span_from_context( + request.requestHeaders, + "incoming-client-request", + tags={ + "request_id": request.get_request_id(), + tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, + tags.HTTP_METHOD: request.get_method(), + tags.HTTP_URL: request.get_redacted_uri(), + tags.PEER_HOST_IPV6: request.getClientIP(), + "servlet_name": servlet_name, + }, + ): + result = yield defer.maybeDeferred(func, request, *args, **kwargs) + defer.returnValue(result) + + return _trace_servlet_inner diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py new file mode 100644 index 0000000000000000000000000000000000000000..91e14462f30863617f764bbe82b39d9b1f2f6672 --- /dev/null +++ b/synapse/logging/scopecontextmanager.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from opentracing import Scope, ScopeManager
+
+import twisted
+
+from synapse.logging.context import LoggingContext, nested_logging_context
+
+logger = logging.getLogger(__name__)
+
+
+class LogContextScopeManager(ScopeManager):
+    """
+    The LogContextScopeManager tracks the active scope in opentracing
+    by using the log contexts which are native to synapse. This is so
+    that the basic opentracing api can be used across twisted deferreds.
+    (I would love to break logcontexts and this out into a standalone
+    package, but let's wait for twisted's contexts to be released.)
+    """
+
+    def __init__(self, config):
+        # Set the whitelists
+        logger.info(config.tracer_config)
+        self._homeserver_whitelist = config.tracer_config["homeserver_whitelist"]
+
+    @property
+    def active(self):
+        """
+        Returns the currently active Scope which can be used to access the
+        currently active Scope.span.
+        If there is a non-null Scope, its wrapped Span
+        becomes an implicit parent of any newly-created Span at
+        Tracer.start_active_span() time.
+
+        Returns:
+            (Scope): the Scope that is active, or None if not
+            available.
+        """
+        ctx = LoggingContext.current_context()
+        if ctx is LoggingContext.sentinel:
+            return None
+        else:
+            return ctx.scope
+
+    def activate(self, span, finish_on_close):
+        """
+        Makes a Span active.
+
+        Args:
+            span (Span): the span that should become active.
+            finish_on_close (Boolean): whether Span should be automatically
+                finished when Scope.close() is called.
+
+        Returns:
+            Scope to control the end of the active period for
+            *span*. It is a programming error to neglect to call
+            Scope.close() on the returned instance.
+        """
+
+        enter_logcontext = False
+        ctx = LoggingContext.current_context()
+
+        if ctx is LoggingContext.sentinel:
+            # There is no logcontext for this scope to attach to, so return
+            # a plain Scope which doesn't touch any logcontext.
+            logger.error("Tried to activate scope outside of loggingcontext")
+            return Scope(None, span)
+        elif ctx.scope is not None:
+            # We want the logging scope to look exactly the same so we give it
+            # a blank suffix
+            ctx = nested_logging_context("")
+            enter_logcontext = True
+
+        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
+        ctx.scope = scope
+        return scope
+
+
+class _LogContextScope(Scope):
+    """
+    A custom opentracing scope. The only significant difference is that it will
+    close the log context it's related to if the logcontext was created specifically
+    for this scope.
+    """
+
+    def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close):
+        """
+        Args:
+            manager (LogContextScopeManager):
+                the manager that is responsible for this scope.
+            span (Span):
+                the opentracing span which this scope represents the local
+                lifetime for.
+            logcontext (LogContext):
+                the logcontext to which this scope is attached.
+ enter_logcontext (Boolean): + if True the logcontext will be entered and exited when the scope + is entered and exited respectively + finish_on_close (Boolean): + if True finish the span when the scope is closed + """ + super(_LogContextScope, self).__init__(manager, span) + self.logcontext = logcontext + self._finish_on_close = finish_on_close + self._enter_logcontext = enter_logcontext + + def __enter__(self): + if self._enter_logcontext: + self.logcontext.__enter__() + + def __exit__(self, type, value, traceback): + if type == twisted.internet.defer._DefGen_Return: + super(_LogContextScope, self).__exit__(None, None, None) + else: + super(_LogContextScope, self).__exit__(type, value, traceback) + if self._enter_logcontext: + self.logcontext.__exit__(type, value, traceback) + else: # the logcontext existed before the creation of the scope + self.logcontext.scope = None + + def close(self): + if self.manager.active is not self: + logger.error("Tried to close a none active scope!") + return + + if self._finish_on_close: + self.span.finish() diff --git a/synapse/util/logutils.py b/synapse/logging/utils.py similarity index 100% rename from synapse/util/logutils.py rename to synapse/logging/utils.py diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 167e2c068a45dd65f99090ad994eb1fea8fe96c4..edd6b42db3ef00db7b3355249d4d987cd4b8334c 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -22,7 +22,7 @@ from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily from twisted.internet import defer -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.logging.context import LoggingContext, PreserveLoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index bf43ca09be7c60a3ea5b2985164e42fa4fa3d75c..7bb020cb45be247b2b01b2cbdfe9c079b2fd1ec5 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -12,10 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer from synapse.types import UserID +logger = logging.getLogger(__name__) + class ModuleApi(object): """A proxy object that gets passed to password auth providers so they @@ -76,8 +80,31 @@ class ModuleApi(object): @defer.inlineCallbacks def register(self, localpart, displayname=None, emails=[]): - """Registers a new user with given localpart and optional - displayname, emails. + """Registers a new user with given localpart and optional displayname, emails. + + Also returns an access token for the new user. + + Deprecated: avoid this, as it generates a new device with no way to + return that device to the user. Prefer separate calls to register_user and + register_device. + + Args: + localpart (str): The localpart of the new user. + displayname (str|None): The displayname of the new user. + emails (List[str]): Emails to bind to the new user. + + Returns: + Deferred[tuple[str, str]]: a 2-tuple of (user_id, access_token) + """ + logger.warning( + "Using deprecated ModuleApi.register which creates a dummy user device." 
+ ) + user_id = yield self.register_user(localpart, displayname, emails) + _, access_token = yield self.register_device(user_id) + defer.returnValue((user_id, access_token)) + + def register_user(self, localpart, displayname=None, emails=[]): + """Registers a new user with given localpart and optional displayname, emails. Args: localpart (str): The localpart of the new user. @@ -85,15 +112,30 @@ class ModuleApi(object): emails (List[str]): Emails to bind to the new user. Returns: - Deferred: a 2-tuple of (user_id, access_token) + Deferred[str]: user_id """ - # Register the user - reg = self.hs.get_registration_handler() - user_id, access_token = yield reg.register( + return self.hs.get_registration_handler().register_user( localpart=localpart, default_display_name=displayname, bind_emails=emails ) - defer.returnValue((user_id, access_token)) + def register_device(self, user_id, device_id=None, initial_display_name=None): + """Register a device for a user and generate an access token. + + Args: + user_id (str): full canonical @user:id + device_id (str|None): The device ID to check, or None to generate + a new one. + initial_display_name (str|None): An optional display name for the + device. + + Returns: + defer.Deferred[tuple[str, str]]: Tuple of device ID and access token + """ + return self.hs.get_registration_handler().register_device( + user_id=user_id, + device_id=device_id, + initial_display_name=initial_display_name, + ) @defer.inlineCallbacks def invalidate_access_token(self, access_token): diff --git a/synapse/notifier.py b/synapse/notifier.py index d398078eedbb19d1f9374c62637bca8609de1918..918ef64897be8e3cb4313196908adc1dbdeadac2 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -23,12 +23,12 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state +from synapse.logging.context import PreserveLoggingContext +from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import StreamToken from synapse.util.async_helpers import ObservableDeferred, timeout_deferred -from synapse.util.logcontext import PreserveLoggingContext -from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 96d087de226a8b5507f5978a4ba5df9270cd8c8f..134bf805eb8dee3f118a56c01ce45512f513c906 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -1,5 +1,6 @@ # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -248,6 +249,18 @@ BASE_APPEND_OVERRIDE_RULES = [ ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], }, + { + "rule_id": "global/override/.m.rule.reaction", + "conditions": [ + { + "kind": "event_match", + "key": "type", + "pattern": "m.reaction", + "_id": "_reaction", + } + ], + "actions": ["dont_notify"], + }, ] diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 809199fe8856c49c87f28f5ca33f527ccf903f2b..521c6e2cd7f8b2b7a26ac2624be4005fd811b7a4 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -29,6 +29,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.errors import StoreError +from synapse.logging.context import make_deferred_yieldable from synapse.push.presentable_names import ( calculate_room_name, descriptor_from_member_events, @@ -36,7 +37,6 @@ from synapse.push.presentable_names import ( ) from synapse.types import UserID from synapse.util.async_helpers import concurrently_execute -from synapse.util.logcontext import make_deferred_yieldable from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 6324c00ef184bea6207071484c49903cd90e8a05..e7618057be6d9fb66ce487d5f431da587e3ac416 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -95,6 +95,7 @@ CONDITIONAL_REQUIREMENTS = { "url_preview": ["lxml>=3.5.0"], "test": ["mock>=2.0", "parameterized"], "sentry": ["sentry-sdk>=0.7.2"], + "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"], "jwt": ["pyjwt>=1.6.4"], } diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 0a76a3762f1c8308248e9695dc97439c48203474..2d9cbbaefc2490a02f11417e61af237c28120578 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -156,70 +156,6 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): defer.returnValue((200, ret)) -class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint): - """Gets/creates a guest account for given 3PID. - - Request format: - - POST /_synapse/replication/get_or_register_3pid_guest/ - - { - "requester": ..., - "medium": ..., - "address": ..., - "inviter_user_id": ... 
- } - """ - - NAME = "get_or_register_3pid_guest" - PATH_ARGS = () - - def __init__(self, hs): - super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs) - - self.registeration_handler = hs.get_registration_handler() - self.store = hs.get_datastore() - self.clock = hs.get_clock() - - @staticmethod - def _serialize_payload(requester, medium, address, inviter_user_id): - """ - Args: - requester(Requester) - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - """ - return { - "requester": requester.serialize(), - "medium": medium, - "address": address, - "inviter_user_id": inviter_user_id, - } - - @defer.inlineCallbacks - def _handle_request(self, request): - content = parse_json_object_from_request(request) - - medium = content["medium"] - address = content["address"] - inviter_user_id = content["inviter_user_id"] - - requester = Requester.deserialize(self.store, content["requester"]) - - if requester.user: - request.authenticated_entity = requester.user.to_string() - - logger.info("get_or_register_3pid_guest: %r", content) - - ret = yield self.registeration_handler.get_or_register_3pid_guest( - medium, address, inviter_user_id - ) - - defer.returnValue((200, ret)) - - class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): """Notifies that a user has joined or left the room @@ -272,5 +208,4 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): def register_servlets(hs, http_server): ReplicationRemoteJoinRestServlet(hs).register(http_server) ReplicationRemoteRejectInviteRestServlet(hs).register(http_server) - ReplicationRegister3PIDGuestRestServlet(hs).register(http_server) ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index f81a0f1b8f548346ca02691ec34b1e5b5a914e18..2bf2173895eca6039b04ea266e943138e7c33b05 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -38,7 +38,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): @staticmethod def _serialize_payload( user_id, - token, password_hash, was_guest, make_guest, @@ -51,9 +50,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): """ Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str|None): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. @@ -68,7 +64,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): address (str|None): the IP address used to perform the regitration. 
""" return { - "token": token, "password_hash": password_hash, "was_guest": was_guest, "make_guest": make_guest, @@ -85,7 +80,6 @@ class ReplicationRegisterServlet(ReplicationEndpoint): yield self.registration_handler.register_with_store( user_id=user_id, - token=content["token"], password_hash=content["password_hash"], was_guest=content["was_guest"], make_guest=content["make_guest"], diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 97efb835ad7bcd5ad7d154d897b25d70692e20e6..5ffdf2675df4b70887abfae79e8b783b70c5f47b 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -62,9 +62,9 @@ from twisted.internet import defer from twisted.protocols.basic import LineOnlyReceiver from twisted.python.failure import Failure +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from .commands import ( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9843a902c69ddd11c79d2960a11936f3a7c997c7..6888ae559002cb9e415eee39e38aebfb6295d639 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -219,11 +219,10 @@ class UserRegisterServlet(RestServlet): register = RegisterRestServlet(self.hs) - (user_id, _) = yield register.registration_handler.register( + user_id = yield register.registration_handler.register_user( localpart=body["username"].lower(), password=body["password"], admin=bool(admin), - generate_token=False, user_type=user_type, ) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 36404b797dfe5f264fe2630664e5c58417d9ff87..6da71dc46fff4254673d271cb9768d73aef14dfc 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -17,8 +17,8 @@ to ensure idempotency when performing PUTs using the REST API.""" import logging +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util.async_helpers import ObservableDeferred -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index dd0d38ea5c1228fe7218b0d05feb3fddc721ace6..57542c2b4b9cf144d47d8d1dd1562902d2c34935 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -18,7 +18,13 @@ import logging from twisted.internet import defer -from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + NotFoundError, + SynapseError, +) from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import RoomAlias @@ -97,7 +103,7 @@ class ClientDirectoryServer(RestServlet): room_alias.to_string(), ) defer.returnValue((200, {})) - except AuthError: + except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index ede6bc8b1eaed6d3ba5ce147850709641d523e2c..0d05945f0aa6f90145f626f6b1237fd82ca5ba5e 100644 --- a/synapse/rest/client/v1/login.py +++ 
b/synapse/rest/client/v1/login.py @@ -86,6 +86,7 @@ class LoginRestServlet(RestServlet): self.jwt_enabled = hs.config.jwt_enabled self.jwt_secret = hs.config.jwt_secret self.jwt_algorithm = hs.config.jwt_algorithm + self.saml2_enabled = hs.config.saml2_enabled self.cas_enabled = hs.config.cas_enabled self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() @@ -97,6 +98,9 @@ class LoginRestServlet(RestServlet): flows = [] if self.jwt_enabled: flows.append({"type": LoginRestServlet.JWT_TYPE}) + if self.saml2_enabled: + flows.append({"type": LoginRestServlet.SSO_TYPE}) + flows.append({"type": LoginRestServlet.TOKEN_TYPE}) if self.cas_enabled: flows.append({"type": LoginRestServlet.SSO_TYPE}) @@ -279,19 +283,7 @@ class LoginRestServlet(RestServlet): yield auth_handler.validate_short_term_login_token_and_get_user_id(token) ) - device_id = login_submission.get("device_id") - initial_display_name = login_submission.get("initial_device_display_name") - device_id, access_token = yield self.registration_handler.register_device( - user_id, device_id, initial_display_name - ) - - result = { - "user_id": user_id, # may have changed - "access_token": access_token, - "home_server": self.hs.hostname, - "device_id": device_id, - } - + result = yield self._register_device_with_callback(user_id, login_submission) defer.returnValue(result) @defer.inlineCallbacks @@ -320,61 +312,61 @@ class LoginRestServlet(RestServlet): user_id = UserID(user, self.hs.hostname).to_string() - auth_handler = self.auth_handler - registered_user_id = yield auth_handler.check_user_exists(user_id) - if registered_user_id: - device_id = login_submission.get("device_id") - initial_display_name = login_submission.get("initial_device_display_name") - device_id, access_token = yield self.registration_handler.register_device( - registered_user_id, device_id, initial_display_name + registered_user_id = yield self.auth_handler.check_user_exists(user_id) + if not registered_user_id: + registered_user_id = yield self.registration_handler.register_user( + localpart=user ) - result = { - "user_id": registered_user_id, - "access_token": access_token, - "home_server": self.hs.hostname, - } - else: - user_id, access_token = ( - yield self.registration_handler.register(localpart=user) - ) + result = yield self._register_device_with_callback( + registered_user_id, login_submission + ) + defer.returnValue(result) - device_id = login_submission.get("device_id") - initial_display_name = login_submission.get("initial_device_display_name") - device_id, access_token = yield self.registration_handler.register_device( - registered_user_id, device_id, initial_display_name - ) - result = { - "user_id": user_id, # may have changed - "access_token": access_token, - "home_server": self.hs.hostname, - } +class BaseSSORedirectServlet(RestServlet): + """Common base class for /login/sso/redirect impls""" - defer.returnValue(result) + PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) + def on_GET(self, request): + args = request.args + if b"redirectUrl" not in args: + return 400, "Redirect URL not specified for SSO auth" + client_redirect_url = args[b"redirectUrl"][0] + sso_url = self.get_sso_url(client_redirect_url) + request.redirect(sso_url) + finish_request(request) -class CasRedirectServlet(RestServlet): - PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) + def get_sso_url(self, client_redirect_url): + """Get the URL to redirect to, to perform SSO auth + + Args: + client_redirect_url 
(bytes): the URL that we should redirect the + client to when everything is done + + Returns: + bytes: URL to redirect to + """ + # to be implemented by subclasses + raise NotImplementedError() + +class CasRedirectServlet(BaseSSORedirectServlet): def __init__(self, hs): super(CasRedirectServlet, self).__init__() self.cas_server_url = hs.config.cas_server_url.encode("ascii") self.cas_service_url = hs.config.cas_service_url.encode("ascii") - def on_GET(self, request): - args = request.args - if b"redirectUrl" not in args: - return (400, "Redirect URL not specified for CAS auth") + def get_sso_url(self, client_redirect_url): client_redirect_url_param = urllib.parse.urlencode( - {b"redirectUrl": args[b"redirectUrl"][0]} + {b"redirectUrl": client_redirect_url} ).encode("ascii") hs_redirect_url = self.cas_service_url + b"/_matrix/client/r0/login/cas/ticket" service_param = urllib.parse.urlencode( {b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)} ).encode("ascii") - request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param)) - finish_request(request) + return b"%s/login?%s" % (self.cas_server_url, service_param) class CasTicketServlet(RestServlet): @@ -457,6 +449,16 @@ class CasTicketServlet(RestServlet): return user, attributes +class SAMLRedirectServlet(BaseSSORedirectServlet): + PATTERNS = client_patterns("/login/sso/redirect", v1=True) + + def __init__(self, hs): + self._saml_handler = hs.get_saml_handler() + + def get_sso_url(self, client_redirect_url): + return self._saml_handler.handle_redirect_request(client_redirect_url) + + class SSOAuthHandler(object): """ Utility class for Resources and Servlets which handle the response from a SSO @@ -501,12 +503,8 @@ class SSOAuthHandler(object): user_id = UserID(localpart, self._hostname).to_string() registered_user_id = yield self._auth_handler.check_user_exists(user_id) if not registered_user_id: - registered_user_id, _ = ( - yield self._registration_handler.register( - localpart=localpart, - generate_token=False, - default_display_name=user_display_name, - ) + registered_user_id = yield self._registration_handler.register_user( + localpart=localpart, default_display_name=user_display_name ) login_token = self._macaroon_gen.generate_short_term_login_token( @@ -532,3 +530,5 @@ def register_servlets(hs, http_server): if hs.config.cas_enabled: CasRedirectServlet(hs).register(http_server) CasTicketServlet(hs).register(http_server) + elif hs.config.saml2_enabled: + SAMLRedirectServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index cca7e45ddbec33de847c06c501d07e7739335241..7709c2d705c2753af30b47ece4b5fae85d9c8c58 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -24,7 +24,12 @@ from canonicaljson import json from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + SynapseError, +) from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( @@ -307,7 +312,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): try: yield self.auth.get_user_by_req(request, allow_guest=True) - except AuthError as e: + except InvalidClientCredentialsError as e: # Option to allow servers to require auth when accessing # /publicRooms via CS API. 
This is especially helpful in private # federations. diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5c120e4dd5d8b70ba19edeb536b5d60bd8670bb5..f327999e59903e04d7ffc7e1623ef2f2f774a816 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -464,11 +464,10 @@ class RegisterRestServlet(RestServlet): Codes.THREEPID_IN_USE, ) - (registered_user_id, _) = yield self.registration_handler.register( + registered_user_id = yield self.registration_handler.register_user( localpart=desired_username, password=new_password, guest_access_token=guest_access_token, - generate_token=False, threepid=threepid, address=client_addr, ) @@ -542,8 +541,8 @@ class RegisterRestServlet(RestServlet): if not compare_digest(want_mac, got_mac): raise SynapseError(403, "HMAC incorrect") - (user_id, _) = yield self.registration_handler.register( - localpart=username, password=password, generate_token=False + user_id = yield self.registration_handler.register_user( + localpart=username, password=password ) result = yield self._create_registration_details(user_id, body) @@ -577,8 +576,8 @@ class RegisterRestServlet(RestServlet): def _do_guest_registration(self, params, address=None): if not self.hs.config.allow_guest_access: raise SynapseError(403, "Guest access is disabled") - user_id, _ = yield self.registration_handler.register( - generate_token=False, make_guest=True, address=address + user_id = yield self.registration_handler.register_user( + make_guest=True, address=address ) # we don't allow guests to specify their own device_id, because diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 8e362782cc3c2e0147a1a2becc1f49ff144a038b..7ce485b4712471ae25792a24fe673579e68a1e74 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -145,9 +145,9 @@ class RelationPaginationServlet(RestServlet): room_id, requester.user.to_string() ) - # This checks that a) the event exists and b) the user is allowed to - # view it. - yield self.event_handler.get_event(requester.user, room_id, parent_id) + # This gets the original event and checks that a) the event exists and + # b) the user is allowed to view it. + event = yield self.event_handler.get_event(requester.user, room_id, parent_id) limit = parse_integer(request, "limit", default=5) from_token = parse_string(request, "from") @@ -173,10 +173,22 @@ class RelationPaginationServlet(RestServlet): ) now = self.clock.time_msec() - events = yield self._event_serializer.serialize_events(events, now) + # We set bundle_aggregations to False when retrieving the original + # event because we want the content before relations were applied to + # it. + original_event = yield self._event_serializer.serialize_event( + event, now, bundle_aggregations=False + ) + # Similarly, we don't allow relations to be applied to relations, so we + # return the original relations without any aggregations on top of them + # here. 
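[Editor's note] For illustration, a response from the relation pagination endpoint after this change would look roughly like the sketch below (all field values invented). The new `original_event` key carries the parent event serialized with `bundle_aggregations=False`, i.e. without its relations applied back onto it:

example_response = {
    "chunk": [
        {
            "type": "m.reaction",
            "sender": "@alice:example.com",
            "content": {
                "m.relates_to": {
                    "rel_type": "m.annotation",
                    "event_id": "$parent_event_id",
                    "key": "👍",
                }
            },
        }
    ],
    # the parent event, without bundled aggregations
    "original_event": {
        "type": "m.room.message",
        "content": {"msgtype": "m.text", "body": "hello"},
    },
}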
+ events = yield self._event_serializer.serialize_events( + events, now, bundle_aggregations=False + ) return_value = result.to_dict() return_value["chunk"] = events + return_value["original_event"] = original_event defer.returnValue((200, return_value)) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 3318638d3e69150ed4e619e2ed77c8ee492b65c5..5fefee4dde383c5867d890cceb1a6efde0eced7c 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -25,7 +25,7 @@ from twisted.protocols.basic import FileSender from synapse.api.errors import Codes, SynapseError, cs_error from synapse.http.server import finish_request, respond_with_json -from synapse.util import logcontext +from synapse.logging.context import make_deferred_yieldable from synapse.util.stringutils import is_ascii logger = logging.getLogger(__name__) @@ -75,9 +75,7 @@ def respond_with_file(request, media_type, file_path, file_size=None, upload_nam add_file_headers(request, media_type, file_size, upload_name) with open(file_path, "rb") as f: - yield logcontext.make_deferred_yieldable( - FileSender().beginFileTransfer(f, request) - ) + yield make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) finish_request(request) else: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index df3d985a385525cfcba146910bdde7d5e1b6b5d4..65afffbb4230f68f85ac054772e8a357be7d5d83 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -33,8 +33,8 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) +from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util import logcontext from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string @@ -463,7 +463,7 @@ class MediaRepository(object): ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -511,7 +511,7 @@ class MediaRepository(object): ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -596,7 +596,7 @@ class MediaRepository(object): return if thumbnailer.transpose_method is not None: - m_width, m_height = yield logcontext.defer_to_thread( + m_width, m_height = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.transpose ) @@ -616,11 +616,11 @@ class MediaRepository(object): for (t_width, t_height, t_type), t_method in iteritems(thumbnails): # Generate the thumbnail if t_method == "crop": - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type ) elif t_method == "scale": - t_byte_source = yield logcontext.defer_to_thread( + t_byte_source = yield defer_to_thread( self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type ) else: diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index eff86836fb8a6868cb75af7b9d713598421f36f8..25e5ac2848181ae33c95c2816f957990f5747372 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -24,9 +24,8 @@ import six 
from twisted.internet import defer from twisted.protocols.basic import FileSender -from synapse.util import logcontext +from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util.file_consumer import BackgroundFileConsumer -from synapse.util.logcontext import make_deferred_yieldable from ._base import Responder @@ -65,7 +64,7 @@ class MediaStorage(object): with self.store_into_file(file_info) as (f, fname, finish_cb): # Write to the main repository - yield logcontext.defer_to_thread( + yield defer_to_thread( self.hs.get_reactor(), _write_file_synchronously, source, f ) yield finish_cb() diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 0337b64dc2b81b76d0ce96c903d42253795b5757..5871737bfdfbbd8a59c2d3de282b3a1883cd3bce 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -42,11 +42,11 @@ from synapse.http.server import ( wrap_json_request_handler, ) from synapse.http.servlet import parse_integer, parse_string +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.media.v1._base import get_filename_from_headers from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background from synapse.util.stringutils import random_string from ._base import FileInfo @@ -95,6 +95,7 @@ class PreviewUrlResource(DirectServeResource): ) def render_OPTIONS(self, request): + request.setHeader(b"Allow", b"OPTIONS, GET") return respond_with_json(request, 200, {}, send_cors=True) @wrap_json_request_handler diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 359b45ebfc4f24c3049efbb83ae28a32d448de00..37687ea7f4ecad268806ad44db94821e1bebc9a7 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -20,8 +20,7 @@ import shutil from twisted.internet import defer from synapse.config._base import Config -from synapse.util import logcontext -from synapse.util.logcontext import run_in_background +from synapse.logging.context import defer_to_thread, run_in_background from .media_storage import FileResponder @@ -68,7 +67,7 @@ class StorageProviderWrapper(StorageProvider): backend (StorageProvider) store_local (bool): Whether to store new local files or not. store_synchronous (bool): Whether to wait for file to be successfully - uploaded, or todo the upload in the backgroud. + uploaded, or to do the upload in the background. store_remote (bool): Whether remote media should be uploaded """ @@ -125,7 +124,7 @@ class FileStorageProviderBackend(StorageProvider): if not os.path.exists(dirname): os.makedirs(dirname) - return logcontext.defer_to_thread( + return defer_to_thread( self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname ) diff --git a/synapse/rest/saml2/response_resource.py b/synapse/rest/saml2/response_resource.py index 939c87306c479e53448e8e94fbf295806f0a4805..69ecc5e4b499b8d67b810e311f3ad6bf90c914b8 100644 --- a/synapse/rest/saml2/response_resource.py +++ b/synapse/rest/saml2/response_resource.py @@ -13,17 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
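[Editor's note] The media hunks above all converge on the same pattern: blocking filesystem work is pushed onto a threadpool via `defer_to_thread`, which also restores the caller's logcontext when the result comes back. A minimal sketch of that pattern, assuming `hs` is a `HomeServer` (the helper name is invented):

import shutil

from synapse.logging.context import defer_to_thread

def copy_file_off_reactor(hs, src_fname, dst_fname):
    # run the blocking copy on a threadpool rather than the reactor thread;
    # the returned Deferred follows the synapse logcontext rules
    return defer_to_thread(hs.get_reactor(), shutil.copyfile, src_fname, dst_fname)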
-import logging -import saml2 -from saml2.client import Saml2Client - -from synapse.api.errors import CodeMessageException from synapse.http.server import DirectServeResource, wrap_html_request_handler -from synapse.http.servlet import parse_string -from synapse.rest.client.v1.login import SSOAuthHandler - -logger = logging.getLogger(__name__) class SAML2ResponseResource(DirectServeResource): @@ -33,32 +24,8 @@ class SAML2ResponseResource(DirectServeResource): def __init__(self, hs): super().__init__() - - self._saml_client = Saml2Client(hs.config.saml2_sp_config) - self._sso_auth_handler = SSOAuthHandler(hs) + self._saml_handler = hs.get_saml_handler() @wrap_html_request_handler async def _async_render_POST(self, request): - resp_bytes = parse_string(request, "SAMLResponse", required=True) - relay_state = parse_string(request, "RelayState", required=True) - - try: - saml2_auth = self._saml_client.parse_authn_request_response( - resp_bytes, saml2.BINDING_HTTP_POST - ) - except Exception as e: - logger.warning("Exception parsing SAML2 response", exc_info=1) - raise CodeMessageException(400, "Unable to parse SAML2 response: %s" % (e,)) - - if saml2_auth.not_signed: - raise CodeMessageException(400, "SAML2 response was not signed") - - if "uid" not in saml2_auth.ava: - raise CodeMessageException(400, "uid not in SAML2 response") - - username = saml2_auth.ava["uid"][0] - - displayName = saml2_auth.ava.get("displayName", [None])[0] - return self._sso_auth_handler.on_successful_auth( - username, request, relay_state, user_display_name=displayName - ) + return await self._saml_handler.handle_saml_response(request) diff --git a/synapse/server.py b/synapse/server.py index a9592c396c745e8d6a9aed50e810914fb029aa65..9e28dba2b1c6c66be26414f9f062ae231c99f5cd 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -194,6 +194,7 @@ class HomeServer(object): "sendmail", "registration_handler", "account_validity_handler", + "saml_handler", "event_client_serializer", ] @@ -524,6 +525,11 @@ class HomeServer(object): def build_account_validity_handler(self): return AccountValidityHandler(self) + def build_saml_handler(self): + from synapse.handlers.saml_handler import SamlHandler + + return SamlHandler(self) + def build_event_client_serializer(self): return EventClientSerializer(self) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 1b454a56a1966c48de62da72055710f45596de3e..9f708fa20546e244117557188cfd988cb0a6c850 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -28,11 +28,11 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events.snapshot import EventContext +from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.logutils import log_function from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 29589853c62f24f8207f51e309816a7f4d02d807..2f940dbae6a2162c847583bd3b8a7e0162c50c28 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -30,12 +30,12 @@ from prometheus_client import Histogram from twisted.internet import defer from synapse.api.errors import StoreError +from synapse.logging.context import LoggingContext, 
PreserveLoggingContext from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import get_domain_from_id from synapse.util import batch_iter from synapse.util.caches.descriptors import Cache -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.stringutils import exception_to_unicode # import a function which will return a monotonic time, in seconds diff --git a/synapse/storage/events.py b/synapse/storage/events.py index fefba39ea1eca81facc3fa8d9384e4a215bcc660..b486ca50eb336a31107813b5d97f49077833c42f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -33,6 +33,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.logging.utils import log_function from synapse.metrics import BucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore @@ -45,8 +47,6 @@ from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder -from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable -from synapse.util.logutils import log_function from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -253,7 +253,14 @@ class EventsStore( ) # Read the extrems every 60 minutes - hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000) + def read_forward_extremities(): + # run as a background process to make sure that the database transactions + # have a logcontext to report to + return run_as_background_process( + "read_forward_extremities", self._read_forward_extremities + ) + + hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) @defer.inlineCallbacks def _read_forward_extremities(self): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 6d680d405ac17f44ef86fc45c5c74f152ad7a917..874d0a56bcbb06f7b5711dd60371121915ca68d8 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -29,14 +29,14 @@ from synapse.api.room_versions import EventFormatVersions from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import get_domain_from_id -from synapse.util.logcontext import ( +from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import get_domain_from_id from synapse.util.metrics import Measure from ._base import SQLBaseStore @@ -327,7 +327,7 @@ class EventsWorkerStore(SQLBaseStore): Args: events (list(str)): list of event_ids to fetch - allow_rejected (bool): Whether to teturn events that were rejected + allow_rejected (bool): Whether to return events that were rejected update_metrics (bool): Whether to update 
the cache hit ratio metrics Returns: diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 983ce132910ac8d8c3355ec6cc859d642d211bdb..8b2c2a97ab46336eb58c83372603eb3fd59b64c1 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -25,6 +25,7 @@ from twisted.internet import defer from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, ThreepidValidationError +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.types import UserID @@ -89,7 +90,8 @@ class RegistrationWorkerStore(SQLBaseStore): token (str): The access token of a user. Returns: defer.Deferred: None, if the token did not match, otherwise dict - including the keys `name`, `is_guest`, `device_id`, `token_id`. + including the keys `name`, `is_guest`, `device_id`, `token_id`, + `valid_until_ms`. """ return self.runInteraction( "get_user_by_access_token", self._query_for_auth, token @@ -283,7 +285,7 @@ class RegistrationWorkerStore(SQLBaseStore): def _query_for_auth(self, txn, token): sql = ( "SELECT users.name, users.is_guest, access_tokens.id as token_id," - " access_tokens.device_id" + " access_tokens.device_id, access_tokens.valid_until_ms" " FROM users" " INNER JOIN access_tokens on users.name = access_tokens.user_id" " WHERE token = ?" @@ -431,19 +433,6 @@ class RegistrationWorkerStore(SQLBaseStore): ) ) - @defer.inlineCallbacks - def get_3pid_guest_access_token(self, medium, address): - ret = yield self._simple_select_one( - "threepid_guest_access_tokens", - {"medium": medium, "address": address}, - ["guest_access_token"], - True, - "get_3pid_guest_access_token", - ) - if ret: - defer.returnValue(ret["guest_access_token"]) - defer.returnValue(None) - @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address, require_verified=False): """Returns user id from threepid @@ -615,23 +604,29 @@ class RegistrationStore( ) self.register_background_update_handler( - "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag + "users_set_deactivated_flag", self._background_update_set_deactivated_flag ) # Create a background job for culling expired 3PID validity tokens - hs.get_clock().looping_call( - self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS - ) + def start_cull(): + # run as a background process to make sure that the database transactions + # have a logcontext to report to + return run_as_background_process( + "cull_expired_threepid_validation_tokens", + self.cull_expired_threepid_validation_tokens, + ) + + hs.get_clock().looping_call(start_cull, THIRTY_MINUTES_IN_MS) @defer.inlineCallbacks - def _backgroud_update_set_deactivated_flag(self, progress, batch_size): + def _background_update_set_deactivated_flag(self, progress, batch_size): """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1 for each of them. 
""" last_user = progress.get("user_id", "") - def _backgroud_update_set_deactivated_flag_txn(txn): + def _background_update_set_deactivated_flag_txn(txn): txn.execute( """ SELECT @@ -676,7 +671,7 @@ class RegistrationStore( return False end = yield self.runInteraction( - "users_set_deactivated_flag", _backgroud_update_set_deactivated_flag_txn + "users_set_deactivated_flag", _background_update_set_deactivated_flag_txn ) if end: @@ -685,14 +680,16 @@ class RegistrationStore( defer.returnValue(batch_size) @defer.inlineCallbacks - def add_access_token_to_user(self, user_id, token, device_id=None): + def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms): """Adds an access token for the given user. Args: user_id (str): The user ID. token (str): The new access token to add. device_id (str): ID of the device to associate with the access - token + token + valid_until_ms (int|None): when the token is valid until. None for + no expiry. Raises: StoreError if there was a problem adding this. """ @@ -700,14 +697,19 @@ class RegistrationStore( yield self._simple_insert( "access_tokens", - {"id": next_id, "user_id": user_id, "token": token, "device_id": device_id}, + { + "id": next_id, + "user_id": user_id, + "token": token, + "device_id": device_id, + "valid_until_ms": valid_until_ms, + }, desc="add_access_token_to_user", ) - def register( + def register_user( self, user_id, - token=None, password_hash=None, was_guest=False, make_guest=False, @@ -720,9 +722,6 @@ class RegistrationStore( Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. @@ -739,10 +738,9 @@ class RegistrationStore( StoreError if the user_id could not be registered. """ return self.runInteraction( - "register", - self._register, + "register_user", + self._register_user, user_id, - token, password_hash, was_guest, make_guest, @@ -752,11 +750,10 @@ class RegistrationStore( user_type, ) - def _register( + def _register_user( self, txn, user_id, - token, password_hash, was_guest, make_guest, @@ -769,8 +766,6 @@ class RegistrationStore( now = int(self.clock.time()) - next_id = self._access_tokens_id_gen.get_next() - try: if was_guest: # Ensure that the guest user actually exists @@ -818,14 +813,6 @@ class RegistrationStore( if self._account_validity.enabled: self.set_expiration_date_for_user_txn(txn, user_id) - if token: - # it's possible for this to get a conflict, but only for a single user - # since tokens are namespaced based on their user ID - txn.execute( - "INSERT INTO access_tokens(id, user_id, token)" " VALUES (?,?,?)", - (next_id, user_id, token), - ) - if create_profile_with_displayname: # set a default displayname serverside to avoid ugly race # between auto-joins and clients trying to set displaynames @@ -972,40 +959,6 @@ class RegistrationStore( defer.returnValue(res if res else False) - @defer.inlineCallbacks - def save_or_get_3pid_guest_access_token( - self, medium, address, access_token, inviter_user_id - ): - """ - Gets the 3pid's guest access token if exists, else saves access_token. - - Args: - medium (str): Medium of the 3pid. Must be "email". - address (str): 3pid address. - access_token (str): The access token to persist if none is - already persisted. 
- inviter_user_id (str): User ID of the inviter. - - Returns: - deferred str: Whichever access token is persisted at the end - of this function call. - """ - - def insert(txn): - txn.execute( - "INSERT INTO threepid_guest_access_tokens " - "(medium, address, guest_access_token, first_inviter) " - "VALUES (?, ?, ?, ?)", - (medium, address, access_token, inviter_user_id), - ) - - try: - yield self.runInteraction("save_3pid_guest_access_token", insert) - defer.returnValue(access_token) - except self.database_engine.module.IntegrityError: - ret = yield self.get_3pid_guest_access_token(medium, address) - defer.returnValue(ret) - def add_user_pending_deactivation(self, user_id): """ Adds a user to the table of users who need to be parted from all the rooms they're diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 1b01934c19e8d129ef50ee07238e09a28b76cce0..9954bc094f8d9ad4b302d3f3bd76b17a3d432b25 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -60,7 +60,7 @@ class PaginationChunk(object): class RelationPaginationToken(object): """Pagination token for relation pagination API. - As the results are order by topological ordering, we can use the + As the results are in topological order, we can use the `topological_ordering` and `stream_ordering` fields of the events at the boundaries of the chunk as pagination tokens. diff --git a/synapse/storage/schema/delta/55/access_token_expiry.sql b/synapse/storage/schema/delta/55/access_token_expiry.sql new file mode 100644 index 0000000000000000000000000000000000000000..4590604bfd8ad5e286f174aba5dc36fb1e10ece6 --- /dev/null +++ b/synapse/storage/schema/delta/55/access_token_expiry.sql @@ -0,0 +1,18 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- when this access token can be used until, in ms since the epoch. NULL means the token +-- never expires. +ALTER TABLE access_tokens ADD COLUMN valid_until_ms BIGINT; diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index d9482a384339f3d77c1aaaf260aed9ef677d4c47..a0465484df5eb9f9f6841ed373b24171c20dae26 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -41,12 +41,12 @@ from six.moves import range from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.engines import PostgresEngine from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -833,7 +833,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: Deferred[tuple[list[_EventDictReturn], str]]: Returns the results as a list of _EventDictReturn and a token that points to the end - of the result set. + of the result set. 
If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_token` and `to_token`), or `limit` is zero. """ assert int(limit) >= 0 @@ -905,15 +907,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): only those before direction(char): Either 'b' or 'f' to indicate whether we are paginating forwards or backwards from `from_key`. - limit (int): The maximum number of events to return. Zero or less - means no limit. + limit (int): The maximum number of events to return. event_filter (Filter|None): If provided filters the events to those that match the filter. Returns: - tuple[list[dict], str]: Returns the results as a list of dicts and - a token that points to the end of the result set. The dicts have - the keys "event_id", "topological_ordering" and "stream_orderign". + tuple[list[FrozenEvent], str]: Returns the results as a list of + events and a token that points to the end of the result set. If no + events are returned then the end of the stream has been reached + (i.e. there are no events between `from_key` and `to_key`). """ from_key = RoomStreamToken.parse(from_key) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index b1188f6bcb2841117f51e4ea5ed44c54c3d8ef4e..fd1861917805c69ecae3a6c4a87721eb8752c4cc 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -133,34 +133,6 @@ class TransactionStore(SQLBaseStore): desc="set_received_txn_response", ) - def prep_send_transaction(self, transaction_id, destination, origin_server_ts): - """Persists an outgoing transaction and calculates the values for the - previous transaction id list. - - This should be called before sending the transaction so that it has the - correct value for the `prev_ids` key. - - Args: - transaction_id (str) - destination (str) - origin_server_ts (int) - - Returns: - list: A list of previous transaction ids. - """ - return defer.succeed([]) - - def delivered_txn(self, transaction_id, destination, code, response_dict): - """Persists the response for an outgoing transaction. - - Args: - transaction_id (str) - destination (str) - code (int) - response_json (str) - """ - pass - @defer.inlineCallbacks def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index dcc747cac1fe9e1a7278c9cee541c603f5e688be..f506b2a695f6b9b5e256e1b43a67eea9b25b6ab9 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -21,7 +21,7 @@ import attr from twisted.internet import defer, task -from synapse.util.logcontext import PreserveLoggingContext +from synapse.logging import context logger = logging.getLogger(__name__) @@ -46,7 +46,7 @@ class Clock(object): @defer.inlineCallbacks def sleep(self, seconds): d = defer.Deferred() - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): self._reactor.callLater(seconds, d.callback, seconds) res = yield d defer.returnValue(res) @@ -62,7 +62,10 @@ class Clock(object): def looping_call(self, f, msec): """Call a function repeatedly. - Waits `msec` initially before calling `f` for the first time. + Waits `msec` initially before calling `f` for the first time. + + Note that the function will be called with no logcontext, so if it is anything + other than trivial, you probably want to wrap it in run_as_background_process. Args: f(function): The function to call repeatedly. 
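[Editor's note] The new `looping_call` docstring codifies the pattern already used twice in this diff (`read_forward_extremities` and `start_cull`): give periodic work a logcontext by wrapping it in `run_as_background_process`. A sketch, assuming `hs` is a `HomeServer` and `do_cleanup` is a hypothetical zero-argument function returning a Deferred:

from synapse.metrics.background_process_metrics import run_as_background_process

def start_cleanup_loop(hs, do_cleanup):
    def cleanup():
        # wrap the work in a background process so that it (and any database
        # transactions it performs) has a logcontext to report against
        return run_as_background_process("cleanup", do_cleanup)

    # looping_call invokes its callback with no logcontext; run every 30 minutes
    hs.get_clock().looping_call(cleanup, 30 * 60 * 1000)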
@@ -77,6 +80,9 @@ class Clock(object): def call_later(self, delay, callback, *args, **kwargs): """Call something later + Note that the function will be called with no logcontext, so if it is anything + other than trivial, you probably want to wrap it in run_as_background_process. + Args: delay(float): How long to wait in seconds. callback(function): Function to call @@ -85,10 +91,10 @@ class Clock(object): """ def wrapped_callback(*args, **kwargs): - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): callback(*args, **kwargs) - with PreserveLoggingContext(): + with context.PreserveLoggingContext(): return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) def cancel_call_later(self, timer, ignore_errs=False): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 7757b8708ac1e08954636f69f19c425d070e8dc3..58a6b8764f10921e4d442d3fdfab68203a6d9389 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -23,13 +23,12 @@ from twisted.internet import defer from twisted.internet.defer import CancelledError from twisted.python import failure -from synapse.util import Clock, logcontext, unwrapFirstError - -from .logcontext import ( +from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, run_in_background, ) +from synapse.util import Clock, unwrapFirstError logger = logging.getLogger(__name__) @@ -153,7 +152,7 @@ def concurrently_execute(func, args, limit): except StopIteration: pass - return logcontext.make_deferred_yieldable( + return make_deferred_yieldable( defer.gatherResults( [run_in_background(_concurrently_execute_inner) for _ in range(limit)], consumeErrors=True, @@ -174,7 +173,7 @@ def yieldable_gather_results(func, iter, *args, **kwargs): Deferred[list]: Resolved when all functions have been invoked, or errors if one of the function calls fails. 
""" - return logcontext.make_deferred_yieldable( + return make_deferred_yieldable( defer.gatherResults( [run_in_background(func, item, *args, **kwargs) for item in iter], consumeErrors=True, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index d2f25063aaa8f290e3320ad5c8ef0d246c13e2d3..675db2f4483ca8f5766efa6520504e16d45b5bc3 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -24,7 +24,8 @@ from six import itervalues, string_types from twisted.internet import defer -from synapse.util import logcontext, unwrapFirstError +from synapse.logging.context import make_deferred_yieldable, preserve_fn +from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache @@ -388,7 +389,7 @@ class CacheDescriptor(_CacheDescriptorBase): except KeyError: ret = defer.maybeDeferred( - logcontext.preserve_fn(self.function_to_call), obj, *args, **kwargs + preserve_fn(self.function_to_call), obj, *args, **kwargs ) def onErr(f): @@ -408,7 +409,7 @@ class CacheDescriptor(_CacheDescriptorBase): observer = result_d.observe() if isinstance(observer, defer.Deferred): - return logcontext.make_deferred_yieldable(observer) + return make_deferred_yieldable(observer) else: return observer @@ -563,7 +564,7 @@ class CacheListDescriptor(_CacheDescriptorBase): cached_defers.append( defer.maybeDeferred( - logcontext.preserve_fn(self.function_to_call), **args_to_call + preserve_fn(self.function_to_call), **args_to_call ).addCallbacks(complete_all, errback) ) @@ -571,7 +572,7 @@ class CacheListDescriptor(_CacheDescriptorBase): d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( lambda _: results, unwrapFirstError ) - return logcontext.make_deferred_yieldable(d) + return make_deferred_yieldable(d) else: return results diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index cbe54d45dda3f493f5abc799e4842980a31a9b14..d6908e169d45435b0bd57b823932ba2b313d2244 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -16,9 +16,9 @@ import logging from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import register_cache -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) @@ -78,7 +78,7 @@ class ResponseCache(object): *deferred* should run its callbacks in the sentinel logcontext (ie, you should wrap normal synapse deferreds with - logcontext.run_in_background). + synapse.logging.context.run_in_background). 
Can return either a new Deferred (which also doesn't follow the synapse logcontext rules), or, if *deferred* was already complete, the actual diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 5a79db821c98d8e560df0d91ee9c7e49193bb045..45af8d3eeb03ca3a47d04440b5bd6865428b30c6 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -17,8 +17,8 @@ import logging from twisted.internet import defer +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.logcontext import make_deferred_yieldable, run_in_background logger = logging.getLogger(__name__) diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 629ed44149bd49e46776bfd06a2f8d40733d1391..8b17d1c8b8914112c5ac5ef7741b96df26b17bb5 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -17,7 +17,7 @@ from six.moves import queue from twisted.internet import threads -from synapse.util.logcontext import make_deferred_yieldable, run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background class BackgroundFileConsumer(object): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 6b0d2deea040482eb6e5094e86d491c94037911d..40e5c10a493d351a127fe0479a2c263428282ad1 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -1,4 +1,4 @@ -# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,668 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" Thread-local-alike tracking of log contexts within synapse - -This module provides objects and utilities for tracking contexts through -synapse code, so that log lines can include a request identifier, and so that -CPU and database activity can be accounted for against the request that caused -them. - -See doc/log_contexts.rst for details on how this works. +""" +Backwards compatibility re-exports of ``synapse.logging.context`` functionality. """ -import logging -import threading - -from twisted.internet import defer, threads - -logger = logging.getLogger(__name__) - -try: - import resource - - # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined - # to be 1 on linux so we hard code it. - RUSAGE_THREAD = 1 - - # If the system doesn't support RUSAGE_THREAD then this should throw an - # exception. - resource.getrusage(RUSAGE_THREAD) - - def get_thread_resource_usage(): - return resource.getrusage(RUSAGE_THREAD) - - -except Exception: - # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we - # won't track resource usage by returning None. 
- def get_thread_resource_usage(): - return None - - -class ContextResourceUsage(object): - """Object for tracking the resources used by a log context - - Attributes: - ru_utime (float): user CPU time (in seconds) - ru_stime (float): system CPU time (in seconds) - db_txn_count (int): number of database transactions done - db_sched_duration_sec (float): amount of time spent waiting for a - database connection - db_txn_duration_sec (float): amount of time spent doing database - transactions (excluding scheduling time) - evt_db_fetch_count (int): number of events requested from the database - """ - - __slots__ = [ - "ru_stime", - "ru_utime", - "db_txn_count", - "db_txn_duration_sec", - "db_sched_duration_sec", - "evt_db_fetch_count", - ] - - def __init__(self, copy_from=None): - """Create a new ContextResourceUsage - - Args: - copy_from (ContextResourceUsage|None): if not None, an object to - copy stats from - """ - if copy_from is None: - self.reset() - else: - self.ru_utime = copy_from.ru_utime - self.ru_stime = copy_from.ru_stime - self.db_txn_count = copy_from.db_txn_count - - self.db_txn_duration_sec = copy_from.db_txn_duration_sec - self.db_sched_duration_sec = copy_from.db_sched_duration_sec - self.evt_db_fetch_count = copy_from.evt_db_fetch_count - - def copy(self): - return ContextResourceUsage(copy_from=self) - - def reset(self): - self.ru_stime = 0.0 - self.ru_utime = 0.0 - self.db_txn_count = 0 - - self.db_txn_duration_sec = 0 - self.db_sched_duration_sec = 0 - self.evt_db_fetch_count = 0 - - def __repr__(self): - return ( - "<ContextResourceUsage ru_stime='%r', ru_utime='%r', " - "db_txn_count='%r', db_txn_duration_sec='%r', " - "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>" - ) % ( - self.ru_stime, - self.ru_utime, - self.db_txn_count, - self.db_txn_duration_sec, - self.db_sched_duration_sec, - self.evt_db_fetch_count, - ) - - def __iadd__(self, other): - """Add another ContextResourceUsage's stats to this one's. - - Args: - other (ContextResourceUsage): the other resource usage object - """ - self.ru_utime += other.ru_utime - self.ru_stime += other.ru_stime - self.db_txn_count += other.db_txn_count - self.db_txn_duration_sec += other.db_txn_duration_sec - self.db_sched_duration_sec += other.db_sched_duration_sec - self.evt_db_fetch_count += other.evt_db_fetch_count - return self - - def __isub__(self, other): - self.ru_utime -= other.ru_utime - self.ru_stime -= other.ru_stime - self.db_txn_count -= other.db_txn_count - self.db_txn_duration_sec -= other.db_txn_duration_sec - self.db_sched_duration_sec -= other.db_sched_duration_sec - self.evt_db_fetch_count -= other.evt_db_fetch_count - return self - - def __add__(self, other): - res = ContextResourceUsage(copy_from=self) - res += other - return res - - def __sub__(self, other): - res = ContextResourceUsage(copy_from=self) - res -= other - return res - - -class LoggingContext(object): - """Additional context for log formatting. Contexts are scoped within a - "with" block. - - If a parent is given when creating a new context, then: - - logging fields are copied from the parent to the new context on entry - - when the new context exits, the cpu usage stats are copied from the - child to the parent - - Args: - name (str): Name for the context for debugging. 
- parent_context (LoggingContext|None): The parent of the new context - """ - - __slots__ = [ - "previous_context", - "name", - "parent_context", - "_resource_usage", - "usage_start", - "main_thread", - "alive", - "request", - "tag", - ] - - thread_local = threading.local() - - class Sentinel(object): - """Sentinel to represent the root context""" - - __slots__ = [] - - def __str__(self): - return "sentinel" - - def copy_to(self, record): - pass - - def start(self): - pass - - def stop(self): - pass - - def add_database_transaction(self, duration_sec): - pass - - def add_database_scheduled(self, sched_sec): - pass - - def record_event_fetch(self, event_count): - pass - - def __nonzero__(self): - return False - - __bool__ = __nonzero__ # python3 - - sentinel = Sentinel() - - def __init__(self, name=None, parent_context=None, request=None): - self.previous_context = LoggingContext.current_context() - self.name = name - - # track the resources used by this context so far - self._resource_usage = ContextResourceUsage() - - # If alive has the thread resource usage when the logcontext last - # became active. - self.usage_start = None - - self.main_thread = threading.current_thread() - self.request = None - self.tag = "" - self.alive = True - - self.parent_context = parent_context - - if self.parent_context is not None: - self.parent_context.copy_to(self) - - if request is not None: - # the request param overrides the request from the parent context - self.request = request - - def __str__(self): - if self.request: - return str(self.request) - return "%s@%x" % (self.name, id(self)) - - @classmethod - def current_context(cls): - """Get the current logging context from thread local storage - - Returns: - LoggingContext: the current logging context - """ - return getattr(cls.thread_local, "current_context", cls.sentinel) - - @classmethod - def set_current_context(cls, context): - """Set the current logging context in thread local storage - Args: - context(LoggingContext): The context to activate. - Returns: - The context that was previously active - """ - current = cls.current_context() - - if current is not context: - current.stop() - cls.thread_local.current_context = context - context.start() - return current - - def __enter__(self): - """Enters this logging context into thread local storage""" - old_context = self.set_current_context(self) - if self.previous_context != old_context: - logger.warn( - "Expected previous context %r, found %r", - self.previous_context, - old_context, - ) - self.alive = True - - return self - - def __exit__(self, type, value, traceback): - """Restore the logging context in thread local storage to the state it - was before this context was entered. - Returns: - None to avoid suppressing any exceptions that were thrown. 
- """ - current = self.set_current_context(self.previous_context) - if current is not self: - if current is self.sentinel: - logger.warning("Expected logging context %s was lost", self) - else: - logger.warning( - "Expected logging context %s but found %s", self, current - ) - self.previous_context = None - self.alive = False - - # if we have a parent, pass our CPU usage stats on - if self.parent_context is not None and hasattr( - self.parent_context, "_resource_usage" - ): - self.parent_context._resource_usage += self._resource_usage - - # reset them in case we get entered again - self._resource_usage.reset() - - def copy_to(self, record): - """Copy logging fields from this context to a log record or - another LoggingContext - """ - - # 'request' is the only field we currently use in the logger, so that's - # all we need to copy - record.request = self.request - - def start(self): - if threading.current_thread() is not self.main_thread: - logger.warning("Started logcontext %s on different thread", self) - return - - # If we haven't already started record the thread resource usage so - # far - if not self.usage_start: - self.usage_start = get_thread_resource_usage() - - def stop(self): - if threading.current_thread() is not self.main_thread: - logger.warning("Stopped logcontext %s on different thread", self) - return - - # When we stop, let's record the cpu used since we started - if not self.usage_start: - logger.warning("Called stop on logcontext %s without calling start", self) - return - - utime_delta, stime_delta = self._get_cputime() - self._resource_usage.ru_utime += utime_delta - self._resource_usage.ru_stime += stime_delta - - self.usage_start = None - - def get_resource_usage(self): - """Get resources used by this logcontext so far. - - Returns: - ContextResourceUsage: a *copy* of the object tracking resource - usage so far - """ - # we always return a copy, for consistency - res = self._resource_usage.copy() - - # If we are on the correct thread and we're currently running then we - # can include resource usage so far. - is_main_thread = threading.current_thread() is self.main_thread - if self.alive and self.usage_start and is_main_thread: - utime_delta, stime_delta = self._get_cputime() - res.ru_utime += utime_delta - res.ru_stime += stime_delta - - return res - - def _get_cputime(self): - """Get the cpu usage time so far - - Returns: Tuple[float, float]: seconds in user mode, seconds in system mode - """ - current = get_thread_resource_usage() - - utime_delta = current.ru_utime - self.usage_start.ru_utime - stime_delta = current.ru_stime - self.usage_start.ru_stime - - # sanity check - if utime_delta < 0: - logger.error( - "utime went backwards! %f < %f", - current.ru_utime, - self.usage_start.ru_utime, - ) - utime_delta = 0 - - if stime_delta < 0: - logger.error( - "stime went backwards! 
%f < %f", - current.ru_stime, - self.usage_start.ru_stime, - ) - stime_delta = 0 - - return utime_delta, stime_delta - - def add_database_transaction(self, duration_sec): - if duration_sec < 0: - raise ValueError("DB txn time can only be non-negative") - self._resource_usage.db_txn_count += 1 - self._resource_usage.db_txn_duration_sec += duration_sec - - def add_database_scheduled(self, sched_sec): - """Record a use of the database pool - - Args: - sched_sec (float): number of seconds it took us to get a - connection - """ - if sched_sec < 0: - raise ValueError("DB scheduling time can only be non-negative") - self._resource_usage.db_sched_duration_sec += sched_sec - - def record_event_fetch(self, event_count): - """Record a number of events being fetched from the db - - Args: - event_count (int): number of events being fetched - """ - self._resource_usage.evt_db_fetch_count += event_count - - -class LoggingContextFilter(logging.Filter): - """Logging filter that adds values from the current logging context to each - record. - Args: - **defaults: Default values to avoid formatters complaining about - missing fields - """ - - def __init__(self, **defaults): - self.defaults = defaults - - def filter(self, record): - """Add each fields from the logging contexts to the record. - Returns: - True to include the record in the log output. - """ - context = LoggingContext.current_context() - for key, value in self.defaults.items(): - setattr(record, key, value) - - # context should never be None, but if it somehow ends up being, then - # we end up in a death spiral of infinite loops, so let's check, for - # robustness' sake. - if context is not None: - context.copy_to(record) - - return True - - -class PreserveLoggingContext(object): - """Captures the current logging context and restores it when the scope is - exited. Used to restore the context after a function using - @defer.inlineCallbacks is resumed by a callback from the reactor.""" - - __slots__ = ["current_context", "new_context", "has_parent"] - - def __init__(self, new_context=None): - if new_context is None: - new_context = LoggingContext.sentinel - self.new_context = new_context - - def __enter__(self): - """Captures the current logging context""" - self.current_context = LoggingContext.set_current_context(self.new_context) - - if self.current_context: - self.has_parent = self.current_context.previous_context is not None - if not self.current_context.alive: - logger.debug("Entering dead context: %s", self.current_context) - - def __exit__(self, type, value, traceback): - """Restores the current logging context""" - context = LoggingContext.set_current_context(self.current_context) - - if context != self.new_context: - if context is LoggingContext.sentinel: - logger.warning("Expected logging context %s was lost", self.new_context) - else: - logger.warning( - "Expected logging context %s but found %s", - self.new_context, - context, - ) - - if self.current_context is not LoggingContext.sentinel: - if not self.current_context.alive: - logger.debug("Restoring dead context: %s", self.current_context) - - -def nested_logging_context(suffix, parent_context=None): - """Creates a new logging context as a child of another. - - The nested logging context will have a 'request' made up of the parent context's - request, plus the given suffix. - - CPU/db usage stats will be added to the parent context's on exit. - - Normal usage looks like: - - with nested_logging_context(suffix): - # ... 
do stuff - - Args: - suffix (str): suffix to add to the parent context's 'request'. - parent_context (LoggingContext|None): parent context. Will use the current context - if None. - - Returns: - LoggingContext: new logging context. - """ - if parent_context is None: - parent_context = LoggingContext.current_context() - return LoggingContext( - parent_context=parent_context, request=parent_context.request + "-" + suffix - ) - - -def preserve_fn(f): - """Function decorator which wraps the function with run_in_background""" - - def g(*args, **kwargs): - return run_in_background(f, *args, **kwargs) - - return g - - -def run_in_background(f, *args, **kwargs): - """Calls a function, ensuring that the current context is restored after - return from the function, and that the sentinel context is set once the - deferred returned by the function completes. - - Useful for wrapping functions that return a deferred which you don't yield - on (for instance because you want to pass it to deferred.gatherResults()). - - Note that if you completely discard the result, you should make sure that - `f` doesn't raise any deferred exceptions, otherwise a scary-looking - CRITICAL error about an unhandled error will be logged without much - indication about where it came from. - """ - current = LoggingContext.current_context() - try: - res = f(*args, **kwargs) - except: # noqa: E722 - # the assumption here is that the caller doesn't want to be disturbed - # by synchronous exceptions, so let's turn them into Failures. - return defer.fail() - - if not isinstance(res, defer.Deferred): - return res - - if res.called and not res.paused: - # The function should have maintained the logcontext, so we can - # optimise out the messing about - return res - - # The function may have reset the context before returning, so - # we need to restore it now. - ctx = LoggingContext.set_current_context(current) - - # The original context will be restored when the deferred - # completes, but there is nothing waiting for it, so it will - # get leaked into the reactor or some other function which - # wasn't expecting it. We therefore need to reset the context - # here. - # - # (If this feels asymmetric, consider it this way: we are - # effectively forking a new thread of execution. We are - # probably currently within a ``with LoggingContext()`` block, - # which is supposed to have a single entry and exit point. But - # by spawning off another deferred, we are effectively - # adding a new exit point.) - res.addBoth(_set_context_cb, ctx) - return res - - -def make_deferred_yieldable(deferred): - """Given a deferred, make it follow the Synapse logcontext rules: - - If the deferred has completed (or is not actually a Deferred), essentially - does nothing (just returns another completed deferred with the - result/failure). - - If the deferred has not yet completed, resets the logcontext before - returning a deferred. Then, when the deferred completes, restores the - current logcontext before running callbacks/errbacks. - - (This is more-or-less the opposite operation to run_in_background.) - """ - if not isinstance(deferred, defer.Deferred): - return deferred - - if deferred.called and not deferred.paused: - # it looks like this deferred is ready to run any callbacks we give it - # immediately. We may as well optimise out the logcontext faffery. - return deferred - - # ok, we can't be sure that a yield won't block, so let's reset the - # logcontext, and add a callback to the deferred to restore it. 
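The docstrings above spell out the two halves of Synapse's logcontext contract: `run_in_background` forks work off without yielding on it, while `make_deferred_yieldable` makes a pending deferred safe to yield on. Before the function resets the context below, here is a minimal sketch of a caller honouring both halves (`fetch_something` is a stand-in of ours, not part of this patch):

```python
from twisted.internet import defer, reactor
from twisted.internet.task import deferLater

from synapse.logging.context import make_deferred_yieldable, run_in_background


def fetch_something():
    # Stand-in for a deferred-returning operation (DB hit, HTTP call, ...).
    return deferLater(reactor, 0.1, lambda: "result")


@defer.inlineCallbacks
def handler():
    # Fork the work off: run_in_background restores our logcontext before
    # returning, and resets to the sentinel once the deferred completes,
    # so nothing leaks into the reactor.
    d = run_in_background(fetch_something)

    # ... other work under the current logcontext ...

    # To consume the result, make the deferred follow the logcontext rules:
    # drop to the sentinel while blocked, restore our context on resume.
    result = yield make_deferred_yieldable(d)
    defer.returnValue(result)
```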
- prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) - deferred.addBoth(_set_context_cb, prev_context) - return deferred - - -def _set_context_cb(result, context): - """A callback function which just sets the logging context""" - LoggingContext.set_current_context(context) - return result - - -def defer_to_thread(reactor, f, *args, **kwargs): - """ - Calls the function `f` using a thread from the reactor's default threadpool and - returns the result as a Deferred. - - Creates a new logcontext for `f`, which is created as a child of the current - logcontext (so its CPU usage metrics will get attributed to the current - logcontext). `f` should preserve the logcontext it is given. - - The result deferred follows the Synapse logcontext rules: you should `yield` - on it. - - Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked, and whose threadpool we should use for the - function. - - Normally this will be hs.get_reactor(). - - f (callable): The function to call. - - args: positional arguments to pass to f. - - kwargs: keyword arguments to pass to f. - - Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an - errback if `f` throws an exception. - """ - return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) - - -def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): - """ - A wrapper for twisted.internet.threads.deferToThreadpool, which handles - logcontexts correctly. - - Calls the function `f` using a thread from the given threadpool and returns - the result as a Deferred. - - Creates a new logcontext for `f`, which is created as a child of the current - logcontext (so its CPU usage metrics will get attributed to the current - logcontext). `f` should preserve the logcontext it is given. - - The result deferred follows the Synapse logcontext rules: you should `yield` - on it. - - Args: - reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread - the Deferred will be invoked. Normally this will be hs.get_reactor(). - - threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for - running `f`. Normally this will be hs.get_reactor().getThreadPool(). - - f (callable): The function to call. - - args: positional arguments to pass to f. - - kwargs: keyword arguments to pass to f. - - Returns: - Deferred: A Deferred which fires a callback with the result of `f`, or an - errback if `f` throws an exception. - """ - logcontext = LoggingContext.current_context() - - def g(): - with LoggingContext(parent_context=logcontext): - return f(*args, **kwargs) - - return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) +from synapse.logging.context import ( + LoggingContext, + LoggingContextFilter, + PreserveLoggingContext, + defer_to_thread, + make_deferred_yieldable, + nested_logging_context, + preserve_fn, + run_in_background, +) + +__all__ = [ + "defer_to_thread", + "LoggingContext", + "LoggingContextFilter", + "make_deferred_yieldable", + "nested_logging_context", + "preserve_fn", + "PreserveLoggingContext", + "run_in_background", +] diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index fbf570c756d8b984d11d7c44da46e9c66b37d1d0..320e8f8174044c6d48727ce1463c1fafecdb2abb 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,41 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +""" +Backwards compatibility re-exports of ``synapse.logging.formatter`` functionality. +""" -import logging -import traceback +from synapse.logging.formatter import LogFormatter -from six import StringIO - - -class LogFormatter(logging.Formatter): - """Log formatter which gives more detail for exceptions - - This is the same as the standard log formatter, except that when logging - exceptions [typically via log.foo("msg", exc_info=1)], it prints the - sequence that led up to the point at which the exception was caught. - (Normally only stack frames between the point the exception was raised and - where it was caught are logged). - """ - - def __init__(self, *args, **kwargs): - super(LogFormatter, self).__init__(*args, **kwargs) - - def formatException(self, ei): - sio = StringIO() - (typ, val, tb) = ei - - # log the stack above the exception capture point if possible, but - # check that we actually have an f_back attribute to work around - # https://twistedmatrix.com/trac/ticket/9305 - - if tb and hasattr(tb.tb_frame, "f_back"): - sio.write("Capture point (most recent call last):\n") - traceback.print_stack(tb.tb_frame.f_back, None, sio) - - traceback.print_exception(typ, val, tb, None, sio) - s = sio.getvalue() - sio.close() - if s[-1:] == "\n": - s = s[:-1] - return s +__all__ = ["LogFormatter"] diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 01284d3cf81200c9ea9b1c5537b8e434595c9652..c30b6de19c98db717036f93bad0a48fd5e7e521a 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -20,8 +20,8 @@ from prometheus_client import Counter from twisted.internet import defer +from synapse.logging.context import LoggingContext from synapse.metrics import InFlightGauge -from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 06defa81992ee9312e6bf7ba6c35b691785d25a4..5ca4521ce36c97d7751c8bcf732690df6bedbab9 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -20,7 +20,7 @@ import logging from twisted.internet import defer from synapse.api.errors import LimitExceededError -from synapse.util.logcontext import ( +from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, run_in_background, @@ -36,9 +36,11 @@ class FederationRateLimiter(object): clock (Clock) config (FederationRateLimitConfig) """ - self.clock = clock - self._config = config - self.ratelimiters = {} + + def new_limiter(): + return _PerHostRatelimiter(clock=clock, config=config) + + self.ratelimiters = collections.defaultdict(new_limiter) def ratelimit(self, host): """Used to ratelimit an incoming request from given host @@ -53,11 +55,9 @@ class FederationRateLimiter(object): host (str): Origin of incoming request. Returns: - _PerHostRatelimiter + context manager which returns a deferred. 
""" - return self.ratelimiters.setdefault( - host, _PerHostRatelimiter(clock=self.clock, config=self._config) - ).ratelimit() + return self.ratelimiters[host].ratelimit() class _PerHostRatelimiter(object): @@ -122,7 +122,7 @@ class _PerHostRatelimiter(object): self.request_times.append(time_now) def queue_request(): - if len(self.current_processing) > self.concurrent_requests: + if len(self.current_processing) >= self.concurrent_requests: queue_defer = defer.Deferred() self.ready_request_queue[request_id] = queue_defer logger.info( diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 1a77456498ac2ae5cfc735046b4b6f8c68f54160..d8d0ceae51733ef344afd444d0a0f5afb859704b 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -17,7 +17,7 @@ import random from twisted.internet import defer -import synapse.util.logcontext +import synapse.logging.context from synapse.api.errors import CodeMessageException logger = logging.getLogger(__name__) @@ -225,4 +225,4 @@ class RetryDestinationLimiter(object): logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. - synapse.util.logcontext.run_in_background(store_retry_timings) + synapse.logging.context.run_in_background(store_retry_timings) diff --git a/synctl b/synctl index 30d751236faaa369862aa05010dbd558ac012880..794de99ea3c174f7e9995e69e67f99b5358e59e5 100755 --- a/synctl +++ b/synctl @@ -150,8 +150,9 @@ def main(): parser.add_argument( "--no-daemonize", action="store_false", + dest="daemonize", help="Run synapse in the foreground for debugging. " - "Will work only if the daemonize option is not set in the config." + "Will work only if the daemonize option is not set in the config.", ) options = parser.parse_args() @@ -159,7 +160,7 @@ def main(): if options.worker and options.all_processes: write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) sys.exit(1) - if options.no_daemonize and options.all_processes: + if not options.daemonize and options.all_processes: write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr) sys.exit(1) @@ -169,9 +170,8 @@ def main(): write( "No config file found\n" "To generate a config file, run '%s -c %s --generate-config" - " --server-name=<server name> --report-stats=<yes/no>'\n" % ( - " ".join(SYNAPSE), options.configfile, - ), + " --server-name=<server name> --report-stats=<yes/no>'\n" + % (" ".join(SYNAPSE), options.configfile), stream=sys.stderr, ) sys.exit(1) @@ -289,7 +289,7 @@ def main(): # Check if synapse is already running if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): abort("synapse.app.homeserver already running") - start(configfile, bool(options.no_daemonize)) + start(configfile, bool(options.daemonize)) for worker in workers: env = os.environ.copy() diff --git a/sytest-blacklist b/sytest-blacklist new file mode 100644 index 0000000000000000000000000000000000000000..11785fd43f90a63e25e0dd99f3989d60cbcb6eb4 --- /dev/null +++ b/sytest-blacklist @@ -0,0 +1,31 @@ +# This file serves as a blacklist for SyTest tests that we expect will fail in +# Synapse. +# +# Each line of this file is scanned by sytest during a run and if the line +# exactly matches the name of a test, it will be marked as "expected fail", +# meaning the test will still run, but failure will not mark the entire test +# suite as failing. +# +# Test names are encouraged to have a bug accompanied with them, serving as an +# explanation for why the test has been excluded. 
+ +# Blacklisted due to https://github.com/matrix-org/synapse/issues/1679 +Remote room members also see posted message events + +# Blacklisted due to https://github.com/matrix-org/synapse/issues/2065 +Guest users can accept invites to private rooms over federation + +# Blacklisted due to https://github.com/vector-im/riot-web/issues/7211 +The only membership state included in a gapped incremental sync is for senders in the timeline + +# Blacklisted due to https://github.com/matrix-org/synapse/issues/1658 +Newly created users see their own presence in /initialSync (SYT-34) + +# Blacklisted due to https://github.com/matrix-org/synapse/issues/1396 +Should reject keys claiming to belong to a different user + +# Blacklisted due to https://github.com/matrix-org/synapse/issues/1531 +Enabling an unknown default rule fails with 404 + +# Blacklisted due to https://github.com/matrix-org/synapse/issues/1663 +New federated private chats get full presence information (SYN-115) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index d4e75b5b2e7229bcd808fd7c8553557ab6dc59a8..c0cb8ef296ffc3de66f32e8d244ccd52b1e77e30 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -21,7 +21,14 @@ from twisted.internet import defer import synapse.handlers.auth from synapse.api.auth import Auth -from synapse.api.errors import AuthError, Codes, ResourceLimitError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + InvalidClientTokenError, + MissingClientTokenError, + ResourceLimitError, +) from synapse.types import UserID from tests import unittest @@ -70,7 +77,9 @@ class AuthTestCase(unittest.TestCase): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_user_missing_token(self): user_info = {"name": self.test_user, "token_id": "ditto"} @@ -79,7 +88,9 @@ class AuthTestCase(unittest.TestCase): request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, MissingClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_MISSING_TOKEN") @defer.inlineCallbacks def test_get_user_by_req_appservice_valid_token(self): @@ -133,7 +144,9 @@ class AuthTestCase(unittest.TestCase): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_appservice_bad_token(self): self.store.get_app_service_by_token = Mock(return_value=None) @@ -143,7 +156,9 @@ class AuthTestCase(unittest.TestCase): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_appservice_missing_token(self): app_service = Mock(token="foobar", url="a_url", sender=self.test_user) 
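Each of these test_auth.py hunks repeats the same three assertions against the new, more specific exception types. If the pattern kept growing it could be factored into a helper; a sketch, assuming a twisted.trial-style test case (the mixin and method name are ours, not part of this patch):

```python
class AuthErrorAssertions(object):
    """Mixin for twisted.trial test cases (illustrative only)."""

    def assert_auth_error(self, d, exc_type, errcode):
        # Resolve the failed get_user_by_req deferred, then check both the
        # HTTP status and the Matrix errcode, as the hunks above do inline.
        f = self.failureResultOf(d, exc_type).value
        self.assertEqual(f.code, 401)
        self.assertEqual(f.errcode, errcode)


# Usage, mirroring the hunks above:
#   self.assert_auth_error(d, InvalidClientTokenError, "M_UNKNOWN_TOKEN")
#   self.assert_auth_error(d, MissingClientTokenError, "M_MISSING_TOKEN")
```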
@@ -153,7 +168,9 @@ class AuthTestCase(unittest.TestCase): request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, MissingClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_MISSING_TOKEN") @defer.inlineCallbacks def test_get_user_by_req_appservice_valid_token_valid_user_id(self): @@ -244,10 +261,12 @@ class AuthTestCase(unittest.TestCase): USER_ID = "@percy:matrix.org" self.store.add_access_token_to_user = Mock() - token = yield self.hs.handlers.auth_handler.issue_access_token( - USER_ID, "DEVICE" + token = yield self.hs.handlers.auth_handler.get_access_token_for_user_id( + USER_ID, "DEVICE", valid_until_ms=None + ) + self.store.add_access_token_to_user.assert_called_with( + USER_ID, token, "DEVICE", None ) - self.store.add_access_token_to_user.assert_called_with(USER_ID, token, "DEVICE") def get_user(tok): if token != tok: @@ -280,7 +299,7 @@ class AuthTestCase(unittest.TestCase): request.args[b"access_token"] = [guest_tok.encode("ascii")] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - with self.assertRaises(AuthError) as cm: + with self.assertRaises(InvalidClientCredentialsError) as cm: yield self.auth.get_user_by_req(request, allow_guest=True) self.assertEqual(401, cm.exception.code) @@ -325,7 +344,7 @@ class AuthTestCase(unittest.TestCase): unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.hs.config.mau_limits_reserved_threepids = [threepid] - yield self.store.register(user_id="user1", token="123", password_hash=None) + yield self.store.register_user(user_id="user1", password_hash=None) with self.assertRaises(ResourceLimitError): yield self.auth.check_auth_blocking() diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index db9f86bdacf0ecc8ed28e989b4629e48dc60bed8..04b8c2c07c621fae7f53ea316dee5d3352fa3352 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -22,7 +22,7 @@ from synapse.appservice.scheduler import ( _ServiceQueuer, _TransactionController, ) -from synapse.util.logcontext import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable from tests import unittest diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py new file mode 100644 index 0000000000000000000000000000000000000000..13ab282384e37645a56c7d497a1286e4863e063e --- /dev/null +++ b/tests/config/test_ratelimiting.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
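The hunks just above show a change that recurs through the rest of these tests: the one-shot `store.register(user_id, token, ...)` becomes `register_user`, with access tokens added separately via `add_access_token_to_user` and an explicit `valid_until_ms` expiry. A sketch of the two-step shape as the updated tests exercise it (the wrapper function is ours, purely illustrative):

```python
from twisted.internet import defer


@defer.inlineCallbacks
def register_with_token(store, user_id, token, device_id=None):
    # Registration no longer mints an access token implicitly.
    yield store.register_user(user_id=user_id, password_hash=None)

    # Tokens are granted in a separate step, with an explicit expiry;
    # valid_until_ms=None means the token never expires.
    yield store.add_access_token_to_user(
        user_id, token, device_id, valid_until_ms=None
    )
```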
+from synapse.config.homeserver import HomeServerConfig + +from tests.unittest import TestCase +from tests.utils import default_config + + +class RatelimitConfigTestCase(TestCase): + def test_parse_rc_federation(self): + config_dict = default_config("test") + config_dict["rc_federation"] = { + "window_size": 20000, + "sleep_limit": 693, + "sleep_delay": 252, + "reject_limit": 198, + "concurrent": 7, + } + + config = HomeServerConfig() + config.parse_config_dict(config_dict, "", "") + config_obj = config.rc_federation + + self.assertEqual(config_obj.window_size, 20000) + self.assertEqual(config_obj.sleep_limit, 693) + self.assertEqual(config_obj.sleep_delay, 252) + self.assertEqual(config_obj.reject_limit, 198) + self.assertEqual(config_obj.concurrent, 7) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 5a355f00cc2f39593973e1779ab197d4a560f8fa..795703967d789ee6321754e4ccaff2246198c72e 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -30,9 +30,12 @@ from synapse.crypto.keyring import ( ServerKeyFetcher, StoreKeyFetcher, ) +from synapse.logging.context import ( + LoggingContext, + PreserveLoggingContext, + make_deferred_yieldable, +) from synapse.storage.keys import FetchKeyResult -from synapse.util import logcontext -from synapse.util.logcontext import LoggingContext from tests import unittest @@ -131,7 +134,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def get_perspectives(**kwargs): self.assertEquals(LoggingContext.current_context().request, "11") - with logcontext.PreserveLoggingContext(): + with PreserveLoggingContext(): yield persp_deferred defer.returnValue(persp_resp) @@ -158,7 +161,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertFalse(res_deferreds[0].called) res_deferreds[0].addBoth(self.check_context, None) - yield logcontext.make_deferred_yieldable(res_deferreds[0]) + yield make_deferred_yieldable(res_deferreds[0]) # let verify_json_objects_for_server finish its work before we kill the # logcontext @@ -184,7 +187,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) - yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) + yield make_deferred_yieldable(res_deferreds_2[0]) # let verify_json_objects_for_server finish its work before we kill the # logcontext diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index 5e7d2d3361f3e3276b363cf3579a033abbec34f4..fc37c4328c3ac59ff7b71dd63805a9caa4c0df7a 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -55,7 +55,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -94,7 +94,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -127,7 +127,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -169,7 +169,7 @@ class 
ExfiltrateData(unittest.HomeserverTestCase): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called_once() @@ -198,7 +198,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_not_called() writer.write_state.assert_not_called() diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index b204a0700d25da5bdd26171b9e26a051f0e241be..b03103d96ff54662b2c90af6a08685fc7b6af405 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -117,7 +117,9 @@ class AuthTestCase(unittest.TestCase): def test_mau_limits_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) yield self.auth_handler.validate_short_term_login_token_and_get_user_id( self._get_macaroon().serialize() @@ -131,7 +133,9 @@ class AuthTestCase(unittest.TestCase): ) with self.assertRaises(ResourceLimitError): - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.large_number_of_users) @@ -150,7 +154,9 @@ class AuthTestCase(unittest.TestCase): return_value=defer.succeed(self.hs.config.max_mau_value) ) with self.assertRaises(ResourceLimitError): - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) @@ -166,7 +172,9 @@ class AuthTestCase(unittest.TestCase): self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().user_last_seen_monthly_active = Mock( return_value=defer.succeed(self.hs.get_clock().time_msec()) ) @@ -185,7 +193,9 @@ class AuthTestCase(unittest.TestCase): return_value=defer.succeed(self.small_number_of_users) ) # Ensure does not raise exception - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.small_number_of_users) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 4edce7af435f515fc5cad285e2b2d1c3e0862897..90d0129374043bddbe8853bbeeb666c9dcf470a4 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -18,7 +18,7 @@ from mock import Mock from twisted.internet import defer from synapse.api.constants import UserTypes -from synapse.api.errors import ResourceLimitError, SynapseError +from synapse.api.errors import Codes, ResourceLimitError, SynapseError from synapse.handlers.register 
import RegistrationHandler from synapse.types import RoomAlias, UserID, create_requester @@ -67,7 +67,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user_id = frank.to_string() requester = create_requester(user_id) result_user_id, result_token = self.get_success( - self.handler.get_or_create_user(requester, frank.localpart, "Frankie") + self.get_or_create_user(requester, frank.localpart, "Frankie") ) self.assertEquals(result_user_id, user_id) self.assertTrue(result_token is not None) @@ -77,17 +77,13 @@ class RegistrationTestCase(unittest.HomeserverTestCase): store = self.hs.get_datastore() frank = UserID.from_string("@frank:test") self.get_success( - store.register( - user_id=frank.to_string(), - token="jkv;g498752-43gj['eamb!-5", - password_hash=None, - ) + store.register_user(user_id=frank.to_string(), password_hash=None) ) local_part = frank.localpart user_id = frank.to_string() requester = create_requester(user_id) result_user_id, result_token = self.get_success( - self.handler.get_or_create_user(requester, local_part, None) + self.get_or_create_user(requester, local_part, None) ) self.assertEquals(result_user_id, user_id) self.assertTrue(result_token is not None) @@ -95,9 +91,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_mau_limits_when_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - self.get_success( - self.handler.get_or_create_user(self.requester, "a", "display_name") - ) + self.get_success(self.get_or_create_user(self.requester, "a", "display_name")) def test_get_or_create_user_mau_not_blocked(self): self.hs.config.limit_usage_by_mau = True @@ -105,7 +99,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): return_value=defer.succeed(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception - self.get_success(self.handler.get_or_create_user(self.requester, "c", "User")) + self.get_success(self.get_or_create_user(self.requester, "c", "User")) def test_get_or_create_user_mau_blocked(self): self.hs.config.limit_usage_by_mau = True @@ -113,7 +107,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): return_value=defer.succeed(self.lots_of_users) ) self.get_failure( - self.handler.get_or_create_user(self.requester, "b", "display_name"), + self.get_or_create_user(self.requester, "b", "display_name"), ResourceLimitError, ) @@ -121,7 +115,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): return_value=defer.succeed(self.hs.config.max_mau_value) ) self.get_failure( - self.handler.get_or_create_user(self.requester, "b", "display_name"), + self.get_or_create_user(self.requester, "b", "display_name"), ResourceLimitError, ) @@ -131,21 +125,21 @@ class RegistrationTestCase(unittest.HomeserverTestCase): return_value=defer.succeed(self.lots_of_users) ) self.get_failure( - self.handler.register(localpart="local_part"), ResourceLimitError + self.handler.register_user(localpart="local_part"), ResourceLimitError ) self.store.get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) self.get_failure( - self.handler.register(localpart="local_part"), ResourceLimitError + self.handler.register_user(localpart="local_part"), ResourceLimitError ) def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" self.hs.config.auto_join_rooms = [room_alias_str] - res = self.get_success(self.handler.register(localpart="jeff")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = 
self.get_success(self.handler.register_user(localpart="jeff")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -156,25 +150,25 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_auto_create_auto_join_rooms_with_no_rooms(self): self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") - res = self.get_success(self.handler.register(frank.localpart)) - self.assertEqual(res[0], frank.to_string()) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(frank.localpart)) + self.assertEqual(user_id, frank.to_string()) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_where_room_is_another_domain(self): self.hs.config.auto_join_rooms = ["#room:another"] frank = UserID.from_string("@frank:test") - res = self.get_success(self.handler.register(frank.localpart)) - self.assertEqual(res[0], frank.to_string()) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(frank.localpart)) + self.assertEqual(user_id, frank.to_string()) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_where_auto_create_is_false(self): self.hs.config.autocreate_auto_join_rooms = False room_alias_str = "#room:test" self.hs.config.auto_join_rooms = [room_alias_str] - res = self.get_success(self.handler.register(localpart="jeff")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_rooms_when_support_user_exists(self): @@ -182,8 +176,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.hs.config.auto_join_rooms = [room_alias_str] self.store.is_support_user = Mock(return_value=True) - res = self.get_success(self.handler.register(localpart="support")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="support")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_alias_str) @@ -211,24 +205,82 @@ class RegistrationTestCase(unittest.HomeserverTestCase): # When:- # * the user is registered and post consent actions are called - res = self.get_success(self.handler.register(localpart="jeff")) - self.get_success(self.handler.post_consent_actions(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + self.get_success(self.handler.post_consent_actions(user_id)) # Then:- # * Ensure that they have not been joined to the room - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_register_support_user(self): - res = self.get_success( - self.handler.register(localpart="user", user_type=UserTypes.SUPPORT) + user_id = self.get_success( + self.handler.register_user(localpart="user", user_type=UserTypes.SUPPORT) ) - 
self.assertTrue(self.store.is_support_user(res[0])) + d = self.store.is_support_user(user_id) + self.assertTrue(self.get_success(d)) def test_register_not_support_user(self): - res = self.get_success(self.handler.register(localpart="user")) - self.assertFalse(self.store.is_support_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="user")) + d = self.store.is_support_user(user_id) + self.assertFalse(self.get_success(d)) def test_invalid_user_id_length(self): invalid_user_id = "x" * 256 - self.get_failure(self.handler.register(localpart=invalid_user_id), SynapseError) + self.get_failure( + self.handler.register_user(localpart=invalid_user_id), SynapseError + ) + + @defer.inlineCallbacks + def get_or_create_user(self, requester, localpart, displayname, password_hash=None): + """Creates a new user if the user does not exist, + else revokes all previous access tokens and generates a new one. + + XXX: this used to be in the main codebase, but was only used by this file, + so got moved here. TODO: get rid of it, probably + + Args: + localpart : The local part of the user ID to register. If None, + one will be randomly generated. + Returns: + A tuple of (user_id, access_token). + Raises: + RegistrationError if there was a problem registering. + """ + if localpart is None: + raise SynapseError(400, "Request must include user id") + yield self.hs.get_auth().check_auth_blocking() + need_register = True + + try: + yield self.handler.check_username(localpart) + except SynapseError as e: + if e.errcode == Codes.USER_IN_USE: + need_register = False + else: + raise + + user = UserID(localpart, self.hs.hostname) + user_id = user.to_string() + token = self.macaroon_generator.generate_access_token(user_id) + + if need_register: + yield self.handler.register_with_store( + user_id=user_id, + password_hash=password_hash, + create_profile_with_displayname=user.localpart, + ) + else: + yield self.hs.get_auth_handler().delete_access_tokens_for_user(user_id) + + yield self.store.add_access_token_to_user( + user_id=user_id, token=token, device_id=None, valid_until_ms=None + ) + + if displayname is not None: + # logger.info("setting user display name: %s -> %s", user_id, displayname) + yield self.hs.get_profile_handler().set_displayname( + user, requester, displayname, by_admin=True + ) + + defer.returnValue((user_id, token)) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index b135486c4877d5d6b8f30fe6f84be6e17f9a6fe3..c5e91a8c41b84dc10a83e1cf8fb54750035fb31a 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -47,11 +47,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): def test_handle_local_profile_change_with_support_user(self): support_user_id = "@support:test" self.get_success( - self.store.register( - user_id=support_user_id, - token="123", - password_hash=None, - user_type=UserTypes.SUPPORT, + self.store.register_user( + user_id=support_user_id, password_hash=None, user_type=UserTypes.SUPPORT ) ) @@ -73,11 +70,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): def test_handle_user_deactivated_support_user(self): s_user_id = "@support:test" self.get_success( - self.store.register( - user_id=s_user_id, - token="123", - password_hash=None, - user_type=UserTypes.SUPPORT, + self.store.register_user( + user_id=s_user_id, password_hash=None, user_type=UserTypes.SUPPORT ) ) @@ -90,7 +84,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): def 
test_handle_user_deactivated_regular_user(self): r_user_id = "@regular:test" self.get_success( - self.store.register(user_id=r_user_id, token="123", password_hash=None) + self.store.register_user(user_id=r_user_id, password_hash=None) ) self.store.remove_from_user_dir = Mock() self.get_success(self.handler.handle_user_deactivated(r_user_id)) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 417fda3ab205b2e7b6d07cc60bf219b28ad93356..a49f9b322479b0eacbc0338eca2ee36b7d6ccc9f 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -36,8 +36,8 @@ from synapse.http.federation.matrix_federation_agent import ( _cache_period_from_headers, ) from synapse.http.federation.srv_resolver import Server +from synapse.logging.context import LoggingContext from synapse.util.caches.ttlcache import TTLCache -from synapse.util.logcontext import LoggingContext from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.server import FakeTransport, ThreadedMemoryReactorClock diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index cf6c6e95b5204bc13a10f73f1835f5f2a71c3405..65b51dc981b5ca240399d8f129ff563a0bcbc231 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -22,7 +22,7 @@ from twisted.internet.error import ConnectError from twisted.names import dns, error from synapse.http.federation.srv_resolver import SrvResolver -from synapse.util.logcontext import LoggingContext +from synapse.logging.context import LoggingContext from tests import unittest from tests.utils import MockClock diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index c4c0d9b96817a29bd0cbc4b28e9779a07ee2a2ad..b9d6d7ad1c1fa333e3634b1249741c78e9fc94e7 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -29,7 +29,7 @@ from synapse.http.matrixfederationclient import ( MatrixFederationHttpClient, MatrixFederationRequest, ) -from synapse.util.logcontext import LoggingContext +from synapse.logging.context import LoggingContext from tests.server import FakeTransport from tests.unittest import HomeserverTestCase diff --git a/tests/patch_inline_callbacks.py b/tests/patch_inline_callbacks.py index ee0add3455e711f5bd09f451eec28d11529cd17f..220884311caec91ff4b736096cc2efa2e6edf154 100644 --- a/tests/patch_inline_callbacks.py +++ b/tests/patch_inline_callbacks.py @@ -28,7 +28,7 @@ def do_patch(): Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit """ - from synapse.util.logcontext import LoggingContext + from synapse.logging.context import LoggingContext orig_inline_callbacks = defer.inlineCallbacks diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 22c3f73ef31e37d0912be6432cbf4fcb05a8d640..8ce6bb62da4bbcf909ee972301fb13017bb2b92c 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -18,8 +18,8 @@ from mock import Mock from twisted.internet.defer import Deferred import synapse.rest.admin +from synapse.logging.context import make_deferred_yieldable from synapse.rest.client.v1 import login, room -from synapse.util.logcontext import make_deferred_yieldable from tests.unittest import HomeserverTestCase diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 
708dc26e6147440a6dc58831893044f6cfdd5941..a8adc9a61d1b69842cce09f9e1ea710be89fd545 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -2,9 +2,9 @@ from mock import Mock, call from twisted.internet import defer, reactor +from synapse.logging.context import LoggingContext from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache from synapse.util import Clock -from synapse.util.logcontext import LoggingContext from tests import unittest from tests.utils import MockClock diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 0397f91a9e69d88c380d3af3700f9a1a286f4a17..eae5411325fce0bb6d407283c57e986adc2b5745 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -2,10 +2,14 @@ import json import synapse.rest.admin from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import devices +from synapse.rest.client.v2_alpha.account import WhoamiRestServlet from tests import unittest +from tests.unittest import override_config LOGIN_URL = b"/_matrix/client/r0/login" +TEST_URL = b"/_matrix/client/r0/account/whoami" class LoginRestServletTestCase(unittest.HomeserverTestCase): @@ -13,6 +17,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, + devices.register_servlets, + lambda hs, http_server: WhoamiRestServlet(hs).register(http_server), ] def make_homeserver(self, reactor, clock): @@ -144,3 +150,105 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + + @override_config({"session_lifetime": "24h"}) + def test_soft_logout(self): + self.register_user("kermit", "monkey") + + # we shouldn't be able to make requests without an access token + request, channel = self.make_request(b"GET", TEST_URL) + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEquals(channel.json_body["errcode"], "M_MISSING_TOKEN") + + # log in as normal + params = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": "kermit"}, + "password": "monkey", + } + request, channel = self.make_request(b"POST", LOGIN_URL, params) + self.render(request) + + self.assertEquals(channel.code, 200, channel.result) + access_token = channel.json_body["access_token"] + device_id = channel.json_body["device_id"] + + # we should now be able to make requests with the access token + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 200, channel.result) + + # time passes + self.reactor.advance(24 * 3600) + + # ... 
and we should be soft-logouted + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], True) + + # + # test behaviour after deleting the expired device + # + + # we now log in as a different device + access_token_2 = self.login("kermit", "monkey") + + # more requests with the expired token should still return a soft-logout + self.reactor.advance(3600) + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], True) + + # ... but if we delete that device, it will be a proper logout + self._delete_device(access_token_2, "kermit", "monkey", device_id) + + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], False) + + def _delete_device(self, access_token, user_id, password, device_id): + """Perform the UI-Auth to delete a device""" + request, channel = self.make_request( + b"DELETE", "devices/" + device_id, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + # check it's a UI-Auth fail + self.assertEqual( + set(channel.json_body.keys()), + {"flows", "params", "session"}, + channel.result, + ) + + auth = { + "type": "m.login.password", + # https://github.com/matrix-org/synapse/issues/5665 + # "identifier": {"type": "m.id.user", "user": user_id}, + "user": user_id, + "password": password, + "session": channel.json_body["session"], + } + + request, channel = self.make_request( + b"DELETE", + "devices/" + device_id, + access_token=access_token, + content={"auth": auth}, + ) + self.render(request) + self.assertEquals(channel.code, 200, channel.result) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index dff9b2f10c553f0f9973405abddb2edb89861c37..140d8b377278244b8e83273598b31a51e468c5fa 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -288,3 +288,50 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): # if the user isn't already in the room), because we only want to # make sure the user isn't in the room. pass + + +class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["require_auth_for_profile_requests"] = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, hs): + # User requesting the profile. + self.requester = self.register_user("requester", "pass") + self.requester_tok = self.login("requester", "pass") + + def test_can_lookup_own_profile(self): + """Tests that a user can lookup their own profile without having to be in a room + if 'require_auth_for_profile_requests' is set to true in the server's config. 
+ """ + request, channel = self.make_request( + "GET", "/profile/" + self.requester, access_token=self.requester_tok + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + request, channel = self.make_request( + "GET", + "/profile/" + self.requester + "/displayname", + access_token=self.requester_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + request, channel = self.make_request( + "GET", + "/profile/" + self.requester + "/avatar_url", + access_token=self.requester_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 3deeed3a70f669232d3bb353b5274bd8d9557359..58c69518520d72c359c9017f69662a5639ae0155 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -126,6 +126,11 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel.json_body["chunk"][0], ) + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEquals( + channel.json_body["original_event"]["event_id"], self.parent_id + ) + # Make sure next_batch has something in it that looks like it could be a # valid token. self.assertIsInstance( @@ -466,9 +471,15 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.json_body["content"], new_body) - self.assertEquals( - channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACE: {"event_id": edit_event_id}}, + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) def test_multi_edit(self): @@ -518,9 +529,15 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.json_body["content"], new_body) - self.assertEquals( - channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACE: {"event_id": edit_event_id}}, + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) def _send_relation( diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 39c9342423af094d04644db4f3d4381f99d3390e..bc662b61dbefda189c3d0f5be758afdaee824df0 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -24,11 +24,11 @@ from six.moves.urllib import parse from twisted.internet.defer import Deferred +from synapse.logging.context import make_deferred_yieldable from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend -from synapse.util.logcontext import make_deferred_yieldable from tests import unittest diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 8fe596186640aa0a2991cd43529623d6281f0cb1..976652aee820665ca300caee3a73d9a679327b70 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -460,3 +460,15 @@ class URLPreviewTests(unittest.HomeserverTestCase):
                 "error": "DNS resolution failure during URL preview generation",
             },
         )
+
+    def test_OPTIONS(self):
+        """
+        An OPTIONS request to the url_preview endpoint returns 200 with an empty body.
+        """
+        request, channel = self.make_request(
+            "OPTIONS", "url_preview?url=http://example.com", shorthand=False
+        )
+        request.render(self.preview_url)
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.json_body, {})
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 59c6f8c227a025a82fb515e3d7586e2943bce824..09305c3bf1c9e5f8359651eda73bbe23cd576d52 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -185,9 +185,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.hs.config.limit_usage_by_mau = True
         self.hs.config.max_mau_value = 50
         user_id = "@user:server"
-        self.get_success(
-            self.store.register(user_id=user_id, token="123", password_hash=None)
-        )
+        self.get_success(self.store.register_user(user_id=user_id, password_hash=None))
         active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
         self.assertFalse(active)
 
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 0ce0b991f98a6c441ffe49c380ffecdf43b62343..1494650d10be22108cb7fba4c73a12384d0d679d 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -53,10 +53,10 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         # -1 because user3 is a support user and does not count
         user_num = len(threepids) - 1
 
-        self.store.register(user_id=user1, token="123", password_hash=None)
-        self.store.register(user_id=user2, token="456", password_hash=None)
-        self.store.register(
-            user_id=user3, token="789", password_hash=None, user_type=UserTypes.SUPPORT
+        self.store.register_user(user_id=user1, password_hash=None)
+        self.store.register_user(user_id=user2, password_hash=None)
+        self.store.register_user(
+            user_id=user3, password_hash=None, user_type=UserTypes.SUPPORT
         )
 
         self.pump()
@@ -161,9 +161,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
     def test_populate_monthly_users_is_guest(self):
         # Test that guest users are not added to mau list
         user_id = "@user_id:host"
-        self.store.register(
-            user_id=user_id, token="123", password_hash=None, make_guest=True
-        )
+        self.store.register_user(user_id=user_id, password_hash=None, make_guest=True)
         self.store.upsert_monthly_active_user = Mock()
         self.store.populate_monthly_active_users(user_id)
         self.pump()
@@ -216,8 +214,8 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.assertEquals(self.get_success(count), 0)
 
         # Test reserved registered users
-        self.store.register(user_id=user1, token="123", password_hash=None)
-        self.store.register(user_id=user2, token="456", password_hash=None)
+        self.store.register_user(user_id=user1, password_hash=None)
+        self.store.register_user(user_id=user2, password_hash=None)
         self.pump()
 
         now = int(self.hs.get_clock().time_msec())
@@ -232,11 +230,8 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
         self.pump()
         self.assertEqual(self.get_success(count), 0)
 
-        self.store.register(
-            user_id=support_user_id,
-            token="123",
-            password_hash=None,
-            user_type=UserTypes.SUPPORT,
+        self.store.register_user(
+            user_id=support_user_id, password_hash=None, user_type=UserTypes.SUPPORT
         )
         self.store.upsert_monthly_active_user(support_user_id)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 625b651e9123331bae23f7e616e147ff7c57e94a..0253c4ac05b441b93cc83a9ab9bffb01fe4c6669 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -37,7 +37,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_register(self):
-        yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+        yield self.store.register_user(self.user_id, self.pwhash)
 
         self.assertEquals(
             {
@@ -53,17 +53,11 @@ class RegistrationStoreTestCase(unittest.TestCase):
             (yield self.store.get_user_by_id(self.user_id)),
         )
 
-        result = yield self.store.get_user_by_access_token(self.tokens[0])
-
-        self.assertDictContainsSubset({"name": self.user_id}, result)
-
-        self.assertTrue("token_id" in result)
-
     @defer.inlineCallbacks
     def test_add_tokens(self):
-        yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+        yield self.store.register_user(self.user_id, self.pwhash)
         yield self.store.add_access_token_to_user(
-            self.user_id, self.tokens[1], self.device_id
+            self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
         )
 
         result = yield self.store.get_user_by_access_token(self.tokens[1])
@@ -77,9 +71,12 @@ class RegistrationStoreTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_user_delete_access_tokens(self):
         # add some tokens
-        yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
+        yield self.store.register_user(self.user_id, self.pwhash)
         yield self.store.add_access_token_to_user(
-            self.user_id, self.tokens[1], self.device_id
+            self.user_id, self.tokens[0], device_id=None, valid_until_ms=None
+        )
+        yield self.store.add_access_token_to_user(
+            self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
         )
 
         # now delete some
@@ -108,24 +105,12 @@ class RegistrationStoreTestCase(unittest.TestCase):
         res = yield self.store.is_support_user(None)
         self.assertFalse(res)
 
-        yield self.store.register(user_id=TEST_USER, token="123", password_hash=None)
+        yield self.store.register_user(user_id=TEST_USER, password_hash=None)
         res = yield self.store.is_support_user(TEST_USER)
         self.assertFalse(res)
 
-        yield self.store.register(
-            user_id=SUPPORT_USER,
-            token="456",
-            password_hash=None,
-            user_type=UserTypes.SUPPORT,
+        yield self.store.register_user(
+            user_id=SUPPORT_USER, password_hash=None, user_type=UserTypes.SUPPORT
         )
         res = yield self.store.is_support_user(SUPPORT_USER)
         self.assertTrue(res)
-
-
-class TokenGenerator:
-    def __init__(self):
-        self._last_issued_token = 0
-
-    def generate(self, user_id):
-        self._last_issued_token += 1
-        return "%s-%d" % (user_id, self._last_issued_token)
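The storage-test changes above all track a single API change: the old `register()`, which minted an access token as a side effect, is split into `register_user()` plus an explicit `add_access_token_to_user()`, which now also takes a `valid_until_ms` expiry. A before/after sketch using the Deferred-based store API exactly as it appears in this diff:

```python
from twisted.internet import defer


@defer.inlineCallbacks
def register_with_token(store, user_id, token, device_id):
    # Before: one call did both jobs.
    #   yield store.register(user_id, token, password_hash=None)

    # After: registration and token creation are separate steps, and tokens
    # carry an explicit expiry (None means the token never expires).
    yield store.register_user(user_id=user_id, password_hash=None)
    yield store.add_access_token_to_user(
        user_id, token, device_id, valid_until_ms=None
    )
```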
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 6a8339b5614f00a9da91947ac321c319a7a6861d..a73f18f88e22a7aad4ec0ee77e4a33de52ba97d3 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -3,9 +3,9 @@ from mock import Mock
 from twisted.internet.defer import maybeDeferred, succeed
 
 from synapse.events import FrozenEvent
+from synapse.logging.context import LoggingContext
 from synapse.types import Requester, UserID
 from synapse.util import Clock
-from synapse.util.logcontext import LoggingContext
 
 from tests import unittest
 from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
diff --git a/tests/test_server.py b/tests/test_server.py
index da29ae92ced74283652f8510d2648d1d18aa1977..ba08483a4b6961a18e5cf0d16e8efa96bd80ae18 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -26,8 +26,8 @@ from twisted.web.server import NOT_DONE_YET
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite, logger
+from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
-from synapse.util.logcontext import make_deferred_yieldable
 
 from tests import unittest
 from tests.server import (
diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
index 813f984199be272bc85b580e56e8a8d52f6f7771..2d96b0fa8da41eae54a85f7cdf5aaab2d94f47d2 100644
--- a/tests/test_utils/logging_setup.py
+++ b/tests/test_utils/logging_setup.py
@@ -17,7 +17,7 @@ import os
 
 import twisted.logger
 
-from synapse.util.logcontext import LoggingContextFilter
+from synapse.logging.context import LoggingContextFilter
 
 
 class ToTwistedHandler(logging.Handler):
diff --git a/tests/unittest.py b/tests/unittest.py
index 684d5cb1cf4c155e7a84f977591e031082ec2b27..cabe787cb41427e43492cc000e6d63b73481721d 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -33,9 +33,9 @@ from synapse.api.constants import EventTypes
 from synapse.config.homeserver import HomeServerConfig
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseRequest
+from synapse.logging.context import LoggingContext
 from synapse.server import HomeServer
 from synapse.types import Requester, UserID, create_requester
-from synapse.util.logcontext import LoggingContext
 
 from tests.server import get_clock, make_request, render, setup_test_homeserver
 from tests.test_utils.logging_setup import setup_logging
@@ -157,6 +157,21 @@ class HomeserverTestCase(TestCase):
     """
     A base TestCase that reduces boilerplate for HomeServer-using test cases.
 
+    Defines a setUp method which creates a mock reactor, and instantiates a homeserver
+    running on that reactor.
+
+    There are various hooks for modifying the way that the homeserver is instantiated:
+
+    * override make_homeserver, for example by making it pass different parameters into
+      setup_test_homeserver.
+
+    * override default_config, to return a modified configuration dictionary for use
+      by setup_test_homeserver.
+
+    * On a per-test basis, you can use the @override_config decorator to give a
+      dictionary containing additional configuration settings to be added to the basic
+      config dict.
+
     Attributes:
         servlets (list[function]): List of servlet registration function.
         user_id (str): The user ID to assume if auth is hijacked.
@@ -168,6 +183,13 @@ class HomeserverTestCase(TestCase):
     hijack_auth = True
     needs_threadpool = False
 
+    def __init__(self, methodName, *args, **kwargs):
+        super().__init__(methodName, *args, **kwargs)
+
+        # see if we have any additional config for this test
+        method = getattr(self, methodName)
+        self._extra_config = getattr(method, "_extra_config", None)
+
     def setUp(self):
         """
         Set up the TestCase by calling the homeserver constructor, optionally
@@ -276,7 +298,14 @@
         Args:
             name (str): The homeserver name/domain.
         """
-        return default_config(name)
+        config = default_config(name)
+
+        # apply any additional config which was specified via the override_config
+        # decorator.
+        if self._extra_config is not None:
+            config.update(self._extra_config)
+
+        return config
 
     def prepare(self, reactor, clock, homeserver):
         """
@@ -534,3 +563,27 @@ class HomeserverTestCase(TestCase):
         )
         self.render(request)
         self.assertEqual(channel.code, 403, channel.result)
+
+
+def override_config(extra_config):
+    """A decorator which can be applied to test functions to give additional HS config
+
+    For example:
+
+        class MyTestCase(HomeserverTestCase):
+            @override_config({"enable_registration": False, ...})
+            def test_foo(self):
+                ...
+
+    Args:
+        extra_config(dict): Additional config settings to be merged into the default
+            config dict before instantiating the test homeserver.
+    """
+
+    def decorator(func):
+        func._extra_config = extra_config
+        return func
+
+    return decorator
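Nothing in the `override_config` machinery is Synapse-specific: the decorator stamps the extra settings onto the test function, `__init__` retrieves them from the bound method with `getattr`, and `default_config` merges them in. A distilled, runnable sketch of the same pattern (class and config keys are illustrative):

```python
import unittest


def override_config(extra_config):
    """Stamp extra config onto a test method for the runner to pick up."""

    def decorator(func):
        func._extra_config = extra_config
        return func

    return decorator


class ConfigurableTestCase(unittest.TestCase):
    def __init__(self, methodName):
        super().__init__(methodName)
        # Attribute access on a bound method falls through to the underlying
        # function, so the stamped config is visible here.
        self._extra_config = getattr(getattr(self, methodName), "_extra_config", None)

    def default_config(self):
        config = {"enable_registration": True}
        if self._extra_config is not None:
            config.update(self._extra_config)
        return config

    @override_config({"enable_registration": False})
    def test_registration_disabled(self):
        self.assertFalse(self.default_config()["enable_registration"])
```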
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 6f8f52537cd34787670b0dff5bef61b429c7ff17..7807328e2fb1f93cbec63df9d854026a0813688e 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -21,7 +21,11 @@ import mock
 from twisted.internet import defer, reactor
 
 from synapse.api.errors import SynapseError
-from synapse.util import logcontext
+from synapse.logging.context import (
+    LoggingContext,
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+)
 from synapse.util.caches import descriptors
 
 from tests import unittest
@@ -32,7 +36,7 @@ logger = logging.getLogger(__name__)
 def run_on_reactor():
     d = defer.Deferred()
     reactor.callLater(0, d.callback, 0)
-    return logcontext.make_deferred_yieldable(d)
+    return make_deferred_yieldable(d)
 
 
 class CacheTestCase(unittest.TestCase):
@@ -153,7 +157,7 @@ class DescriptorTestCase(unittest.TestCase):
             def fn(self, arg1):
                 @defer.inlineCallbacks
                 def inner_fn():
-                    with logcontext.PreserveLoggingContext():
+                    with PreserveLoggingContext():
                         yield complete_lookup
                     defer.returnValue(1)
 
@@ -161,10 +165,10 @@ class DescriptorTestCase(unittest.TestCase):
 
         @defer.inlineCallbacks
         def do_lookup():
-            with logcontext.LoggingContext() as c1:
+            with LoggingContext() as c1:
                 c1.name = "c1"
                 r = yield obj.fn(1)
-                self.assertEqual(logcontext.LoggingContext.current_context(), c1)
+                self.assertEqual(LoggingContext.current_context(), c1)
             defer.returnValue(r)
 
         def check_result(r):
@@ -174,18 +178,12 @@ class DescriptorTestCase(unittest.TestCase):
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(
-            logcontext.LoggingContext.current_context(),
-            logcontext.LoggingContext.sentinel,
-        )
+        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
         d1.addCallback(check_result)
 
         # and another
         d2 = do_lookup()
-        self.assertEqual(
-            logcontext.LoggingContext.current_context(),
-            logcontext.LoggingContext.sentinel,
-        )
+        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
         d2.addCallback(check_result)
 
         # let the lookup complete
@@ -210,29 +208,25 @@ class DescriptorTestCase(unittest.TestCase):
 
         @defer.inlineCallbacks
         def do_lookup():
-            with logcontext.LoggingContext() as c1:
+            with LoggingContext() as c1:
                 c1.name = "c1"
                 try:
                     d = obj.fn(1)
                     self.assertEqual(
-                        logcontext.LoggingContext.current_context(),
-                        logcontext.LoggingContext.sentinel,
+                        LoggingContext.current_context(), LoggingContext.sentinel
                     )
                     yield d
                     self.fail("No exception thrown")
                 except SynapseError:
                     pass
 
-                self.assertEqual(logcontext.LoggingContext.current_context(), c1)
+                self.assertEqual(LoggingContext.current_context(), c1)
 
         obj = Cls()
 
         # set off a deferred which will do a cache lookup
         d1 = do_lookup()
-        self.assertEqual(
-            logcontext.LoggingContext.current_context(),
-            logcontext.LoggingContext.sentinel,
-        )
+        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
 
         return d1
 
@@ -288,23 +282,20 @@ class CachedListDescriptorTestCase(unittest.TestCase):
 
         @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
         def list_fn(self, args1, arg2):
-            assert logcontext.LoggingContext.current_context().request == "c1"
+            assert LoggingContext.current_context().request == "c1"
             # we want this to behave like an asynchronous function
             yield run_on_reactor()
-            assert logcontext.LoggingContext.current_context().request == "c1"
+            assert LoggingContext.current_context().request == "c1"
             defer.returnValue(self.mock(args1, arg2))
 
-        with logcontext.LoggingContext() as c1:
+        with LoggingContext() as c1:
             c1.request = "c1"
             obj = Cls()
             obj.mock.return_value = {10: "fish", 20: "chips"}
             d1 = obj.list_fn([10, 20], 2)
-            self.assertEqual(
-                logcontext.LoggingContext.current_context(),
-                logcontext.LoggingContext.sentinel,
-            )
+            self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
             r = yield d1
-            self.assertEqual(logcontext.LoggingContext.current_context(), c1)
+            self.assertEqual(LoggingContext.current_context(), c1)
             obj.mock.assert_called_once_with([10, 20], 2)
             self.assertEqual(r, {10: "fish", 20: "chips"})
             obj.mock.reset_mock()
diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py
index bf85d3b8ecffd64980291d3c017d609c9f93ea36..f60918069ac6962142fde77d9d1efa5fdbc254ce 100644
--- a/tests/util/test_async_utils.py
+++ b/tests/util/test_async_utils.py
@@ -16,9 +16,8 @@ from twisted.internet import defer
 from twisted.internet.defer import CancelledError, Deferred
 from twisted.internet.task import Clock
 
-from synapse.util import logcontext
+from synapse.logging.context import LoggingContext, PreserveLoggingContext
 from synapse.util.async_helpers import timeout_deferred
-from synapse.util.logcontext import LoggingContext
 
 from tests.unittest import TestCase
 
@@ -69,14 +68,14 @@ class TimeoutDeferredTest(TestCase):
         @defer.inlineCallbacks
         def blocking():
             non_completing_d = Deferred()
-            with logcontext.PreserveLoggingContext():
+            with PreserveLoggingContext():
                 try:
                     yield non_completing_d
                 except CancelledError:
                     blocking_was_cancelled[0] = True
                     raise
 
-        with logcontext.LoggingContext("one") as context_one:
+        with LoggingContext("one") as context_one:
             # the errbacks should be run in the test logcontext
             def errback(res, deferred_name):
                 self.assertIs(
diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py
index ec7ba9719cf30c61356e1f6f1f1b317756ae01d3..0ec8ef90ce8bb4fa4a40064a4e266e47c62837e0 100644
--- a/tests/util/test_linearizer.py
+++ b/tests/util/test_linearizer.py
@@ -19,7 +19,8 @@ from six.moves import range
 from twisted.internet import defer, reactor
 from twisted.internet.defer import CancelledError
 
-from synapse.util import Clock, logcontext
+from synapse.logging.context import LoggingContext
+from synapse.util import Clock
 from synapse.util.async_helpers import Linearizer
 
 from tests import unittest
@@ -51,13 +52,13 @@ class LinearizerTestCase(unittest.TestCase):
 
         @defer.inlineCallbacks
         def func(i, sleep=False):
-            with logcontext.LoggingContext("func(%s)" % i) as lc:
+            with LoggingContext("func(%s)" % i) as lc:
                 with (yield linearizer.queue("")):
-                    self.assertEqual(logcontext.LoggingContext.current_context(), lc)
+                    self.assertEqual(LoggingContext.current_context(), lc)
                     if sleep:
                         yield Clock(reactor).sleep(0)
 
-                self.assertEqual(logcontext.LoggingContext.current_context(), lc)
+                self.assertEqual(LoggingContext.current_context(), lc)
 
         func(0, sleep=True)
         for i in range(1, 100):
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 8adaee3c8d3d307eebfbb652d6bc76b0ac3e347d..8b8455c8b7b0644ae9a97bd8d89d693b625cbe20 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -1,8 +1,14 @@
 import twisted.python.failure
 from twisted.internet import defer, reactor
 
-from synapse.util import Clock, logcontext
-from synapse.util.logcontext import LoggingContext
+from synapse.logging.context import (
+    LoggingContext,
+    PreserveLoggingContext,
+    make_deferred_yieldable,
+    nested_logging_context,
+    run_in_background,
+)
+from synapse.util import Clock
 
 from .. import unittest
 
@@ -39,24 +45,17 @@ class LoggingContextTestCase(unittest.TestCase):
 
         callback_completed = [False]
 
-        def test():
+        with LoggingContext() as context_one:
             context_one.request = "one"
-            d = function()
+
+            # fire off function, but don't wait on it.
+            d2 = run_in_background(function)
 
             def cb(res):
-                self._check_test_key("one")
                 callback_completed[0] = True
                 return res
 
-            d.addCallback(cb)
-
-            return d
-
-        with LoggingContext() as context_one:
-            context_one.request = "one"
-
-            # fire off function, but don't wait on it.
-            logcontext.run_in_background(test)
+            d2.addCallback(cb)
 
         self._check_test_key("one")
 
@@ -92,7 +91,7 @@ class LoggingContextTestCase(unittest.TestCase):
     def test_run_in_background_with_non_blocking_fn(self):
         @defer.inlineCallbacks
         def nonblocking_function():
-            with logcontext.PreserveLoggingContext():
+            with PreserveLoggingContext():
                 yield defer.succeed(None)
 
         return self._test_run_in_background(nonblocking_function)
@@ -101,7 +100,23 @@ class LoggingContextTestCase(unittest.TestCase):
         # a function which returns a deferred which looks like it has been
         # called, but is actually paused
         def testfunc():
-            return logcontext.make_deferred_yieldable(_chained_deferred_function())
+            return make_deferred_yieldable(_chained_deferred_function())
+
+        return self._test_run_in_background(testfunc)
+
+    def test_run_in_background_with_coroutine(self):
+        async def testfunc():
+            self._check_test_key("one")
+            d = Clock(reactor).sleep(0)
+            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            await d
+            self._check_test_key("one")
+
+        return self._test_run_in_background(testfunc)
+
+    def test_run_in_background_with_nonblocking_coroutine(self):
+        async def testfunc():
+            self._check_test_key("one")
 
         return self._test_run_in_background(testfunc)
 
@@ -119,7 +134,7 @@ class LoggingContextTestCase(unittest.TestCase):
         with LoggingContext() as context_one:
             context_one.request = "one"
 
-            d1 = logcontext.make_deferred_yieldable(blocking_function())
+            d1 = make_deferred_yieldable(blocking_function())
             # make sure that the context was reset by make_deferred_yieldable
             self.assertIs(LoggingContext.current_context(), sentinel_context)
 
@@ -135,7 +150,7 @@ class LoggingContextTestCase(unittest.TestCase):
         with LoggingContext() as context_one:
             context_one.request = "one"
 
-            d1 = logcontext.make_deferred_yieldable(_chained_deferred_function())
+            d1 = make_deferred_yieldable(_chained_deferred_function())
             # make sure that the context was reset by make_deferred_yieldable
             self.assertIs(LoggingContext.current_context(), sentinel_context)
 
@@ -152,7 +167,7 @@ class LoggingContextTestCase(unittest.TestCase):
         with LoggingContext() as context_one:
             context_one.request = "one"
 
-            d1 = logcontext.make_deferred_yieldable("bum")
+            d1 = make_deferred_yieldable("bum")
             self._check_test_key("one")
 
             r = yield d1
@@ -161,7 +176,7 @@ class LoggingContextTestCase(unittest.TestCase):
 
     def test_nested_logging_context(self):
         with LoggingContext(request="foo"):
-            nested_context = logcontext.nested_logging_context(suffix="bar")
+            nested_context = nested_logging_context(suffix="bar")
             self.assertEqual(nested_context.request, "foo-bar")
 
diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py
index 297aebbfbeeffc8e8d4d820da15d83cedb2ec65f..0fb60caacb1aed69d48d1bf1e0910bc035c22731 100644
--- a/tests/util/test_logformatter.py
+++ b/tests/util/test_logformatter.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import sys
 
-from synapse.util.logformatter import LogFormatter
+from synapse.logging.formatter import LogFormatter
 
 from tests import unittest
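Most of the churn in the preceding test files is a mechanical rename: `synapse.util.logcontext` moved to `synapse.logging.context`, and `synapse.util.logformatter` to `synapse.logging.formatter`. Code that has to work on both sides of the move could bridge it with a guarded import; a sketch, using only names that appear in this patch:

```python
try:
    # Newer Synapse: the logging-context machinery lives here.
    from synapse.logging.context import (
        LoggingContext,
        PreserveLoggingContext,
        make_deferred_yieldable,
        run_in_background,
    )
except ImportError:
    # Older releases: fall back to the pre-move location.
    from synapse.util.logcontext import (  # noqa: F401
        LoggingContext,
        PreserveLoggingContext,
        make_deferred_yieldable,
        run_in_background,
    )
```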
diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d1aee91d537c7723f30da6eb42e5ca43ce0fd8b
--- /dev/null
+++ b/tests/util/test_ratelimitutils.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.config.homeserver import HomeServerConfig
+from synapse.util.ratelimitutils import FederationRateLimiter
+
+from tests.server import get_clock
+from tests.unittest import TestCase
+from tests.utils import default_config
+
+
+class FederationRateLimiterTestCase(TestCase):
+    def test_ratelimit(self):
+        """A simple test with the default values"""
+        reactor, clock = get_clock()
+        rc_config = build_rc_config()
+        ratelimiter = FederationRateLimiter(clock, rc_config)
+
+        with ratelimiter.ratelimit("testhost") as d1:
+            # shouldn't block
+            self.successResultOf(d1)
+
+    def test_concurrent_limit(self):
+        """Test what happens when we hit the concurrent limit"""
+        reactor, clock = get_clock()
+        rc_config = build_rc_config({"rc_federation": {"concurrent": 2}})
+        ratelimiter = FederationRateLimiter(clock, rc_config)
+
+        with ratelimiter.ratelimit("testhost") as d1:
+            # shouldn't block
+            self.successResultOf(d1)
+
+            cm2 = ratelimiter.ratelimit("testhost")
+            d2 = cm2.__enter__()
+            # also shouldn't block
+            self.successResultOf(d2)
+
+            cm3 = ratelimiter.ratelimit("testhost")
+            d3 = cm3.__enter__()
+            # this one should block, though ...
+            self.assertNoResult(d3)
+
+            # ... until we complete an earlier request
+            cm2.__exit__(None, None, None)
+            self.successResultOf(d3)
+
+    def test_sleep_limit(self):
+        """Test what happens when we hit the sleep limit"""
+        reactor, clock = get_clock()
+        rc_config = build_rc_config(
+            {"rc_federation": {"sleep_limit": 2, "sleep_delay": 500}}
+        )
+        ratelimiter = FederationRateLimiter(clock, rc_config)
+
+        with ratelimiter.ratelimit("testhost") as d1:
+            # shouldn't block
+            self.successResultOf(d1)
+
+        with ratelimiter.ratelimit("testhost") as d2:
+            # nor this
+            self.successResultOf(d2)
+
+        with ratelimiter.ratelimit("testhost") as d3:
+            # this one should block, though ...
+            self.assertNoResult(d3)
+            sleep_time = _await_resolution(reactor, d3)
+            self.assertAlmostEqual(sleep_time, 500, places=3)
+
+
+def _await_resolution(reactor, d):
+    """advance the clock until the deferred completes.
+
+    Returns the number of milliseconds it took to complete.
+    """
+    start_time = reactor.seconds()
+    while not d.called:
+        reactor.advance(0.01)
+    return (reactor.seconds() - start_time) * 1000
+
+
+def build_rc_config(settings=None):
+    config_dict = default_config("test")
+    config_dict.update(settings or {})
+    config = HomeServerConfig()
+    config.parse_config_dict(config_dict, "", "")
+    return config.rc_federation
diff --git a/tests/utils.py b/tests/utils.py
index da43166f3a0bbdb6c32c226bc1c22eec3f9d2180..8a94ce0b475d65fb3225c53963b4419e9c786944 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -34,6 +34,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import DEFAULT_ROOM_VERSION
 from synapse.federation.transport import server as federation_server
 from synapse.http.server import HttpServer
+from synapse.logging.context import LoggingContext
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import PostgresEngine, create_engine
@@ -42,7 +43,6 @@ from synapse.storage.prepare_database import (
     _setup_new_database,
     prepare_database,
 )
-from synapse.util.logcontext import LoggingContext
 from synapse.util.ratelimitutils import FederationRateLimiter
 
 # set this to True to run the tests against postgres instead of sqlite.
@@ -152,12 +152,6 @@ def default_config(name, parse=False):
         "mau_stats_only": False,
         "mau_limits_reserved_threepids": [],
         "admin_contact": None,
-        "rc_federation": {
-            "reject_limit": 10,
-            "sleep_limit": 10,
-            "sleep_delay": 10,
-            "concurrent": 10,
-        },
         "rc_message": {"per_second": 10000, "burst_count": 10000},
         "rc_registration": {"per_second": 10000, "burst_count": 10000},
         "rc_login": {
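The new `test_ratelimitutils.py` doubles as usage documentation for `FederationRateLimiter`: `ratelimit(host)` is a context manager whose value is a Deferred that fires once the request may proceed, and `build_rc_config()` shows how `rc_federation` settings are parsed into the config object it consumes. A condensed sketch of that usage, reusing the helpers added above (the host name and limits are illustrative):

```python
from synapse.util.ratelimitutils import FederationRateLimiter

from tests.server import get_clock
from tests.util.test_ratelimitutils import build_rc_config

reactor, clock = get_clock()

# Allow at most one in-flight request per remote host; queue the rest.
ratelimiter = FederationRateLimiter(
    clock, build_rc_config({"rc_federation": {"concurrent": 1}})
)

with ratelimiter.ratelimit("remote.example.com") as d1:
    # First request: the Deferred is already resolved, so we may proceed.
    assert d1.called

# The first context manager has exited, freeing the slot, so a new request
# from the same host is admitted immediately.
cm2 = ratelimiter.ratelimit("remote.example.com")
d2 = cm2.__enter__()
assert d2.called
cm2.__exit__(None, None, None)
```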