diff --git a/.flake8 b/.flake8 index ed8440b87..e758ffec4 100644 --- a/.flake8 +++ b/.flake8 @@ -1,16 +1,9 @@ [flake8] -ignore = W503, C901, ANN101 max-line-length = 88 exclude = cookiecutter per-file-ignores = - # Don't require docstrings or type annotations in tests - # tests/*:D100,D102,D103,DAR,ANN - # Don't require docstrings conventions or type annotations in SDK samples - # samples/*:ANN,DAR - # Don't require docstrings conventions or type annotations in private modules - singer_sdk/helpers/_*.py:ANN,DAR,D105 - # Don't require docstrings conventions in "meta" code - # singer_sdk/helpers/_classproperty.py:D105 -max-complexity = 10 + # Don't require docstrings conventions in private modules + singer_sdk/helpers/_*.py:DAR + # Disabled some checks in samples code + samples/*:DAR docstring-convention = google -allow-star-arg-any = true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 82a178db9..36ea5cce3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,17 +4,17 @@ # nested in that directory, on any level # Default owners -* @edgarrmondragon @cjohnhanson @aaronsteers +* @edgarrmondragon @kgpayne # CI/CD /.github/workflows/ @edgarrmondragon @meltano/engineering # Docs (General) /docs/ @meltano/engineering @meltano/marketing -/README.md @afolson @tayloramurphy @meltano/engineering @meltano/marketing +/README.md @tayloramurphy @meltano/engineering @meltano/marketing # Docs (Contributing) -/docs/CONTRIBUTING.md @afolson @tayloramurphy @meltano/engineering +/docs/CONTRIBUTING.md @tayloramurphy @meltano/engineering # Release Ops (see `/.pyproject.toml` for list of bumped files) /cookiecutter/*/*/pyproject.toml @meltano/engineering diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 029af69f6..808c357b6 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -1,6 +1,6 @@ name: Bug Report description: File a bug report -title: "[Bug]: " +title: "bug: <title>" labels: ["kind/Bug", "valuestream/SDK"] assignees: - meltano/engineering @@ -15,9 +15,16 @@ body: attributes: label: Singer SDK Version description: Version of the library you are using - placeholder: "0.8.0" + placeholder: "0.31.1" validations: required: true + - type: checkboxes + id: regression + attributes: + label: Is this a regression? + description: Meaning this is something that previously worked correctly + options: + - label: "Yes" - type: dropdown id: python_version attributes: @@ -38,10 +45,12 @@ body: label: Bug scope description: Functionality this bug affects options: - - Taps (catalog, state, stream maps, etc.) + - Taps (catalog, state, etc.) + - Mapping (stream maps, flattening, etc.) - Targets (data type handling, batching, SQL object generation, etc.) - Configuration (settings parsing, validation, etc.) - CLI (options, error messages, logging, etc.) + - Cookiecutter templates - Other validations: required: true diff --git a/.github/ISSUE_TEMPLATE/docs.yml b/.github/ISSUE_TEMPLATE/docs.yml new file mode 100644 index 000000000..ca0d9c673 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/docs.yml @@ -0,0 +1,30 @@ +name: Documentation change +description: Request a documentation change +title: "docs: <title>" +labels: ["Documentation", "valuestream/SDK"] + +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this documentation request! + - type: dropdown + id: scope + attributes: + label: Documentation type + description: What kind of documentation change are you requesting? 
+ options: + - Tutorials + - How-to guides + - Reference + - Explanation + validations: + required: true + - type: textarea + id: what-you-want + attributes: + label: Description + description: Describe what you want to see in the documentation + placeholder: "I was trying to do X, but the documentation didn't tell me how to do it, or it was unclear." + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index f85d74efc..a5afd5974 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -1,6 +1,6 @@ name: Feature request description: Request a new feature -title: "[Feature]: <title>" +title: "feat: <title>" labels: ["kind/Feature", "valuestream/SDK"] assignees: - meltano/engineering @@ -16,10 +16,11 @@ body: label: Feature scope description: Functionality this new feature would impact options: - - Taps (catalog, state, stream maps, etc.) - - Targets (data type handling, batching, SQL object generation, etc.) + - Taps (catalog, state, stream maps, tests, etc.) + - Targets (data type handling, batching, SQL object generation, tests, etc.) - Configuration (settings parsing, validation, etc.) - CLI (options, error messages, logging, etc.) + - Cookiecutter templates - Other validations: required: true diff --git a/.github/semantic.yml b/.github/semantic.yml index 0d5b086ed..6ddb29231 100644 --- a/.github/semantic.yml +++ b/.github/semantic.yml @@ -4,7 +4,7 @@ titleOnly: true # Provides a custom URL for the "Details" link, which appears next to the success/failure message from the app: -targetUrl: https://github.com/meltano/sdk/blob/main/CONTRIBUTING.md#semantic-pull-requests +targetUrl: https://sdk.meltano.com/en/latest/CONTRIBUTING.html#semantic-pull-requests # The values allowed for the "type" part of the PR title/commit message. # e.g. for a PR title/commit message of "feat: add some stuff", the type would be "feat" diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 000000000..6263a6249 --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,7 @@ +# This config file extends the shared Meltano GitHub org stale bot config: +# https://github.com/meltano/.github/blob/main/.github/stale.yml + +_extends: .github + +# In most cases, this file should not be updated. +# Updates to the stale bot config should be shared by all Meltano GitHub repositories. diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ac07d7f75..5041f8c59 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4.0.0 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/constraints.txt b/.github/workflows/constraints.txt index 51fc53507..c214735cb 100644 --- a/.github/workflows/constraints.txt +++ b/.github/workflows/constraints.txt @@ -1,5 +1,5 @@ -pip==22.2.2 -poetry==1.1.14 -virtualenv==20.16.3 -nox==2022.8.7 -nox-poetry==1.0.1 +pip==23.2.1 +poetry==1.6.1 +pre-commit==3.4.0 +nox==2023.4.22 +nox-poetry==1.0.3 diff --git a/.github/workflows/cookiecutter-e2e.yml b/.github/workflows/cookiecutter-e2e.yml new file mode 100644 index 000000000..7f8e151fb --- /dev/null +++ b/.github/workflows/cookiecutter-e2e.yml @@ -0,0 +1,83 @@ +name: E2E Cookiecutters + +on: + pull_request: + types: [opened, synchronize, reopened] + paths: ["cookiecutter/**", "e2e-tests/cookiecutters/**"] + push: + branches: [main] + paths: ["cookiecutter/**", "e2e-tests/cookiecutters/**"] + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + FORCE_COLOR: "1" + +jobs: + lint: + name: Cookiecutter E2E ${{ matrix.python-version }} / ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: true + matrix: + include: + - { python-version: "3.10", os: "ubuntu-latest" } + + steps: + - name: Check out the repository + uses: actions/checkout@v4.0.0 + + - name: Upgrade pip + env: + PIP_CONSTRAINT: .github/workflows/constraints.txt + run: | + pip install pip + pip --version + + - name: Install Poetry + run: | + pipx install poetry + poetry --version + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v4.7.0 + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + cache: 'pip' + cache-dependency-path: 'poetry.lock' + + - name: Install pre-commit + run: | + pipx install pre-commit + pre-commit --version + + - name: Install Nox + env: + PIP_CONSTRAINT: .github/workflows/constraints.txt + run: | + pipx install --pip-args=--constraint=.github/workflows/constraints.txt nox + pipx inject --pip-args=--constraint=.github/workflows/constraints.txt nox nox-poetry + nox --version + + - name: Run Nox + run: | + nox --python=${{ matrix.python-version }} --session=test_cookiecutter + + - name: Upload build artifacts + if: always() + uses: actions/upload-artifact@v3 + with: + path: | + /tmp/tap-* + /tmp/target-* + /tmp/mapper-* + !/tmp/tap-*/.mypy_cache/ + !/tmp/target-*/.mypy_cache/ + !/tmp/mapper-*/.mypy_cache/ + !/tmp/tap-*/.tox/ + !/tmp/target-*/.tox/ + !/tmp/mapper-*/.tox/ diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index cbf0e9b08..3eb629246 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -16,12 +16,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the repository - uses: actions/checkout@v3.0.2 + uses: actions/checkout@v4.0.0 - name: GitHub dependency vulnerability check if: ${{ github.event_name == 'pull_request_target' }} - # Use this fork until https://github.com/actions/dependency-review-action/pull/165 is merged - uses: WillDaSilva/dependency-review-action@main + uses: actions/dependency-review-action@v3.0.8 + with: + fail-on-severity: high - name: FOSSA dependency license check run: | diff --git a/.github/workflows/pr-preview-links.yml b/.github/workflows/pr-preview-links.yml new file mode 100644 index 000000000..5a7e0d326 --- /dev/null +++ b/.github/workflows/pr-preview-links.yml @@ -0,0 +1,17 @@ +name: Read the Docs Pull Request Preview + +on: +
pull_request_target: + types: + - opened + +permissions: + pull-requests: write + +jobs: + pr-preview-links: + runs-on: ubuntu-latest + steps: + - uses: readthedocs/actions/preview@v1 + with: + project-slug: "meltano-sdk" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0d84d5c17..4ba629239 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,28 +4,37 @@ on: release: types: [published] +permissions: + contents: write # Needed to upload artifacts to the release + id-token: write # Needed for OIDC PyPI publishing + jobs: release: name: Publish to PyPI runs-on: ubuntu-latest + environment: publishing steps: - name: Checkout code - uses: actions/checkout@v3.0.2 + uses: actions/checkout@v4.0.0 - name: Set up Python - uses: actions/setup-python@v4.2.0 + uses: actions/setup-python@v4.7.0 with: python-version: "3.10" - name: Upgrade pip + env: + PIP_CONSTRAINT: .github/workflows/constraints.txt run: | - pip install --constraint=.github/workflows/constraints.txt pip + pip install pip pip --version - name: Install Poetry + env: + PIP_CONSTRAINT: .github/workflows/constraints.txt run: | - pipx install --pip-args=--constraint=.github/workflows/constraints.txt poetry + pipx install poetry poetry --version - name: Check version @@ -47,7 +56,4 @@ jobs: file_glob: true - name: Publish - uses: pypa/gh-action-pypi-publish@v1.5.1 - with: - user: __token__ - password: ${{ secrets.PYPI_SECRET_TOKEN }} + uses: pypa/gh-action-pypi-publish@v1.8.10 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f07aab031..4ae50fadf 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -3,42 +3,59 @@ name: Test on: pull_request: types: [opened, synchronize, reopened] + paths: + - "cookiecutter/**" + - "samples/**" + - "singer_sdk/**" + - "tests/**" + - "noxfile.py" + - "poetry.lock" + - "pyproject.toml" + - ".github/workflows/test.yml" + - ".github/workflows/constraints.txt" push: branches: [main] + paths: + - "cookiecutter/**" + - "samples/**" + - "singer_sdk/**" + - "tests/**" + - "noxfile.py" + - "poetry.lock" + - "pyproject.toml" + - ".github/workflows/test.yml" + - ".github/workflows/constraints.txt" workflow_dispatch: inputs: {} +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + env: FORCE_COLOR: "1" jobs: tests: - name: Test on ${{ matrix.python-version }} (${{ matrix.session }}) / ${{ matrix.os }} + name: "Test on ${{ matrix.python-version }} (${{ matrix.session }}) / ${{ matrix.os }} / SQLAlchemy: ${{ matrix.sqlalchemy }}" runs-on: ${{ matrix.os }} env: NOXSESSION: ${{ matrix.session }} strategy: fail-fast: false matrix: + session: [tests] + os: ["ubuntu-latest", "macos-latest", "windows-latest"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + sqlalchemy: ["2.*"] include: - - { session: tests, python-version: "3.7", os: "ubuntu-latest" } - - { session: tests, python-version: "3.8", os: "ubuntu-latest" } - - { session: tests, python-version: "3.9", os: "ubuntu-latest" } - - { session: tests, python-version: "3.10", os: "ubuntu-latest" } - - { session: tests, python-version: "3.7", os: "macos-latest" } - - { session: tests, python-version: "3.8", os: "macos-latest" } - - { session: tests, python-version: "3.9", os: "macos-latest" } - - { session: tests, python-version: "3.10", os: "macos-latest" } - - { session: tests, python-version: "3.7", os: "windows-latest" } - - { session: tests, python-version: "3.8", os: "windows-latest" } - - { 
session: tests, python-version: "3.9", os: "windows-latest" } - - { session: tests, python-version: "3.10", os: "windows-latest" } - - { session: doctest, python-version: "3.10", os: "ubuntu-latest" } - - { session: mypy, python-version: "3.8", os: "ubuntu-latest" } + - { session: tests, python-version: "3.11", os: "ubuntu-latest", sqlalchemy: "1.*" } + - { session: doctest, python-version: "3.10", os: "ubuntu-latest", sqlalchemy: "2.*" } + - { session: mypy, python-version: "3.8", os: "ubuntu-latest", sqlalchemy: "2.*" } steps: - name: Check out the repository - uses: actions/checkout@v3.0.2 + uses: actions/checkout@v4.0.0 - name: Install Poetry env: @@ -48,7 +65,7 @@ jobs: poetry --version - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v4.2.0 + uses: actions/setup-python@v4.7.0 with: python-version: ${{ matrix.python-version }} architecture: x64 @@ -71,12 +88,14 @@ jobs: nox --version - name: Run Nox + env: + SQLALCHEMY_VERSION: ${{ matrix.sqlalchemy }} run: | nox --python=${{ matrix.python-version }} - name: Upload coverage data if: always() && (matrix.session == 'tests') - uses: actions/upload-artifact@v3.1.0 + uses: actions/upload-artifact@v3.1.2 with: name: coverage-data path: ".coverage.*" @@ -96,7 +115,7 @@ jobs: steps: - name: Check out the repository - uses: actions/checkout@v3.0.2 + uses: actions/checkout@v4.0.0 - name: Install Poetry env: @@ -106,7 +125,7 @@ jobs: poetry --version - name: Setup Python 3.10 - uses: actions/setup-python@v4.2.0 + uses: actions/setup-python@v4.7.0 with: python-version: '3.10' architecture: x64 @@ -138,7 +157,7 @@ jobs: needs: tests steps: - name: Check out the repository - uses: actions/checkout@v3.0.2 + uses: actions/checkout@v4.0.0 - name: Install Poetry run: | @@ -146,7 +165,7 @@ jobs: poetry --version - name: Set up Python - uses: actions/setup-python@v4.2.0 + uses: actions/setup-python@v4.7.0 with: python-version: '3.10' cache: 'pip' @@ -158,7 +177,7 @@ jobs: pip --version - name: Download coverage data - uses: actions/download-artifact@v3.0.0 + uses: actions/download-artifact@v3.0.2 with: name: coverage-data @@ -179,4 +198,7 @@ jobs: nox --session=coverage -- xml - name: Upload coverage report - uses: codecov/codecov-action@v3.1.0 + uses: codecov/codecov-action@v3.1.4 + with: + fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/version_bump.yml b/.github/workflows/version_bump.yml index d8dbe6034..cd0c60d86 100644 --- a/.github/workflows/version_bump.yml +++ b/.github/workflows/version_bump.yml @@ -35,26 +35,27 @@ jobs: pull-requests: write # to create and update PRs steps: - - uses: actions/checkout@v3.0.2 + - uses: actions/checkout@v4.0.0 with: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v4.2.0 + uses: actions/setup-python@v4.7.0 with: python-version: "3.10" architecture: x64 - name: Bump version id: cz-bump - uses: commitizen-tools/commitizen-action@0.14.1 + uses: commitizen-tools/commitizen-action@0.18.2 with: increment: ${{ github.event.inputs.bump != 'auto' && github.event.inputs.bump || '' }} prerelease: ${{ github.event.inputs.prerelease != 'none' && github.event.inputs.prerelease || '' }} commit: "false" push: "false" changelog: "true" - github_token: ${{ secrets.GITHUB_TOKEN }} + github_token: ${{ secrets.MELTYBOT_GITHUB_AUTH_TOKEN }} + extra_requirements: 'git+https://github.com/meltano/commitizen-version-bump@main' changelog_increment_filename: _changelog_fragment.md - name: Draft Release @@ -68,8 +69,12 @@ jobs: env: GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} + - name: Set repo file permissions + run: | + sudo chown -R $USER:$USER .git/objects + - name: Create Pull Request - uses: peter-evans/create-pull-request@v4 + uses: peter-evans/create-pull-request@v5 id: create-pull-request with: commit-message: "chore: Bump package version" @@ -88,4 +93,3 @@ jobs: base: main labels: release assignees: "${{ github.actor }}" - token: ${{ secrets.MELTYBOT_GITHUB_AUTH_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cec1a56a4..790d0e39e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ ci: - autofix_prs: false + autofix_prs: true autoupdate_schedule: weekly autoupdate_commit_msg: 'chore: pre-commit autoupdate' repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: check-json - id: check-toml @@ -16,10 +16,17 @@ repos: - id: check-yaml exclude: | (?x)^( - cookiecutter/.*/meltano.yml + cookiecutter/.*/meltano.yml| + cookiecutter/.*/.pre-commit-config.yaml )$ - id: end-of-file-fixer - exclude: (cookiecutter/.*|docs/.*|samples/.*\.json) + exclude: | + (?x)^( + cookiecutter/.*| + docs/.*| + samples/.*\.json| + tests/snapshots/.* + )$ - id: trailing-whitespace exclude: | (?x)^( @@ -28,8 +35,25 @@ repos: tests/core/test_simpleeval.py )$ +- repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.26.3 + hooks: + - id: check-dependabot + - id: check-github-workflows + - id: check-readthedocs + +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.287 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + exclude: | + (?x)^( + cookiecutter/.* + )$ + - repo: https://github.com/psf/black - rev: 22.6.0 + rev: 23.7.0 hooks: - id: black exclude: | @@ -39,29 +63,19 @@ repos: tests/core/test_simpleeval.py )$ -- repo: https://github.com/pycqa/isort - rev: 5.10.1 - hooks: - - id: isort - exclude: (cookiecutter/.*|singer_sdk/helpers/_simpleeval/.*) - - repo: https://github.com/pycqa/flake8 - rev: 5.0.4 + rev: 6.1.0 hooks: - id: flake8 additional_dependencies: - darglint==1.8.1 - - flake8-annotations==2.9.0 - - flake8-docstrings==1.6.0 - files: 'singer_sdk/.*' - -- repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 - hooks: - - id: pyupgrade - args: [--py37-plus] - exclude: | + files: | (?x)^( - singer_sdk/helpers/_simpleeval.py| - tests/core/test_simpleeval.py + singer_sdk/.*| + samples/.* )$ + +- repo: https://github.com/python-poetry/poetry + rev: 1.6.0 + hooks: + - id: poetry-check diff --git a/.readthedocs.yml b/.readthedocs.yml index af8749fc5..decc69828 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,12 +1,16 @@ version: 2 +build: + os: ubuntu-22.04 + tools: + python: "3.11" + sphinx: builder: html configuration: docs/conf.py fail_on_warning: true python: - version: 3.8 install: # - requirements: docs/requirements.txt - method: pip diff --git a/CHANGELOG.md b/CHANGELOG.md index 83be5fa39..62a31673c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,452 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## v0.31.1 (2023-08-17) +### ✨ New + +- [#1905](https://github.com/meltano/sdk/issues/1905) Add email field and use human-readable questions in templates + +### 🐛 Fixes + +- [#1913](https://github.com/meltano/sdk/issues/1913) Fix tap tests for multiple test classes with different input catalogs + +## v0.31.0 (2023-08-07) + +### ✨ New + +- [#1892](https://github.com/meltano/sdk/issues/1892) Add a mapper cookiecutter template +- [#1864](https://github.com/meltano/sdk/issues/1864) SQLTarget connector instance shared with sinks -- _**Thanks @BuzzCutNorman!**_ +- [#1878](https://github.com/meltano/sdk/issues/1878) Add `_sdc_sync_started_at` metadata column to indicate the start of the target process +- [#1484](https://github.com/meltano/sdk/issues/1484) Bump latest supported sqlalchemy from `1.*` to `2.*` + +### 🐛 Fixes + +- [#1898](https://github.com/meltano/sdk/issues/1898) Correctly serialize `decimal.Decimal` in JSON fields of SQL targets +- [#1881](https://github.com/meltano/sdk/issues/1881) Expose `add_record_metadata` as a builtin target setting +- [#1880](https://github.com/meltano/sdk/issues/1880) Append batch config if target supports the batch capability +- [#1865](https://github.com/meltano/sdk/issues/1865) Handle missing record properties in SQL sinks +- [#1838](https://github.com/meltano/sdk/issues/1838) Add deprecation warning when importing legacy testing helpers +- [#1842](https://github.com/meltano/sdk/issues/1842) Ensure all expected tap parameters are passed to `SQLTap` initializer +- [#1853](https://github.com/meltano/sdk/issues/1853) Check against the unconformed key properties when validating record keys +- [#1843](https://github.com/meltano/sdk/issues/1843) Target template should not reference `tap_id` +- [#1708](https://github.com/meltano/sdk/issues/1708) Finalize and write last state message with dedupe +- [#1835](https://github.com/meltano/sdk/issues/1835) Avoid setting up mapper in discovery mode + +### ⚙️ Under the Hood + +- [#1877](https://github.com/meltano/sdk/issues/1877) Use `importlib.resources` instead of `__file__` to retrieve sample Singer output files + +### 📚 Documentation Improvements + +- [#1852](https://github.com/meltano/sdk/issues/1852) Fix stale `pip_url` example that uses shell script workaround for editable installation + +## v0.30.0 (2023-07-10) + +### ✨ New + +- [#1815](https://github.com/meltano/sdk/issues/1815) Support optional headers for OAuth request -- _**Thanks @s7clarke10!**_ +- [#1800](https://github.com/meltano/sdk/issues/1800) Publish supported python versions in `--about` + +### 🐛 Fixes + +- [#1829](https://github.com/meltano/sdk/issues/1829) Update cookiecutter copyright assignment to cookiecutter user -- _**Thanks @riordan!**_ +- [#1826](https://github.com/meltano/sdk/issues/1826) Serialization of `decimal.Decimal` +- [#1827](https://github.com/meltano/sdk/issues/1827) Add explicit dependency on `packaging` library +- [#1820](https://github.com/meltano/sdk/issues/1820) Include SCHEMA message count in target logs + +### 📚 Documentation Improvements + +- [#1824](https://github.com/meltano/sdk/issues/1824) Document `RESTStream.rest_method` +- [#1818](https://github.com/meltano/sdk/issues/1818) Update testing.md + +## v0.29.0 (2023-07-06) + +### ✨ New + +- [#1769](https://github.com/meltano/sdk/issues/1769) Validate parsed/transformed record against schema message +- [#1525](https://github.com/meltano/sdk/issues/1525) Support union schemas + +### 🐛 Fixes + +- [#1809](https://github.com/meltano/sdk/issues/1809) Deserialize floats as 
`decimal.Decimal` +- [#1770](https://github.com/meltano/sdk/issues/1770) Check schema has arrived before record +- [#1796](https://github.com/meltano/sdk/issues/1796) Create batch directory if missing +- [#1688](https://github.com/meltano/sdk/issues/1688) Incremental where clause generation from triggering TypeError -- _**Thanks @BuzzCutNorman!**_ +- [#1778](https://github.com/meltano/sdk/issues/1778) Sink schema comparison before adding metadata columns +- [#1698](https://github.com/meltano/sdk/issues/1698) Force stream selection in tests +- [#1775](https://github.com/meltano/sdk/issues/1775) Add tests for SQL type conversion from JSON schemas +- [#1771](https://github.com/meltano/sdk/issues/1771) Add descriptions for `batch_config` properties +- [#1752](https://github.com/meltano/sdk/issues/1752) Change runner scope to function for target tests +- [#1753](https://github.com/meltano/sdk/issues/1753) Always emit a STATE message at the start of the sync process + +### ⚙️ Under the Hood + +- [#1745](https://github.com/meltano/sdk/issues/1745) Change `SQLStream.schema` into a cached property -- _**Thanks @mjsqu!**_ + +### 📚 Documentation Improvements + +- [#1756](https://github.com/meltano/sdk/issues/1756) Fix invalid JSON in Stream Maps page and add `meltano.yml` tabs -- _**Thanks @mjsqu!**_ +- [#1763](https://github.com/meltano/sdk/issues/1763) Add Cloud banner + +## v0.28.0 (2023-06-05) + +### ✨ New + +- [#1728](https://github.com/meltano/sdk/issues/1728) Add an optional Dependabot file to projects generated from templates +- [#1572](https://github.com/meltano/sdk/issues/1572) Add `batch_config` handling in `append_builtin_config()` -- _**Thanks @aaronsteers!**_ +- [#1686](https://github.com/meltano/sdk/issues/1686) Log stream errors +- [#1711](https://github.com/meltano/sdk/issues/1711) Validate records against stream schema in standard tap tests +- [#1709](https://github.com/meltano/sdk/issues/1709) Add a default Apache 2.0 license to tap and target templates + +### 🐛 Fixes + +- [#1742](https://github.com/meltano/sdk/issues/1742) Recommend `meltano run` in target cookiecutter README + +### ⚙️ Under the Hood + +- [#936](https://github.com/meltano/sdk/issues/936) Use inheritance to construct plugin CLI + +### 📚 Documentation Improvements + +- [#1721](https://github.com/meltano/sdk/issues/1721) Remove unsupported `previous_token` from HATEOAS example +- [#1703](https://github.com/meltano/sdk/issues/1703) Fix broken docs link for `record_metadata` page -- _**Thanks @menzenski!**_ + +## v0.27.0 (2023-05-11) + +### ✨ New + +- [#1681](https://github.com/meltano/sdk/issues/1681) Allow SQL tap developers to leverage `post_process` -- _**Thanks @BuzzCutNorman!**_ +- [#1672](https://github.com/meltano/sdk/issues/1672) Support deselecting streams by default +- [#1648](https://github.com/meltano/sdk/issues/1648) Use Ruff to lint projects generated with Cookiecutter templates + +### 🐛 Fixes + +- [#1680](https://github.com/meltano/sdk/issues/1680) Pin `urllib3` to `<2` to avoid incompatibility issues with botocore +- [#1646](https://github.com/meltano/sdk/issues/1646) Use `get_new_paginator` in REST tap cookiecutter template + +### ⚙️ Under the Hood + +- [#1668](https://github.com/meltano/sdk/issues/1668) Break out default batch file writer into a separate class + +### 📚 Documentation Improvements + +- [#1685](https://github.com/meltano/sdk/issues/1685) Add PyCharm debugging tips to docs +- [#1673](https://github.com/meltano/sdk/issues/1673) Fix docs build by specifying OS in RTD config file + +## 
v0.26.0 (2023-05-02) + +### ✨ New + +- [#1623](https://github.com/meltano/sdk/issues/1623) Explicitly support URL params in string form + +## v0.25.0 (2023-04-25) + +### ✨ New + +- [#1603](https://github.com/meltano/sdk/issues/1603) Allow `allowed_values` and `examples` in any JSON schema type constructor + +### ⚙️ Under the Hood + +- [#1610](https://github.com/meltano/sdk/issues/1610) Consolidate config parsing for all plugin base classes + +## v0.24.0 (2023-04-12) + +### ✨ New + +- [#1601](https://github.com/meltano/sdk/issues/1601) Allow skipping child streams by returning an empty child context from parent stream +- [#1581](https://github.com/meltano/sdk/issues/1581) Add `pattern`, `contentMediaType`, and `contentEncoding` to Schema data class -- _**Thanks @BuzzCutNorman!**_ + +### 🐛 Fixes + +- [#1587](https://github.com/meltano/sdk/issues/1587) Update cookiecutter tests path + +### ⚙️ Under the Hood + +- [#1570](https://github.com/meltano/sdk/issues/1570) Move "about" formatting logic into dedicated classes + +## v0.23.0 (2023-04-04) + +### ✨ New + +- [#1563](https://github.com/meltano/sdk/issues/1563) Migrate shell scripts for cookiecutter e2e tests to Nox -- _**Thanks @mkranna!**_ + +### 🐛 Fixes + +- [#1574](https://github.com/meltano/sdk/issues/1574) Conform metric field `type` to Singer spec +- [#1436](https://github.com/meltano/sdk/issues/1436) Handle sync abort, reduce duplicate `STATE` messages, rename `_MAX_RECORD_LIMIT` as `ABORT_AT_RECORD_COUNT` + +## v0.22.1 (2023-03-28) + +### 🐛 Fixes + +- [#1172](https://github.com/meltano/sdk/issues/1172) Handle merging of SQL types when character column lengths are less than the max -- _**Thanks @BuzzCutNorman!**_ +- [#1524](https://github.com/meltano/sdk/issues/1524) Preserve `__alias__` when mapping streams with repeated schema messages -- _**Thanks @DanilJr!**_ +- [#1526](https://github.com/meltano/sdk/issues/1526) Handle missing `type` value when checking JSON schema types + +### 📚 Documentation Improvements + +- [#1553](https://github.com/meltano/sdk/issues/1553) Change link color from pink to blue +- [#1544](https://github.com/meltano/sdk/issues/1544) Update branding colors in docs site +- [#1518](https://github.com/meltano/sdk/issues/1518) Fix HATEOAS pagination example + +## v0.22.0 (2023-03-14) + +### ✨ New + +- [#1478](https://github.com/meltano/sdk/issues/1478) Retry some streaming and decoding request errors -- _**Thanks @visch!**_ +- [#1480](https://github.com/meltano/sdk/issues/1480) Added `RESTStream.backoff_jitter` to support custom backoff jitter generators -- _**Thanks @visch!**_ +- [#1438](https://github.com/meltano/sdk/issues/1438) Cookiecutter target tox ini -- _**Thanks @mkranna!**_ + +### 🐛 Fixes + +- [#1467](https://github.com/meltano/sdk/issues/1467) Move `pyarrow` and `viztracer` extras to main dependencies +- [#1487](https://github.com/meltano/sdk/issues/1487) Address SQLAlchemy 2.0 deprecation warnings +- [#1482](https://github.com/meltano/sdk/issues/1482) Use pipx to run tox in CI template +- [#1454](https://github.com/meltano/sdk/issues/1454) Cookiecutter bearer auth config -- _**Thanks @radbrt!**_ +- [#1434](https://github.com/meltano/sdk/issues/1434) Tap template: fix style and docstrings, and add test cases for SQL and "Other" sources -- _**Thanks @flexponsive!**_ + +### 📚 Documentation Improvements + +- [#1492](https://github.com/meltano/sdk/issues/1492) Fix imports in pagination guide +- [#1446](https://github.com/meltano/sdk/issues/1446) Property conformance doc typo fix -- _**Thanks @radbrt!**_ + +## 
v0.21.0 (2023-02-21) + +### 🐛 Fixes + +- [#1410](https://github.com/meltano/sdk/issues/1410) Tap template: fix style, types and imports; and also catch more errors by building from replay files in CI -- _**Thanks @flexponsive!**_ +- [#1428](https://github.com/meltano/sdk/issues/1428) Tap template: cover all REST authentication cases, and one GraphQL case -- _**Thanks @flexponsive!**_ + +## v0.20.0 (2023-02-13) + +### ✨ New + +- [#1365](https://github.com/meltano/sdk/issues/1365) Add `strptime_to_utc` and `strftime` functions to `_singerlib.utils` -- _**Thanks @menzenski!**_ +- [#1394](https://github.com/meltano/sdk/issues/1394) Refactor SQLConnector connection handling -- _**Thanks @qbatten!**_ +- [#1241](https://github.com/meltano/sdk/issues/1241) Support declaring variant for use in package name +- [#1109](https://github.com/meltano/sdk/issues/1109) Support `requests.auth` authenticators + +### 🐛 Fixes + +- [#1380](https://github.com/meltano/sdk/issues/1380) Move tests in cookiecutters to project root to support `pytest_plugins` +- [#1406](https://github.com/meltano/sdk/issues/1406) Use a version of `isort` compatible with Python 3.8 +- [#1385](https://github.com/meltano/sdk/issues/1385) SQL Targets ignore collation when evaluating column data types -- _**Thanks @BuzzCutNorman!**_ +- [#1342](https://github.com/meltano/sdk/issues/1342) Remove SQLSink snakecase conform in favor of simpler transformations +- [#1364](https://github.com/meltano/sdk/issues/1364) TapDiscoveryTest remove catalog if one is passed + +### 📚 Documentation Improvements + +- [#1390](https://github.com/meltano/sdk/issues/1390) Add incremental replication example -- _**Thanks @flexponsive!**_ + +## v0.19.0 (2023-01-30) + +### ✨ New + +- [#1171](https://github.com/meltano/sdk/issues/1171) Improve included tap and target tests in `singer_sdk.testing` + +### 🐛 Fixes + +- [#1345](https://github.com/meltano/sdk/issues/1345) Remove tox dependency from tap/target template + +### 📚 Documentation Improvements + +- [#1358](https://github.com/meltano/sdk/issues/1358) Fix typo in `if __name__ == ` example + +## v0.18.0 (2023-01-23) + +### ✨ New + +- [#1283](https://github.com/meltano/sdk/issues/1283) Automatic catalog selection of replication keys + +### 📚 Documentation Improvements + +- [#1335](https://github.com/meltano/sdk/issues/1335) Stream maps example for adding property with hardcoded string value + +## v0.17.0 (2023-01-06) + +### 🐛 Fixes + +- [#1308](https://github.com/meltano/sdk/issues/1308) Replace hyphens with underscores when generating expected env var name `<PLUGIN_NAME>_LOGLEVEL` -- _**Thanks @adherr!**_ +- [#887](https://github.com/meltano/sdk/issues/887) Make `conform_record_data_types` work on nested objects and arrays -- _**Thanks @Jack-Burnett!**_ +- [#1287](https://github.com/meltano/sdk/issues/1287) Targets to fail gracefully when schema message is missing the `properties` key -- _**Thanks @visch!**_ + +### 📚 Documentation Improvements + +- [#1293](https://github.com/meltano/sdk/issues/1293) Add link to the [EDK](https://edk.meltano.com) + +## v0.16.0 (2022-12-19) + +### ✨ New + +- [#1262](https://github.com/meltano/sdk/issues/1262) Support string `"__NULL__"` wherever null values are allowed in stream maps configuration + +### 🐛 Fixes + +- [#1281](https://github.com/meltano/sdk/issues/1281) Apply Version bump commit file perms with sudo +- [#1280](https://github.com/meltano/sdk/issues/1280) Set repo file perms after checkout in Version bump workflow +- [#1214](https://github.com/meltano/sdk/issues/1214) 
Avoid duplicate entries in `required` array of JSON schema helpers + +## v0.15.0 (2022-12-08) + +### ✨ New + +- [#1188](https://github.com/meltano/sdk/issues/1188) Support boolean `additional_properties` in JSON schema helper objects +- [#1237](https://github.com/meltano/sdk/issues/1237) Catch and retry `ConnectionResetError` exceptions in HTTP taps +- [#1087](https://github.com/meltano/sdk/issues/1087) S3 batch storage -- _**Thanks @jamielxcarter!**_ +- [#1197](https://github.com/meltano/sdk/issues/1197) Support `patternProperties` in JSON schema helpers +- [#1157](https://github.com/meltano/sdk/issues/1157) Built-in handling of `default-target-schema` for SQL Targets -- _**Thanks @BuzzCutNorman!**_ + +### 🐛 Fixes + +- [#1238](https://github.com/meltano/sdk/issues/1238) Ensure metric tags coming from stream context can be JSON-serialized +- [#1233](https://github.com/meltano/sdk/issues/1233) Add level and logger name to default log format +- [#1219](https://github.com/meltano/sdk/issues/1219) Schema passthrough for whitelisted fields +- [#1174](https://github.com/meltano/sdk/issues/1174) Do not emit log message if no record properties were ignored +- [#1192](https://github.com/meltano/sdk/issues/1192) Change max record age for emitting state messages to 5 instead of 30 mins -- _**Thanks @spacecowboy!**_ + +### ⚡ Performance Improvements + +- [#1196](https://github.com/meltano/sdk/issues/1196) Improve performance of record message serialization -- _**Thanks @Jack-Burnett!**_ + +### 📚 Documentation Improvements + +- [#1243](https://github.com/meltano/sdk/issues/1243) Document inherited `PluginBase` attributes and methods +- [#1209](https://github.com/meltano/sdk/issues/1209) Fix argument descriptions for `OAuthAuthenticator` + +## v0.14.0 (2022-11-16) + +### ✨ New + +- [#1175](https://github.com/meltano/sdk/issues/1175) Add `datetime` functions to simpleeval env in stream maps -- _**Thanks @qbatten!**_ + +### 🐛 Fixes + +- [#1182](https://github.com/meltano/sdk/issues/1182) Update `SQLConnector` import for SQL target cookiecutter -- _**Thanks @radbrt!**_ +- [#1168](https://github.com/meltano/sdk/issues/1168) `SQLConnector.table_exists()` to use separate `table_name` and `schema_name` instead of fully qualified name -- _**Thanks @BuzzCutNorman!**_ +- [#1164](https://github.com/meltano/sdk/issues/1164) Write a valid final state message at the end of each stream sync -- _**Thanks @laurentS!**_ + +### ⚙️ Under the Hood + +- [#1114](https://github.com/meltano/sdk/issues/1114) Make DDL overridable for column `ADD`, `ALTER`, and `RENAME` operations + +## v0.13.1 (2022-11-08) + +### 🐛 Fixes + +- [#1126](https://github.com/meltano/sdk/issues/1126) Resolve failure in `_increment_stream_state()` for cases when `replication_method` is `LOG_BASED` +- [#1111](https://github.com/meltano/sdk/issues/1111) Push `_MAX_RECORDS_LIMIT` down into SQL + +### ⚙️ Under the Hood + +- [#1091](https://github.com/meltano/sdk/issues/1091) Move SQLConnector into a separate module, for use by both SQLStream and SQLSink + +### 📚 Documentation Improvements + +- [#1133](https://github.com/meltano/sdk/issues/1133) Fix duplicate `Known Limitations` header +- [#1118](https://github.com/meltano/sdk/issues/1118) Document `BATCH` limitations + +## v0.13.0 (2022-10-24) + +### ✨ New + +- [#1098](https://github.com/meltano/sdk/issues/1098) Add JSON Schema `Property` helpers for `allowed_values` (`enum`) and `examples` +- [#1096](https://github.com/meltano/sdk/issues/1096) Add secrets support for tap and target config, via 
`Property(..., secret=True)` +- [#1039](https://github.com/meltano/sdk/issues/1039) Support conforming singer property names to target identifier constraints in SQL sinks + +### 🐛 Fixes + +- [#1093](https://github.com/meltano/sdk/issues/1093) Add environment support to the cookie cutter for `meltano.yml` +- [#1036](https://github.com/meltano/sdk/issues/1036) Create schema and table on `add_sink` + +## v0.12.0 (2022-10-17) + +### ✨ New + +- [#1032](https://github.com/meltano/sdk/issues/1032) Support stream property selection push-down in SQL streams +- [#978](https://github.com/meltano/sdk/issues/978) Allow configuring a dedicated metrics logger + +### 🐛 Fixes + +- [#1043](https://github.com/meltano/sdk/issues/1043) Batch storage `split_url` to work with Windows paths -- _**Thanks @BuzzCutNorman!**_ +- [#826](https://github.com/meltano/sdk/issues/826) Remove Poetry version pin for GitHub Actions -- _**Thanks @visch!**_ +- [#1001](https://github.com/meltano/sdk/issues/1001) Use column name in `allow_column_alter` error message + +### 📚 Documentation Improvements + +- [#1060](https://github.com/meltano/sdk/issues/1060) Add explanation and recommendations for context usage +- [#1074](https://github.com/meltano/sdk/issues/1074) Document an example implementation and usage of `BaseHATEOASPaginator` +- [#1020](https://github.com/meltano/sdk/issues/1020) Fixed typo in `docs/stream_maps.md` -- _**Thanks @spacecowboy!**_ +- [#1006](https://github.com/meltano/sdk/issues/1006) Add links to Meltano install/tut + +## v0.11.1 (2022-09-27) + +### 🐛 Fixes + +- [#999](https://github.com/meltano/sdk/issues/999) Absolute file paths created by taps running in BATCH mode can't be processed by the Sink + +### 📚 Documentation Improvements + +- Change `targetUrl` of semantic PR title check to point to SDK docs + +## v0.11.0 (2022-09-23) + +### ✨ New + +- [#968](https://github.com/meltano/sdk/issues/968) Added cookiecutter support and docs for VSCode debugging +- [#904](https://github.com/meltano/sdk/issues/904) Add support for new `BATCH` message type in taps and targets + +### 🐛 Fixes + +- [#972](https://github.com/meltano/sdk/issues/972) Resolve issue where TypeError is thrown by SQLConnector cookiecutter implementation due to super() references + +### 📚 Documentation Improvements + +- [#988](https://github.com/meltano/sdk/issues/988) Add pipe before SDK logo in header +- [#970](https://github.com/meltano/sdk/issues/970) Move cookiecutter TODOs into markdown comments + +## v0.10.0 (2022-09-12) + +### ✨ New + +- [#829](https://github.com/meltano/sdk/issues/829) Add checks for primary keys, replication keys and state partitioning keys to standard tap tests -- _**Thanks @laurentS!**_ +- [#732](https://github.com/meltano/sdk/issues/732) Implement reference paginators. + +### 🐛 Fixes + +- [#898](https://github.com/meltano/sdk/issues/898) Fix SQL type merging for pre-existing target tables -- _**Thanks @BuzzCutNorman!**_ +- [#856](https://github.com/meltano/sdk/issues/856) Fix typo RecordsWitoutSchemaException -> RecordsWithoutSchemaException. + +### ⚙️ Under the Hood + +- Use `__future__.annotations` on `singer_sdk.helpers._singer` + +### 📚 Documentation Improvements + +- [#950](https://github.com/meltano/sdk/issues/950) Document missing initializers for authentication and pagination helpers. +- [#947](https://github.com/meltano/sdk/issues/947) Remove stale autodoc page for RecordsWitoutSchemaException. +- [#942](https://github.com/meltano/sdk/issues/942) Add docs preview links to PR description. 
+ +## v0.9.0 (2022-08-24) + +### ✨ New + +- [#842](https://github.com/meltano/sdk/issues/842) Allow authenticating more generic requests +- [#919](https://github.com/meltano/sdk/issues/919) add `ConnectionError` to list of backoff exceptions for auto-retry + +### 🐛 Fixes + +- [#917](https://github.com/meltano/sdk/issues/917) Allow Singer schemas to include the `required` and `enum` fields +- [#759](https://github.com/meltano/sdk/issues/759) Use recent start_date as starting_replication_value + +### ⚙️ Under the Hood + +- [#908](https://github.com/meltano/sdk/issues/908) Allow overriding the bulk insert statement in `SQLSink` + +### 📚 Documentation Improvements + +- [#914](https://github.com/meltano/sdk/issues/914) Bump Pygments and update dbt example +- [#900](https://github.com/meltano/sdk/issues/900) Generate documentation for constructor parameters ## v0.8.0 (2022-08-05) @@ -18,7 +463,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [#869](https://github.com/meltano/sdk/issues/869) Cleanup whitespace in backoff code samples - ## v0.7.0 (2022-07-21) ### ✨ New @@ -43,6 +487,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fix missing typing-extensions for Python<3.10 (#776) ## 0.6.0 - (2022-06-30) + --- ### New @@ -63,8 +508,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Reduces number of log lines printed when unmapped properties are ignored from the source API ([!300](https://gitlab.com/meltano/sdk/-/merge_requests/300)) - Thanks, _**[Eric Boucher](https://gitlab.com/ericboucher)**_! - Tap and Target SDK: Remove trailing parenthesis from logged version ([#766](https://github.com/meltano/sdk/issues/766), [#767](https://github.com/meltano/sdk/pull/767)). - ## 0.5.0 - (2022-05-19) + --- ### New @@ -74,9 +519,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes -- Target SDK: Use generic SQLALchemy markers for parameterized queries in SQL targets ([#376](https://gitlab.com/meltano/sdk/-/issues/376), [!287](https://gitlab.com/meltano/sdk/-/merge_requests/287)) - _Thanks, **[Thomas Briggs](https://gitlab.com/tbriggs2)**_! +- Target SDK: Use generic SQLALchemy markers for parameterized queries in SQL targets ([#376](https://gitlab.com/meltano/sdk/-/issues/376), [!287](https://gitlab.com/meltano/sdk/-/merge_requests/287)) - _Thanks, **[Thomas Briggs](https://gitlab.com/tbriggs2)**_! -- Target SDK: Explicitly specify column names when inserting rows in SQL targets ([#385](https://gitlab.com/meltano/sdk/-/issues/385), [!294](https://gitlab.com/meltano/sdk/-/merge_requests/294)) - _Thanks, **[Thomas Briggs](https://gitlab.com/tbriggs2)**_! +- Target SDK: Explicitly specify column names when inserting rows in SQL targets ([#385](https://gitlab.com/meltano/sdk/-/issues/385), [!294](https://gitlab.com/meltano/sdk/-/merge_requests/294)) - _Thanks, **[Thomas Briggs](https://gitlab.com/tbriggs2)**_! ### Fixes @@ -84,8 +529,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap and Target SDK: Do not fail `--about` option if tap or target is not configured ([#379](https://gitlab.com/meltano/sdk/-/issues/379), [!291](https://gitlab.com/meltano/sdk/-/merge_requests/291)). 
- ## 0.4.9 - (2022-05-12) + --- ### New @@ -101,16 +546,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap and Target SDK: Fixed a bug where setting stream map property to `null` did not remove it from SCHEMA message ([#370](https://gitlab.com/meltano/sdk/-/issues/370), [!286](https://gitlab.com/meltano/sdk/-/merge_requests/286)) - _Thanks, **[Ryan Whitten](https://gitlab.com/rwhitten577)**_! - Tap and Target SDK: Fixed a bug where flattening resulted in an invalid SCHEMA message ([!286](https://gitlab.com/meltano/sdk/-/merge_requests/286)) - _Thanks, **[Ryan Whitten](https://gitlab.com/rwhitten577)**_! - ## 0.4.8 - (2022-05-05) + --- ### Fixes - Target SDK: Use `maxLength` in SQL targets for string fields if the schema provides it ([#371](https://gitlab.com/meltano/sdk/-/issues/371), [!284](https://gitlab.com/meltano/sdk/-/merge_requests/284)) - _Thanks, **[Thomas Briggs](https://gitlab.com/tbriggs2)**_! - ## 0.4.7 - (2022-04-28) + --- ### Fixes @@ -120,8 +565,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Fixed a bug where a parent stream emitted schema messages when it's not selected, but at least one of its child streams is ([#366](https://gitlab.com/meltano/sdk/-/issues/366), [!280](https://gitlab.com/meltano/sdk/-/merge_requests/280)) - Tap SDK: Bump `pyjwt` dependency to `~=2.3` ([!281](https://gitlab.com/meltano/sdk/-/merge_requests/281)) - _Thanks, **[Eric Boucher](https://gitlab.com/ericboucher)**_! - ## 0.4.6 - (2022-04-21) + --- ### Fixes @@ -129,8 +574,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Raise more descriptive exceptions when wrapped JSON typing classes needs to be instantiated ([#55](https://gitlab.com/meltano/sdk/-/issues/55), [#360](https://gitlab.com/meltano/sdk/-/issues/360), [!270](https://gitlab.com/meltano/sdk/-/merge_requests/270)). - Support JSONPath extensions in `records_jsonpath` and `next_page_token_jsonpath` ([#361](https://gitlab.com/meltano/sdk/-/issues/361), [!271](https://gitlab.com/meltano/sdk/-/merge_requests/271)). - ## 0.4.5 - (2022-04-08) + --- ### Fixes @@ -140,8 +585,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Resolve issue where "falsey" defaults like '0', '', and 'False' would not be properly applied to tap settings config. ([#357](https://gitlab.com/meltano/sdk/-/issues/357), [!265](https://gitlab.com/meltano/sdk/-/merge_requests/265)) - Return to stable `poetry-core` version in cookiecutter templates ([#338](https://gitlab.com/meltano/sdk/-/issues/338), [!260](https://gitlab.com/meltano/sdk/-/merge_requests/260)) - ## 0.4.4 - (2022-03-03) + --- ### New @@ -149,6 +594,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Define all built-in JSON Schema string formats as separate types ([#336](https://gitlab.com/meltano/sdk/-/issues/336), [!250](https://gitlab.com/meltano/sdk/-/merge_requests/250)) - _Thanks, **[Reuben Frankel](https://gitlab.com/ReubenFrankel)**_! 
## 0.4.3 - (2022-02-18) + --- ### New @@ -159,21 +605,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Deprecate Python 3.6 ([#316](https://gitlab.com/meltano/sdk/-/issues/316), [!246](https://gitlab.com/meltano/sdk/-/merge_requests/246)) - ## 0.4.2 - (2022-02-04) + --- ### New - Add record and schema flattening in Stream Maps ([!236](https://gitlab.com/meltano/sdk/-/merge_requests/236)), - ### Fixes - Resolve issues when aliasing stream maps using the keywords `__alias__`, `__source__`, or `__else__` ([#301](https://gitlab.com/meltano/sdk/-/issues/301), [#302](https://gitlab.com/meltano/sdk/-/issues/302), [!243](https://gitlab.com/meltano/sdk/-/merge_requests/243)) - ## 0.4.1 - (2022-01-27) + --- ### Changes @@ -182,6 +627,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add `--test=schema` option to emit tap SCHEMA messages only ([!218](https://gitlab.com/meltano/sdk/-/merge_requests/218)) - _Thanks, **[Laurent Savaëte](https://gitlab.com/LaurentS)**_! ## 0.4.0 - (2022-01-21) + --- ### New @@ -191,6 +637,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added Licence tracking to SDK GitLab Project ([#166](https://gitlab.com/meltano/sdk/-/issues/166), [!237](https://gitlab.com/meltano/sdk/-/merge_requests/237)) ## 0.3.18 - (2022-01-13) + --- ### New @@ -204,6 +651,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Make the `expires_in` property optional in OAuth response ([#297](https://gitlab.com/meltano/sdk/-/issues/297), [!232](https://gitlab.com/meltano/sdk/-/merge_requests/232)) -- _Thanks, **[Daniel Ferguson](https://gitlab.com/daniel-ferguson)**!_ ## 0.3.17 - (2021-12-16) + --- ### New @@ -211,13 +659,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Add configurable timeout for HTTP requests ([#287](https://gitlab.com/meltano/sdk/-/issues/287), [!217](https://gitlab.com/meltano/sdk/-/merge_requests/217), [!225](https://gitlab.com/meltano/sdk/-/merge_requests/225)) -- _Thanks, **[Josh Lloyd](https://gitlab.com/jlloyd3)**!_ - Tap and Target SDK: Adds support for Python 3.10 ([#293](https://gitlab.com/meltano/sdk/-/issues/293), [!224](https://gitlab.com/meltano/sdk/-/merge_requests/224)) - ### Fixes - Resolve lint errors when ArrayType is used to wrap other types ([!223](https://gitlab.com/meltano/sdk/-/merge_requests/223)) -- _Thanks, **[David Wallace](https://gitlab.com/dwallace0723)**!_ - ## 0.3.16 - (2021-12-09) + --- ### Fixes @@ -225,17 +672,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Fix datelike type parsing bug with nested schemas ([#283](https://gitlab.com/meltano/sdk/-/issues/283), [!219](https://gitlab.com/meltano/sdk/-/merge_requests/219)) - Tap SDK: Resolved bug in `--test` which caused child streams to not use record limiting ([#268](https://gitlab.com/meltano/sdk/-/issues/268), [!204](https://gitlab.com/meltano/sdk/-/merge_requests/204), [!220](https://gitlab.com/meltano/sdk/-/merge_requests/220)) -- _Thanks, **[Derek Visch](https://gitlab.com/vischous)**!_ - ## 0.3.15 - (2021-12-03) + --- ### Fixes -- Tap SDK: Fixed mapped __key_properties__ not being passed to the emitted schema message ([#281](https://gitlab.com/meltano/sdk/-/issues/281), [!209](https://gitlab.com/meltano/sdk/-/merge_requests/209)) +- Tap SDK: Fixed mapped `__key_properties__` not being passed to the emitted schema message 
([#281](https://gitlab.com/meltano/sdk/-/issues/281), [!209](https://gitlab.com/meltano/sdk/-/merge_requests/209)) - Tap SDK: Fixed missing schema during development causing sync to fail [#284](https://gitlab.com/meltano/sdk/-/issues/284), [!212](https://gitlab.com/meltano/sdk/-/merge_requests/212) -- _Thanks, **[Fred Reimer](https://gitlab.com/freimer)**!_ - ## 0.3.14 - (2021-11-18) + --- ### New @@ -252,6 +699,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Target SDK: Document options for the target CLI and accept multiple config files as input ([!183](https://gitlab.com/meltano/sdk/-/merge_requests/183)) ## 0.3.13 - (2021-10-28) + --- ### New @@ -260,8 +708,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Target SDK: Add target mock classes and tap-to-target scenario tests ([#198](https://gitlab.com/meltano/sdk/-/issues/198), [!138](https://gitlab.com/meltano/sdk/-/merge_requests/138)) - Tap and Target SDK: Create expanded list of capabilities ([#186](https://gitlab.com/meltano/sdk/-/issues/186), [!141](https://gitlab.com/meltano/sdk/-/merge_requests/141)) - ## 0.3.12 - (2021-10-21) + --- ### Fixes @@ -271,8 +719,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap and Target SDK: Add `dataclasses` as an explicit third-party dependency for Python 3.6 ([#245](https://gitlab.com/meltano/sdk/-/issues/245), [!189](https://gitlab.com/meltano/sdk/-/merge_requests/189)) - Tap and Target SDK: Allows `--discover` and `--about` execution without requiring settings validation ([#235](https://gitlab.com/meltano/sdk/-/issues/235), [!188](https://gitlab.com/meltano/sdk/-/merge_requests/188)) - ## 0.3.11 - (2021-10-07) + --- ### New @@ -281,7 +729,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes - - Tap and Target SDK: Autogenerated docstrings for arguments, return types, and exceptions raised ([!166](https://gitlab.com/meltano/sdk/-/merge_requests/166)). - Tap and Target SDK: Support Black by default by bumping min Python version to 3.6.2. (#224, !169) @@ -290,21 +737,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixes a bug where tox invocations after initial setup failed ([!179](https://gitlab.com/meltano/sdk/-/merge_requests/179)) -- _Thanks, **[Jon Watson](https://gitlab.com/jawats)**!_. - Tap SDK: Fixes a bug in `Stream.get_starting_timestamp()` and `Stream.get_starting_replication_key_value()` calls where results where not cached breaking stream sorting ([!157](https://gitlab.com/meltano/sdk/-/merge_requests/157)) - ## 0.3.10 - (2021-09-30) + --- ### Changes - Tap and Target SDK: Prevents the leaking of sensitive configuration values when JSON schema validation fails ([!173](https://gitlab.com/meltano/sdk/-/merge_requests/173)) -- _Thanks, **[Kevin Mullins](https://gitlab.com/zyzil)**!_. 
- ## 0.3.9 - (2021-09-23) + --- ### New -- Add description attribute to `Property` class for JSON schemas ([#159](https://gitlab.com/meltano/sdk/-/issues/159), [!164](https://gitlab.com/meltano/sdk/-/merge_requests/164)) -- _Thanks, **[Stephen Bailey](https://gitlab.com/stkbailey)**!_ +- Add description attribute to `Property` class for JSON schemas ([#159](https://gitlab.com/meltano/sdk/-/issues/159), [!164](https://gitlab.com/meltano/sdk/-/merge_requests/164)) -- _Thanks, **[Stephen Bailey](https://gitlab.com/stkbailey)**!_ ### Changes @@ -315,6 +762,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Fixes issue where stream map schema generation fails when overriding the value of an existing property. ([#196](https://gitlab.com/meltano/sdk/-/issues/196), [!165](https://gitlab.com/meltano/sdk/-/merge_requests/165)) ## 0.3.8 - (2021-09-16) + --- ### Fixes @@ -322,8 +770,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap and Target SDK: Resolves `2to3` compatibility issues when installed with `setuptools>=58.0`. - Resolve issue preventing repo from being cloned on Windows. - ## 0.3.7 - (2021-09-09) + --- ### New @@ -335,8 +783,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Improved record parsing and validation performance, especially with large record objects ([#161](https://gitlab.com/meltano/sdk/-/issues/161), [!146](https://gitlab.com/meltano/sdk/-/merge_requests/146)) - Tap SDK: Changed the signature of `Stream.apply_catalog` to reflect new catalog parsing flow ([#161](https://gitlab.com/meltano/sdk/-/issues/161), [!146](https://gitlab.com/meltano/sdk/-/merge_requests/146)) - ## 0.3.6 - (2021-08-26) + --- ### New @@ -355,16 +803,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Fixes a bug in state tracking where timezone-aware timestamps are appended again with `+0:00` (#176, !142) -- _Thanks, **[Joshua Adeyemi](https://gitlab.com/joshua.a.adeyemi)**!_ - Tap SDK: Improve performance by reusing a single authenticator instance (#168, #173, !136) - ## 0.3.5 - (2021-08-17) + --- ### Fixes - Tap SDK: Fixed a bug where not using a catalog file resulted in all streams being selected but all properties being removed from the schema and records (#190, !132) - ## v0.3.4 + --- ### New @@ -381,6 +829,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Tap SDK: Fixed a bug where replication key signposts were not correctly applied for streams which defined them (#180, !129) ## v0.3.3 + --- ### New diff --git a/README.md b/README.md index 9ddb089e4..10780508c 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,44 @@ -# Meltano SDK for Taps and Targets - -[![Python Versions](https://img.shields.io/pypi/pyversions/singer-sdk)](https://pypi.org/project/singer-sdk) -[![Downloads](https://img.shields.io/pypi/dw/singer-sdk?color=blue)](https://pypi.org/project/singer-sdk) -[![PyPI Version](https://img.shields.io/pypi/v/singer-sdk?color=blue)](https://pypi.org/project/singer-sdk) -[![Documentation Status](https://readthedocs.org/projects/meltano-sdk/badge/?version=latest)](https://sdk.meltano.com/en/latest/?badge=latest) -[![codecov](https://codecov.io/gh/meltano/sdk/branch/main/graph/badge.svg?token=kS1zkemAgo)](https://codecov.io/gh/meltano/sdk) -[![pre-commit.ci 
status](https://results.pre-commit.ci/badge/github/meltano/sdk/main.svg)](https://results.pre-commit.ci/latest/github/meltano/sdk/main) +<h1 align="center">Meltano Singer SDK</h1> +<h3 align="center"> The Tap and Target SDKs are the fastest way to build custom data extractors and loaders! Taps and targets built on the SDK are automatically compliant with the -[Singer Spec](https://hub.meltano.com/singer/spec), the +<a href="https://hub.meltano.com/singer/spec">Singer Spec</a>, the de-facto open source standard for extract and load pipelines. +</h3> + +--- + +</br> + +<div align="center"> + <img alt="Meltano Singer SDK Logo" src="https://user-images.githubusercontent.com/11428666/231584532-ffa694e6-60f9-4fd6-b2ee-5ff3e39d3ad6.svg" width="600"/> +</div> + +</br> + +<div align="center"> + <a href="https://pypi.org/project/singer-sdk"> + <img alt="Python Versions" src="https://img.shields.io/pypi/pyversions/singer-sdk"/> + </a> + <a href="https://pypi.org/project/singer-sdk"> + <img alt="Downloads" src="https://img.shields.io/pypi/dw/singer-sdk?color=blue"/> + </a> + <a href="https://pypi.org/project/singer-sdk"> + <img alt="PyPI Version" src="https://img.shields.io/pypi/v/singer-sdk?color=blue"/> + </a> + <a href="https://sdk.meltano.com/en/latest/?badge=latest"> + <img alt="Documentation Status" src="https://readthedocs.org/projects/meltano-sdk/badge/?version=latest"/> + </a> + <a href="https://codecov.io/gh/meltano/sdk"> + <img alt="codecov" src="https://codecov.io/gh/meltano/sdk/branch/main/graph/badge.svg?token=kS1zkemAgo"/> + </a> + <a href="https://results.pre-commit.ci/latest/github/meltano/sdk/main"> + <img alt="pre-commit.ci status" src="https://results.pre-commit.ci/badge/github/meltano/sdk/main.svg"/> + </a> +</div> + +--- ## Future-proof extractors and loaders, with less code @@ -19,6 +47,21 @@ makes learning the SDK a great investment. Furthermore, as new features and capa are added to the SDK, your taps and targets can always take advantage of the latest capabilities and bug fixes, simply by updating your SDK dependency to the latest version. +## Meltano + +*Not familiar with Meltano?* [Meltano](https://docs.meltano.com/getting-started/meltano-at-a-glance) is your CLI for ELT+ that: + +- **Starts simple**: Meltano is pip-installable and comes in a prepackaged Docker container, so you can have your first ELT pipeline running within minutes. +- **Has DataOps out-of-the-box**: Meltano provides tools that make DataOps best practices easy to use in every project. +- **Integrates with everything**: 300+ natively supported data sources & targets, and additional plugins like Great Expectations or dbt are also available. +- **Is easily customizable**: Meltano isn't just extensible, it's built to be extended! The Singer SDK (for Connectors) & EDK (for Meltano Components) are easy to use. Meltano Hub helps you find all of the connectors and components created across the data community. +- **Is a mature system**: Developed since 2018, it runs in production at large companies like GitLab, and currently powers over a million pipeline runs monthly. +- **Has first-class ELT tooling built-in**: Extract data from any data source, load into any target, use inline maps to transform data on the fly, and test the incoming data, all in one package.
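For a concrete sense of that workflow, here is a minimal sketch (illustrative only; the `tap-smoke-test` and `target-jsonl` plugin names are the same demo plugins referenced by the cookiecutter `meltano.yml` files later in this patch):

```bash
# Minimal sketch: install Meltano, scaffold a project, and wire up a demo EL pipeline.
pipx install meltano
meltano init my-elt-project
cd my-elt-project

# Demo plugins (the same ones the SDK cookiecutter templates reference in meltano.yml).
meltano add extractor tap-smoke-test
meltano add loader target-jsonl

# Configure the extractor's `streams` setting (see the template meltano.yml for an example),
# then run the pipeline end to end.
meltano run tap-smoke-test target-jsonl
```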
+ +If you want to get started with Meltano, we suggest you: +- head over to the [Installation](https://docs.meltano.com/getting-started/installation) guide +- or if you have it installed, go through the [Meltano Tutorial](https://docs.meltano.com/getting-started/part1). + ## Documentation - See our [online documentation](https://sdk.meltano.com) for instructions on how diff --git a/cookiecutter/mapper-template/README.md b/cookiecutter/mapper-template/README.md new file mode 100644 index 000000000..70e2e47e8 --- /dev/null +++ b/cookiecutter/mapper-template/README.md @@ -0,0 +1,24 @@ +# Singer Mapper Template + +To use this Cookiecutter template: + +```bash +pip3 install pipx +pipx ensurepath +# You may need to reopen your shell at this point +pipx install cookiecutter +``` + +Initialize Cookiecutter template directly from Git: + +```bash +cookiecutter https://github.com/meltano/sdk --directory="cookiecutter/mapper-template" +``` + +Or locally from an already-cloned `sdk` repo: + +```bash +cookiecutter ./sdk/cookiecutter/mapper-template +``` + +See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html). diff --git a/cookiecutter/mapper-template/cookiecutter.json b/cookiecutter/mapper-template/cookiecutter.json new file mode 100644 index 000000000..c42b1cf06 --- /dev/null +++ b/cookiecutter/mapper-template/cookiecutter.json @@ -0,0 +1,19 @@ +{ + "name": "MyMapperName", + "admin_name": "FirstName LastName", + "admin_email": "firstname.lastname@example.com", + "mapper_id": "mapper-{{ cookiecutter.name.lower() }}", + "library_name": "{{ cookiecutter.mapper_id.replace('-', '_') }}", + "variant": "None (Skip)", + "include_ci_files": ["GitHub", "None (Skip)"], + "license": ["Apache-2.0"], + "__prompts__": { + "name": "The name of the mapper, in CamelCase", + "admin_name": "Provide your [bold yellow]full name[/]", + "admin_email": "Provide your [bold yellow]email[/]", + "mapper_id": "The ID of the mapper, in kebab-case", + "library_name": "The name of the library, in snake_case. This is how the library will be imported in Python.", + "include_ci_files": "Whether to include CI files for a common CI service", + "license": "The license for the project" + } +} diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} new file mode 100644 index 000000000..0cfc81005 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} @@ -0,0 +1,30 @@ +### A CI workflow template that runs linting and python testing +### TODO: Modify as needed or as desired.
+ +name: Test {{cookiecutter.mapper_id}} + +on: [push] + +jobs: + pytest: + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: {{ '${{secrets.GITHUB_TOKEN}}' }} + strategy: + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python {{ '${{ matrix.python-version }}' }} + uses: actions/setup-python@v4 + with: + python-version: {{ '${{ matrix.python-version }}' }} + - name: Install Poetry + run: | + pip install poetry + - name: Install dependencies + run: | + poetry install + - name: Test with pytest + run: | + poetry run pytest diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} new file mode 100644 index 000000000..933e6b1c2 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} @@ -0,0 +1,26 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "chore(deps): " + prefix-development: "chore(deps-dev): " + - package-ecosystem: pip + directory: "/.github/workflows" + schedule: + interval: daily + commit-message: + prefix: "ci: " + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "ci: " diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.gitignore b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.gitignore new file mode 100644 index 000000000..475019c31 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.gitignore @@ -0,0 +1,136 @@ +# Secrets and internal config files +**/.secrets/* + +# Ignore meltano internal cache and sqlite systemdb + +.meltano/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.pre-commit-config.yaml b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.pre-commit-config.yaml new file mode 100644 index 000000000..6d9bbbfd5 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.pre-commit-config.yaml @@ -0,0 +1,36 @@ +ci: + autofix_prs: true + autoupdate_schedule: weekly + autoupdate_commit_msg: 'chore: pre-commit autoupdate' + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-json + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + +- repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.23.3 + hooks: + - id: check-dependabot + - id: check-github-workflows + +- repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.282 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + +- repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black + +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.4.1 + hooks: + - id: mypy diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.secrets/.gitignore b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.secrets/.gitignore new file mode 100644 index 000000000..33c6acd03 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/.secrets/.gitignore @@ -0,0 +1,10 @@ +# IMPORTANT! This folder is hidden from git - if you need to store config files or other secrets, +# make sure those are never staged for commit into your git repo. You can store them here or another +# secure location. +# +# Note: This may be redundant with the global .gitignore for, and is provided +# for redundancy. If the `.secrets` folder is not needed, you may delete it +# from the project. + +* +!.gitignore diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/README.md b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/README.md new file mode 100644 index 000000000..ded365fb2 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/README.md @@ -0,0 +1,128 @@ +# {{ cookiecutter.mapper_id }} + +`{{ cookiecutter.mapper_id }}` is a Singer mapper for {{ cookiecutter.name }}. + +Built with the [Meltano Mapper SDK](https://sdk.meltano.com) for Singer Mappers. + +<!-- + +Developer TODO: Update the below as needed to correctly describe the install procedure. For instance, if you do not have a PyPi repo, or if you want users to directly install from your git repo, you can modify this step as appropriate. 
+ +## Installation + +Install from PyPi: + +```bash +pipx install {{ cookiecutter.mapper_id }} +``` + +Install from GitHub: + +```bash +pipx install git+https://github.com/ORG_NAME/{{ cookiecutter.mapper_id }}.git@main +``` + +--> + +## Configuration + +### Accepted Config Options + +<!-- +Developer TODO: Provide a list of config options accepted by the mapper. + +This section can be created by copy-pasting the CLI output from: + +``` +{{ cookiecutter.mapper_id }} --about --format=markdown +``` +--> + +A full list of supported settings and capabilities for this +mapper is available by running: + +```bash +{{ cookiecutter.mapper_id }} --about +``` + +### Configure using environment variables + +This Singer mapper will automatically import any environment variables within the working directory's +`.env` if the `--config=ENV` is provided, such that config values will be considered if a matching +environment variable is set either in the terminal context or in the `.env` file. + +### Source Authentication and Authorization + +<!-- +Developer TODO: If your mapper requires special access on the source system, or any special authentication requirements, provide those here. +--> + +## Usage + +You can easily run `{{ cookiecutter.mapper_id }}` by itself or in a pipeline using [Meltano](https://meltano.com/). + +### Executing the Mapper Directly + +```bash +{{ cookiecutter.mapper_id }} --version +{{ cookiecutter.mapper_id }} --help +``` + +## Developer Resources + +Follow these instructions to contribute to this project. + +### Initialize your Development Environment + +```bash +pipx install poetry +poetry install +``` + +### Create and Run Tests + +Create tests within the `tests` subfolder and + then run: + +```bash +poetry run pytest +``` + +You can also test the `{{cookiecutter.mapper_id}}` CLI interface directly using `poetry run`: + +```bash +poetry run {{cookiecutter.mapper_id}} --help +``` + +### Testing with [Meltano](https://www.meltano.com) + +_**Note:** This mapper will work in any Singer environment and does not require Meltano. +Examples here are for convenience and to streamline end-to-end orchestration scenarios._ + +<!-- +Developer TODO: +Your project comes with a custom `meltano.yml` project file already created. Open the `meltano.yml` and follow any "TODO" items listed in +the file. +--> + +Next, install Meltano (if you haven't already) and any needed plugins: + +```bash +# Install meltano +pipx install meltano +# Initialize meltano within this directory +cd {{ cookiecutter.mapper_id }} +meltano install +``` + +Now you can test and orchestrate using Meltano: + +```bash +# Run a test `run` pipeline: +meltano run tap-smoke-test {{ cookiecutter.mapper_id }} target-jsonl +``` + +### SDK Dev Guide + +See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html) for more instructions on how to use the SDK to +develop your own taps, targets, and mappers. 
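Because the generated mapper is a standard Singer plugin that reads messages on stdin and writes them to stdout, it can also be wired into a plain Singer pipeline without Meltano. A rough sketch, assuming each plugin is installed and the `*_config.json` file names are placeholders you create yourself:

```bash
# Rough sketch of a raw Singer pipeline: tap -> inline mapper -> target.
# The config file names below are placeholders, not files generated by this template.
tap-smoke-test --config tap_config.json \
  | {{ cookiecutter.mapper_id }} --config mapper_config.json \
  | target-jsonl --config target_config.json
```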
diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/meltano.yml b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/meltano.yml new file mode 100644 index 000000000..019015d06 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/meltano.yml @@ -0,0 +1,31 @@ +version: 1 +send_anonymous_usage_stats: true +project_id: "{{cookiecutter.mapper_id}}" +default_environment: test +environments: +- name: test +plugins: + extractors: + - name: tap-smoke-test + variant: meltano + pip_url: git+https://github.com/meltano/tap-smoke-test.git + config: + streams: + - stream_name: animals + input_filename: https://raw.githubusercontent.com/meltano/tap-smoke-test/main/demo-data/animals-data.jsonl + loaders: + - name: target-jsonl + variant: andyh1203 + pip_url: target-jsonl + mappers: + - name: "{{cookiecutter.mapper_id}}" + pip_url: -e . + namespace: "{{cookiecutter.library_name}}" + # TODO: replace these with the actual settings + settings: + - name: example_config + kind: string + mappings: + - name: example + config: + example_config: foo diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/output/.gitignore b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/output/.gitignore new file mode 100644 index 000000000..80ff9d2a6 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/output/.gitignore @@ -0,0 +1,4 @@ +# This directory is used as a target by target-jsonl, so ignore all files + +* +!.gitignore diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/pyproject.toml b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/pyproject.toml new file mode 100644 index 000000000..f63f07b02 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/pyproject.toml @@ -0,0 +1,64 @@ +[tool.poetry] +{%- if cookiecutter.variant != "None (Skip)" %} +name = "{{cookiecutter.variant}}-{{cookiecutter.mapper_id}}" +{%- else %} +name = "{{cookiecutter.mapper_id}}" +{%- endif %} +version = "0.0.1" +description = "`{{cookiecutter.mapper_id}}` is a Singer mapper for {{cookiecutter.name}}, built with the Meltano Singer SDK."
+readme = "README.md" +authors = ["{{ cookiecutter.admin_name }} <{{ cookiecutter.admin_email }}>"] +keywords = [ + "ELT", + "Mapper", + "{{cookiecutter.name}}", +] +license = "Apache-2.0" +{%- if cookiecutter.variant != "None (Skip)" %} +packages = [ + { include = "{{cookiecutter.library_name}}" }, +] +{%- endif %} + +[tool.poetry.dependencies] +python = "<3.12,>=3.7.1" +singer-sdk = { version="^0.31.1" } +fs-s3fs = { version = "^1.1.1", optional = true } + +[tool.poetry.group.dev.dependencies] +pytest = "^7.4.0" +singer-sdk = { version="^0.30.0", extras = ["testing"] } + +[tool.poetry.extras] +s3 = ["fs-s3fs"] + +[tool.mypy] +python_version = "3.9" +warn_unused_configs = true + +[tool.ruff] +ignore = [ + "ANN101", # missing-type-self + "ANN102", # missing-type-cls +] +select = ["ALL"] +src = ["{{cookiecutter.library_name}}"] +target-version = "py37" + + +[tool.ruff.flake8-annotations] +allow-star-arg-any = true + +[tool.ruff.isort] +known-first-party = ["{{cookiecutter.library_name}}"] + +[tool.ruff.pydocstyle] +convention = "google" + +[build-system] +requires = ["poetry-core>=1.0.8"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry.scripts] +# CLI declaration +{{cookiecutter.mapper_id}} = '{{cookiecutter.library_name}}.mapper:{{cookiecutter.name}}Mapper.cli' diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/__init__.py b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/__init__.py new file mode 100644 index 000000000..7caba56f7 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/__init__.py @@ -0,0 +1 @@ +"""Test suite for {{ cookiecutter.mapper_id }}.""" diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/conftest.py b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/conftest.py new file mode 100644 index 000000000..6bb3ec2d7 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tests/conftest.py @@ -0,0 +1,3 @@ +"""Test Configuration.""" + +pytest_plugins = ("singer_sdk.testing.pytest_plugin",) diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tox.ini b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tox.ini new file mode 100644 index 000000000..70b9e4ac7 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/tox.ini @@ -0,0 +1,19 @@ +# This file can be used to customize tox tests as well as other test frameworks like flake8 and mypy + +[tox] +envlist = py37, py38, py39, py310, py311 +isolated_build = true + +[testenv] +allowlist_externals = poetry +commands = + poetry install -v + poetry run pytest + +[testenv:pytest] +# Run the python tests. +# To execute, run `tox -e pytest` +envlist = py37, py38, py39, py310, py311 +commands = + poetry install -v + poetry run pytest diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} new file mode 100644 index 000000000..62913ff3a --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + + Copyright {% now 'utc', '%Y' %} {{ cookiecutter.admin_name }} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/__init__.py b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/__init__.py new file mode 100644 index 000000000..5781fbbc4 --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/__init__.py @@ -0,0 +1 @@ +"""{{ cookiecutter.name }} Mapper.""" diff --git a/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/mapper.py b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/mapper.py new file mode 100644 index 000000000..c8c3d23ec --- /dev/null +++ b/cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/{{cookiecutter.library_name}}/mapper.py @@ -0,0 +1,96 @@ +"""{{ cookiecutter.name }} mapper class.""" + +from __future__ import annotations + +import typing as t +from typing import TYPE_CHECKING + +import singer_sdk.typing as th +from singer_sdk import _singerlib as singer +from singer_sdk.mapper import PluginMapper +from singer_sdk.mapper_base import InlineMapper + +if TYPE_CHECKING: + from pathlib import PurePath + + +class {{ cookiecutter.name }}Mapper(InlineMapper): + """Sample mapper for {{ cookiecutter.name }}.""" + + name = "{{ cookiecutter.mapper_id }}" + + config_jsonschema = th.PropertiesList( + # TODO: Replace or remove this example config based on your needs + th.Property( + "example_config", + th.StringType, + description="An example config, replace or remove based on your needs.", + ), + ).to_dict() + + def __init__( + self, + *, + config: dict | PurePath | str | list[PurePath | str] | None = None, + parse_env_config: bool = False, + validate_config: bool = True, + ) -> None: + """Create a new inline mapper. + + Args: + config: Mapper configuration. Can be a dictionary, a single path to a + configuration file, or a list of paths to multiple configuration + files. + parse_env_config: Whether to look for configuration values in environment + variables. + validate_config: True to require validation of config settings. + """ + super().__init__( + config=config, + parse_env_config=parse_env_config, + validate_config=validate_config, + ) + + self.mapper = PluginMapper(plugin_config=dict(self.config), logger=self.logger) + + def map_schema_message(self, message_dict: dict) -> t.Iterable[singer.Message]: + """Map a schema message to zero or more new messages. + + Args: + message_dict: A SCHEMA message JSON dictionary. + """ + yield singer.SchemaMessage.from_dict(message_dict) + + def map_record_message( + self, + message_dict: dict, + ) -> t.Iterable[singer.RecordMessage]: + """Map a record message to zero or more new messages. + + Args: + message_dict: A RECORD message JSON dictionary. + """ + yield singer.RecordMessage.from_dict(message_dict) + + def map_state_message(self, message_dict: dict) -> t.Iterable[singer.Message]: + """Map a state message to zero or more new messages. + + Args: + message_dict: A STATE message JSON dictionary. + """ + yield singer.StateMessage.from_dict(message_dict) + + def map_activate_version_message( + self, + message_dict: dict, + ) -> t.Iterable[singer.Message]: + """Map a version message to zero or more new messages. + + Args: + message_dict: An ACTIVATE_VERSION message JSON dictionary. 
+ """ + yield singer.ActivateVersionMessage.from_dict(message_dict) + + +if __name__ == "__main__": + {{ cookiecutter.name }}Mapper.cli() diff --git a/cookiecutter/tap-template/cookiecutter.json b/cookiecutter/tap-template/cookiecutter.json index 243a3d589..e297aae54 100644 --- a/cookiecutter/tap-template/cookiecutter.json +++ b/cookiecutter/tap-template/cookiecutter.json @@ -1,24 +1,30 @@ { - "source_name": "MySourceName", - "admin_name": "FirstName LastName", - "tap_id": "tap-{{ cookiecutter.source_name.lower() }}", - "library_name": "{{ cookiecutter.tap_id.replace('-', '_') }}", - "stream_type": [ - "REST", - "GraphQL", - "SQL", - "Other" - ], - "auth_method": [ - "API Key", - "Bearer Token", - "Basic Auth", - "OAuth2", - "JWT", - "Custom or N/A" - ], - "include_cicd_sample_template": [ - "GitHub", - "None (Skip)" - ] + "source_name": "MySourceName", + "admin_name": "FirstName LastName", + "admin_email": "firstname.lastname@example.com", + "tap_id": "tap-{{ cookiecutter.source_name.lower() }}", + "library_name": "{{ cookiecutter.tap_id.replace('-', '_') }}", + "variant": "None (Skip)", + "stream_type": ["REST", "GraphQL", "SQL", "Other"], + "auth_method": [ + "API Key", + "Bearer Token", + "Basic Auth", + "OAuth2", + "JWT", + "Custom or N/A" + ], + "include_ci_files": ["GitHub", "None (Skip)"], + "license": ["Apache-2.0"], + "__prompts__": { + "source_name": "The name of the source, in CamelCase", + "admin_name": "Provide your [bold yellow]full name[/]", + "admin_email": "Provide your [bold yellow]email[/]", + "tap_id": "The ID of the tap, in kebab-case", + "library_name": "The name of the library, in snake_case. This is how the library will be imported in Python.", + "stream_type": "The type of stream the source provides", + "auth_method": "The [bold red]authentication[/] method used by the source, for REST and GraphQL sources", + "include_ci_files": "Whether to include CI files for a common CI services", + "license": "The license for the project" + } } diff --git a/cookiecutter/tap-template/cookiecutter.tests.yml b/cookiecutter/tap-template/cookiecutter.tests.yml index 11e17f8ef..eb6a2d832 100644 --- a/cookiecutter/tap-template/cookiecutter.tests.yml +++ b/cookiecutter/tap-template/cookiecutter.tests.yml @@ -57,3 +57,10 @@ tests: stream_type: Other auth_method: Custom or N/A include_cicd_sample_template: GitHub + + - source_name: OtherCustomTemplateTest + tap_id: test-tap-other-custom-type + variant: meltanolabs + stream_type: Other + auth_method: Custom or N/A + include_cicd_sample_template: "None (Skip)" \ No newline at end of file diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} new file mode 100644 index 000000000..0ea2f9ae7 --- /dev/null +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} @@ -0,0 +1,30 @@ +### A CI workflow template that runs linting and python testing +### TODO: Modify as needed or as desired. 
+ +name: Test {{cookiecutter.tap_id}} + +on: [push] + +jobs: + pytest: + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: {{ '${{secrets.GITHUB_TOKEN}}' }} + strategy: + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python {{ '${{ matrix.python-version }}' }} + uses: actions/setup-python@v4 + with: + python-version: {{ '${{ matrix.python-version }}' }} + - name: Install Poetry + run: | + pip install poetry + - name: Install dependencies + run: | + poetry install + - name: Test with pytest + run: | + poetry run pytest diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_cicd_sample_template == 'GitHub' %}ci_workflow.yml{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_cicd_sample_template == 'GitHub' %}ci_workflow.yml{%endif%} deleted file mode 100644 index 9cc605a5c..000000000 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/workflows/{% if cookiecutter.include_cicd_sample_template == 'GitHub' %}ci_workflow.yml{%endif%} +++ /dev/null @@ -1,58 +0,0 @@ -### A CI workflow template that runs linting and python testing -### TODO: Modify as needed or as desired. - -name: Test {{cookiecutter.tap_id}} - -on: [push] - -jobs: - linting: - - runs-on: ubuntu-latest - strategy: - matrix: - # Only lint using the primary version used for dev - python-version: [3.9] - - steps: - - uses: actions/checkout@v2 - - name: Set up Python {{ '${{ matrix.python-version }}' }} - uses: actions/setup-python@v2 - with: - python-version: {{ '${{ matrix.python-version }}' }} - - name: Install Poetry - uses: snok/install-poetry@v1 - with: - version: 1.1.8 - - name: Install dependencies - run: | - poetry install - - name: Run lint command from tox.ini - run: | - poetry run tox -e lint - - pytest: - - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: {{ '${{secrets.GITHUB_TOKEN}}' }} - strategy: - matrix: - python-version: [3.7, 3.8, 3.9] - - steps: - - uses: actions/checkout@v2 - - name: Set up Python {{ '${{ matrix.python-version }}' }} - uses: actions/setup-python@v2 - with: - python-version: {{ '${{ matrix.python-version }}' }} - - name: Install Poetry - uses: snok/install-poetry@v1 - with: - version: 1.1.11 - - name: Install dependencies - run: | - poetry install - - name: Test with pytest - run: | - poetry run pytest --capture=no diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} new file mode 100644 index 000000000..933e6b1c2 --- /dev/null +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} @@ -0,0 +1,26 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "chore(deps): " + prefix-development: "chore(deps-dev): " + - package-ecosystem: pip + directory: "/.github/workflows" + schedule: + interval: daily + commit-message: + prefix: "ci: " + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "ci: " diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.pre-commit-config.yaml b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.pre-commit-config.yaml new file mode 100644 index 000000000..12c29e27b --- /dev/null +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +ci: + autofix_prs: true + autoupdate_schedule: weekly + autoupdate_commit_msg: 'chore: pre-commit autoupdate' + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-json + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + +- repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.23.3 + hooks: + - id: check-dependabot + - id: check-github-workflows + +- repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.282 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + +- repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black + +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.4.1 + hooks: + - id: mypy + additional_dependencies: + {%- if cookiecutter.stream_type == "SQL" %} + - sqlalchemy-stubs + {%- else %} + - types-requests + {%- endif %} diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/README.md b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/README.md index f727a3de9..0454d048c 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/README.md +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/README.md @@ -4,19 +4,39 @@ Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps. +<!-- + +Developer TODO: Update the below as needed to correctly describe the install procedure. For instance, if you do not have a PyPi repo, or if you want users to directly install from your git repo, you can modify this step as appropriate. + ## Installation -- [ ] `Developer TODO:` Update the below as needed to correctly describe the install procedure. For instance, if you do not have a PyPi repo, or if you want users to directly install from your git repo, you can modify this step as appropriate. +Install from PyPi: ```bash pipx install {{ cookiecutter.tap_id }} ``` +Install from GitHub: + +```bash +pipx install git+https://github.com/ORG_NAME/{{ cookiecutter.tap_id }}.git@main +``` + +--> + ## Configuration ### Accepted Config Options -- [ ] `Developer TODO:` Provide a list of config options accepted by the tap. +<!-- +Developer TODO: Provide a list of config options accepted by the tap. 
+ +This section can be created by copy-pasting the CLI output from: + +``` +{{ cookiecutter.tap_id }} --about --format=markdown +``` +--> A full list of supported settings and capabilities for this tap is available by running: @@ -33,7 +53,9 @@ environment variable is set either in the terminal context or in the `.env` file ### Source Authentication and Authorization -- [ ] `Developer TODO:` If your tap requires special access on the source system, or any special authentication requirements, provide those here. +<!-- +Developer TODO: If your tap requires special access on the source system, or any special authentication requirements, provide those here. +--> ## Usage @@ -49,7 +71,7 @@ You can easily run `{{ cookiecutter.tap_id }}` by itself or in a pipeline using ## Developer Resources -- [ ] `Developer TODO:` As a first step, scan the entire project for the text "`TODO:`" and complete any recommended steps, deleting the "TODO" references once completed. +Follow these instructions to contribute to this project. ### Initialize your Development Environment @@ -60,7 +82,7 @@ poetry install ### Create and Run Tests -Create tests within the `{{ cookiecutter.library_name }}/tests` subfolder and +Create tests within the `tests` subfolder and then run: ```bash @@ -78,8 +100,11 @@ poetry run {{cookiecutter.tap_id}} --help _**Note:** This tap will work in any Singer environment and does not require Meltano. Examples here are for convenience and to streamline end-to-end orchestration scenarios._ -Your project comes with a custom `meltano.yml` project file already created. Open the `meltano.yml` and follow any _"TODO"_ items listed in +<!-- +Developer TODO: +Your project comes with a custom `meltano.yml` project file already created. Open the `meltano.yml` and follow any "TODO" items listed in the file. 
+--> Next, install Meltano (if you haven't already) and any needed plugins: diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/meltano.yml b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/meltano.yml index 1ca496a86..e6a1cfba5 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/meltano.yml +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/meltano.yml @@ -1,6 +1,9 @@ version: 1 send_anonymous_usage_stats: true project_id: "{{cookiecutter.tap_id}}" +default_environment: test +environments: +- name: test plugins: extractors: - name: "{{cookiecutter.tap_id}}" @@ -10,6 +13,8 @@ plugins: - state - catalog - discover + - about + - stream-maps config: start_date: '2010-01-01T00:00:00Z' settings: diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/mypy.ini b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/mypy.ini deleted file mode 100644 index ba621de2b..000000000 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/mypy.ini +++ /dev/null @@ -1,6 +0,0 @@ -[mypy] -python_version = 3.9 -warn_unused_configs = True - -[mypy-backoff.*] -ignore_missing_imports = True diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/pyproject.toml b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/pyproject.toml index 640d5c2f1..c5381bdaf 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/pyproject.toml +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/pyproject.toml @@ -1,33 +1,67 @@ [tool.poetry] +{%- if cookiecutter.variant != "None (Skip)" %} +name = "{{cookiecutter.variant}}-{{cookiecutter.tap_id}}" +{%- else %} name = "{{cookiecutter.tap_id}}" +{%- endif %} version = "0.0.1" -description = "`{{cookiecutter.tap_id}}` is a Singer tap for {{cookiecutter.source_name}}, built with the Meltano SDK for Singer Taps." -authors = ["{{ cookiecutter.admin_name }}"] +description = "`{{cookiecutter.tap_id}}` is a Singer tap for {{cookiecutter.source_name}}, built with the Meltano Singer SDK." 
+readme = "README.md" +authors = ["{{ cookiecutter.admin_name }} <{{ cookiecutter.admin_email }}>"] keywords = [ "ELT", "{{cookiecutter.source_name}}", ] -license = "Apache 2.0" +license = "Apache-2.0" +{%- if cookiecutter.variant != "None (Skip)" %} +packages = [ + { include = "{{cookiecutter.library_name}}" }, +] +{%- endif %} [tool.poetry.dependencies] -python = "<3.11,>=3.7.1" -requests = "^2.25.1" -singer-sdk = "^0.8.0" - -[tool.poetry.dev-dependencies] -pytest = "^6.2.5" -tox = "^3.24.4" -flake8 = "^3.9.2" -black = "^21.9b0" -pydocstyle = "^6.1.1" -mypy = "^0.910" -types-requests = "^2.26.1" -isort = "^5.10.1" - -[tool.isort] -profile = "black" -multi_line_output = 3 # Vertical Hanging Indent -src_paths = "{{cookiecutter.library_name}}" +python = "<3.12,>=3.7.1" +singer-sdk = { version="^0.31.1" } +fs-s3fs = { version = "^1.1.1", optional = true } +{%- if cookiecutter.stream_type in ["REST", "GraphQL"] %} +requests = "^2.31.0" +{%- endif %} +{%- if cookiecutter.auth_method in ("OAuth2", "JWT") %} +cached-property = "^1" # Remove after Python 3.7 support is dropped +{%- endif %} + +[tool.poetry.group.dev.dependencies] +pytest = "^7.4.0" +singer-sdk = { version="^0.31.1", extras = ["testing"] } + +[tool.poetry.extras] +s3 = ["fs-s3fs"] + +[tool.mypy] +python_version = "3.9" +warn_unused_configs = true +{%- if cookiecutter.stream_type == 'SQL' %} +plugins = "sqlmypy" +{%- endif %} + +[tool.ruff] +ignore = [ + "ANN101", # missing-type-self + "ANN102", # missing-type-cls +] +select = ["ALL"] +src = ["{{cookiecutter.library_name}}"] +target-version = "py37" + + +[tool.ruff.flake8-annotations] +allow-star-arg-any = true + +[tool.ruff.isort] +known-first-party = ["{{cookiecutter.library_name}}"] + +[tool.ruff.pydocstyle] +convention = "google" [build-system] requires = ["poetry-core>=1.0.8"] diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tests/__init__.py b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/__init__.py similarity index 100% rename from cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tests/__init__.py rename to cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/__init__.py diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/conftest.py b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/conftest.py new file mode 100644 index 000000000..6bb3ec2d7 --- /dev/null +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/conftest.py @@ -0,0 +1,3 @@ +"""Test Configuration.""" + +pytest_plugins = ("singer_sdk.testing.pytest_plugin",) diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/{{ 'test' }}_core.py similarity index 57% rename from cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py rename to cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/{{ 'test' }}_core.py index 966579da0..26eef158d 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tests/{{ 'test' }}_core.py @@ -2,25 +2,21 @@ import datetime -from singer_sdk.testing import get_standard_tap_tests +from singer_sdk.testing import get_tap_test_class from {{ cookiecutter.library_name }}.tap import Tap{{ cookiecutter.source_name }} SAMPLE_CONFIG = { - "start_date": 
datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d") + "start_date": datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d"), # TODO: Initialize minimal tap config } # Run standard built-in tap tests from the SDK: -def test_standard_tap_tests(): - """Run standard tap tests from the SDK.""" - tests = get_standard_tap_tests( - Tap{{ cookiecutter.source_name }}, - config=SAMPLE_CONFIG - ) - for test in tests: - test() +TestTap{{ cookiecutter.source_name }} = get_tap_test_class( + tap_class=Tap{{ cookiecutter.source_name }}, + config=SAMPLE_CONFIG, +) # TODO: Create additional tests as appropriate for your tap. diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tox.ini b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tox.ini index 0eda0353f..70b9e4ac7 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tox.ini +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/tox.ini @@ -1,53 +1,19 @@ # This file can be used to customize tox tests as well as other test frameworks like flake8 and mypy [tox] -envlist = py38 -; envlist = py37, py38, py39 +envlist = py37, py38, py39, py310, py311 isolated_build = true [testenv] -whitelist_externals = poetry - +allowlist_externals = poetry commands = poetry install -v poetry run pytest - poetry run black --check {{cookiecutter.library_name}}/ - poetry run flake8 {{cookiecutter.library_name}} - poetry run pydocstyle {{cookiecutter.library_name}} - poetry run mypy {{cookiecutter.library_name}} --exclude='{{cookiecutter.library_name}}/tests' [testenv:pytest] # Run the python tests. # To execute, run `tox -e pytest` -envlist = py37, py38, py39 +envlist = py37, py38, py39, py310, py311 commands = poetry install -v poetry run pytest - -[testenv:format] -# Attempt to auto-resolve lint errors before they are raised. -# To execute, run `tox -e format` -commands = - poetry install -v - poetry run black {{cookiecutter.library_name}}/ - poetry run isort {{cookiecutter.library_name}} - -[testenv:lint] -# Raise an error if lint and style standards are not met. -# To execute, run `tox -e lint` -commands = - poetry install -v - poetry run black --check --diff {{cookiecutter.library_name}}/ - poetry run isort --check {{cookiecutter.library_name}} - poetry run flake8 {{cookiecutter.library_name}} - poetry run pydocstyle {{cookiecutter.library_name}} - # refer to mypy.ini for specific settings - poetry run mypy {{cookiecutter.library_name}} --exclude='{{cookiecutter.library_name}}/tests' - -[flake8] -ignore = W503 -max-line-length = 88 -max-complexity = 10 - -[pydocstyle] -ignore = D105,D203,D213 diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} new file mode 100644 index 000000000..62913ff3a --- /dev/null +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + + Copyright {% now 'utc', '%Y' %} {{ cookiecutter.admin_name }} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/__init__.py b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/__init__.py index e69de29bb..b5c6e813f 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/__init__.py +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/__init__.py @@ -0,0 +1 @@ +"""Tap for {{ cookiecutter.source_name }}.""" diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tap.py b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tap.py index c62a43250..df3f9f754 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tap.py +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/tap.py @@ -1,66 +1,70 @@ """{{ cookiecutter.source_name }} tap class.""" -from typing import List +from __future__ import annotations -from singer_sdk import {{ 'SQL' if cookiecutter.stream_type == 'SQL' else '' }}Tap, {{ 'SQL' if cookiecutter.stream_type == 'SQL' else '' }}Stream +from singer_sdk import {{ 'SQL' if cookiecutter.stream_type == 'SQL' else '' }}Tap from singer_sdk import typing as th # JSON schema typing helpers {%- if cookiecutter.stream_type == "SQL" %} + from {{ cookiecutter.library_name }}.client import {{ cookiecutter.source_name }}Stream {%- else %} -# TODO: Import your custom stream types here: -from {{ cookiecutter.library_name }}.streams import ( - {{ cookiecutter.source_name }}Stream, -{%- if cookiecutter.stream_type in ("GraphQL", "REST", "Other") %} - UsersStream, - GroupsStream, -{%- endif %} -) -{%- endif %} -{%- if cookiecutter.stream_type in ("GraphQL", "REST", "Other") %} -# TODO: Compile a list of custom stream types here -# OR rewrite discover_streams() below with your custom logic. -STREAM_TYPES = [ - UsersStream, - GroupsStream, -] +# TODO: Import your custom stream types here: +from {{ cookiecutter.library_name }} import streams {%- endif %} class Tap{{ cookiecutter.source_name }}({{ 'SQL' if cookiecutter.stream_type == 'SQL' else '' }}Tap): """{{ cookiecutter.source_name }} tap class.""" + name = "{{ cookiecutter.tap_id }}" + {%- if cookiecutter.stream_type == "SQL" %} + default_stream_class = {{ cookiecutter.source_name }}Stream + {%- endif %} + # TODO: Update this section with the actual config values you expect: config_jsonschema = th.PropertiesList( th.Property( "auth_token", th.StringType, required=True, - description="The token to authenticate against the API service" + secret=True, # Flag config as protected. 
+ description="The token to authenticate against the API service", ), th.Property( "project_ids", th.ArrayType(th.StringType), required=True, - description="Project IDs to replicate" + description="Project IDs to replicate", ), th.Property( "start_date", th.DateTimeType, - description="The earliest record date to sync" + description="The earliest record date to sync", ), th.Property( "api_url", th.StringType, default="https://api.mysample.com", - description="The url for the API service" + description="The url for the API service", ), ).to_dict() {%- if cookiecutter.stream_type in ("GraphQL", "REST", "Other") %} - def discover_streams(self) -> List[Stream]: - """Return a list of discovered streams.""" - return [stream_class(tap=self) for stream_class in STREAM_TYPES] + def discover_streams(self) -> list[streams.{{ cookiecutter.source_name }}Stream]: + """Return a list of discovered streams. + + Returns: + A list of discovered streams. + """ + return [ + streams.GroupsStream(self), + streams.UsersStream(self), + ] {%- endif %} + + +if __name__ == "__main__": + Tap{{ cookiecutter.source_name }}.cli() diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'GraphQL' == cookiecutter.stream_type %}client.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'GraphQL' == cookiecutter.stream_type %}client.py{%endif%} index f96869d44..66505556d 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'GraphQL' == cookiecutter.stream_type %}client.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'GraphQL' == cookiecutter.stream_type %}client.py{%endif%} @@ -1,11 +1,14 @@ """GraphQL client handling, including {{ cookiecutter.source_name }}Stream base class.""" -import requests -from pathlib import Path -from typing import Any, Dict, Optional, Union, List, Iterable +from __future__ import annotations +from typing import Iterable + +import requests # noqa: TCH002 from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream + {%- if cookiecutter.auth_method in ("OAuth2", "JWT") %} + from {{ cookiecutter.library_name }}.auth import {{ cookiecutter.source_name }}Authenticator {%- endif %} @@ -13,26 +16,32 @@ from {{ cookiecutter.library_name }}.auth import {{ cookiecutter.source_name }}A class {{ cookiecutter.source_name }}Stream({{ cookiecutter.stream_type }}Stream): """{{ cookiecutter.source_name }} stream class.""" - # TODO: Set the API's base URL here: @property def url_base(self) -> str: """Return the API URL root, configurable via tap settings.""" - return self.config["api_url"] - - # Alternatively, use a static string for url_base: - # url_base = "https://api.mysample.com" + # TODO: hardcode a value here, or retrieve it from self.config + return "https://api.mysample.com" {%- if cookiecutter.auth_method in ("OAuth2", "JWT") %} + @property def authenticator(self) -> {{ cookiecutter.source_name }}Authenticator: - """Return a new authenticator object.""" + """Return a new authenticator object. + + Returns: + An authenticator instance. + """ return {{ cookiecutter.source_name }}Authenticator.create_for_stream(self) {%- endif %} @property def http_headers(self) -> dict: - """Return the http headers needed.""" + """Return the http headers needed. + + Returns: + A dictionary of HTTP headers. 
+ """ headers = {} if "user_agent" in self.config: headers["User-Agent"] = self.config.get("user_agent") @@ -43,13 +52,31 @@ class {{ cookiecutter.source_name }}Stream({{ cookiecutter.stream_type }}Stream) return headers def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result rows.""" + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ # TODO: Parse response body and return a set of records. resp_json = response.json() - for row in resp_json.get("<TODO>"): - yield row + yield from resp_json.get("<TODO>") + + def post_process( + self, + row: dict, + context: dict | None = None, # noqa: ARG002 + ) -> dict | None: + """As needed, append or transform raw data to match expected structure. + + Args: + row: An individual record from the stream. + context: The stream context. - def post_process(self, row: dict, context: Optional[dict] = None) -> dict: - """As needed, append or transform raw data to match expected structure.""" + Returns: + The updated record dictionary, or ``None`` to skip the record. + """ # TODO: Delete this method if not needed. return row diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'Other' == cookiecutter.stream_type %}client.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'Other' == cookiecutter.stream_type %}client.py{%endif%} index 27d8be064..c2def6322 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'Other' == cookiecutter.stream_type %}client.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'Other' == cookiecutter.stream_type %}client.py{%endif%} @@ -1,8 +1,8 @@ """Custom client handling, including {{ cookiecutter.source_name }}Stream base class.""" -import requests -from pathlib import Path -from typing import Any, Dict, Optional, Union, List, Iterable +from __future__ import annotations + +from typing import Iterable from singer_sdk.streams import Stream @@ -10,15 +10,25 @@ from singer_sdk.streams import Stream class {{ cookiecutter.source_name }}Stream(Stream): """Stream class for {{ cookiecutter.source_name }} streams.""" - def get_records(self, context: Optional[dict]) -> Iterable[dict]: - """Return a generator of row-type dictionary objects. + def get_records( + self, + context: dict | None, # noqa: ARG002 + ) -> Iterable[dict]: + """Return a generator of record-type dictionary objects. The optional `context` argument is used to identify a specific slice of the stream if partitioning is required for the stream. Most implementations do not require partitioning and should ignore the `context` argument. + + Args: + context: Stream partition or context dictionary. + + Raises: + NotImplementedError: If the implementation is TODO """ # TODO: Write logic to extract data from the upstream source. 
- # rows = mysource.getall() - # for row in rows: - # yield row.to_dict() - raise NotImplementedError("The method is not yet implemented (TODO)") + # records = mysource.getall() # noqa: ERA001 + # for record in records: + # yield record.to_dict() # noqa: ERA001 + errmsg = "The method is not yet implemented (TODO)" + raise NotImplementedError(errmsg) diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'REST' == cookiecutter.stream_type %}client.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'REST' == cookiecutter.stream_type %}client.py{%endif%} index b6a393749..dae2269df 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'REST' == cookiecutter.stream_type %}client.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'REST' == cookiecutter.stream_type %}client.py{%endif%} @@ -1,120 +1,175 @@ """REST client handling, including {{ cookiecutter.source_name }}Stream base class.""" -import requests -from pathlib import Path -from typing import Any, Dict, Optional, Union, List, Iterable +from __future__ import annotations -from memoization import cached +{% if cookiecutter.auth_method in ("OAuth2", "JWT") -%} +import sys +{% endif -%} +from pathlib import Path +from typing import Any, Callable, Iterable +import requests +{% if cookiecutter.auth_method == "API Key" -%} +from singer_sdk.authenticators import APIKeyAuthenticator from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream -{%- if cookiecutter.auth_method == "API Key" %} -from singer_sdk.authenticators import APIKeyAuthenticator -{%- elif cookiecutter.auth_method == "Bearer Token" %} + +{% elif cookiecutter.auth_method == "Bearer Token" -%} from singer_sdk.authenticators import BearerTokenAuthenticator -{%- elif cookiecutter.auth_method == "Basic Auth" %} +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 +from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream + +{% elif cookiecutter.auth_method == "Basic Auth" -%} from singer_sdk.authenticators import BasicAuthenticator -{%- elif cookiecutter.auth_method in ("OAuth2", "JWT") %} +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 +from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream + +{% elif cookiecutter.auth_method == "Custom or N/A" -%} +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 +from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream + +{% elif cookiecutter.auth_method in ("OAuth2", "JWT") -%} +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseAPIPaginator # noqa: TCH002 +from singer_sdk.streams import {{ cookiecutter.stream_type }}Stream from {{ cookiecutter.library_name }}.auth import {{ cookiecutter.source_name }}Authenticator -{%- endif %} +{% endif -%} + +{%- if cookiecutter.auth_method in ("OAuth2", "JWT") -%} +if sys.version_info >= (3, 8): + from functools import cached_property +else: + from cached_property import cached_property + +{% endif -%} +_Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest] SCHEMAS_DIR = 
Path(__file__).parent / Path("./schemas") class {{ cookiecutter.source_name }}Stream({{ cookiecutter.stream_type }}Stream): """{{ cookiecutter.source_name }} stream class.""" - # TODO: Set the API's base URL here: - url_base = "https://api.mysample.com" - - # OR use a dynamic url_base: - # @property - # def url_base(self) -> str: - # """Return the API URL root, configurable via tap settings.""" - # return self.config["api_url"] + @property + def url_base(self) -> str: + """Return the API URL root, configurable via tap settings.""" + # TODO: hardcode a value here, or retrieve it from self.config + return "https://api.mysample.com" records_jsonpath = "$[*]" # Or override `parse_response`. - next_page_token_jsonpath = "$.next_page" # Or override `get_next_page_token`. + + # Set this value or override `get_new_paginator`. + next_page_token_jsonpath = "$.next_page" # noqa: S105 {%- if cookiecutter.auth_method in ("OAuth2", "JWT") %} - @property - @cached - def authenticator(self) -> {{ cookiecutter.source_name }}Authenticator: - """Return a new authenticator object.""" + @cached_property + def authenticator(self) -> _Auth: + """Return a new authenticator object. + + Returns: + An authenticator instance. + """ return {{ cookiecutter.source_name }}Authenticator.create_for_stream(self) {%- elif cookiecutter.auth_method == "API Key" %} @property def authenticator(self) -> APIKeyAuthenticator: - """Return a new authenticator object.""" + """Return a new authenticator object. + + Returns: + An authenticator instance. + """ return APIKeyAuthenticator.create_for_stream( self, key="x-api-key", - value=self.config.get("api_key"), - location="header" + value=self.config.get("auth_token", ""), + location="header", ) {%- elif cookiecutter.auth_method == "Bearer Token" %} @property def authenticator(self) -> BearerTokenAuthenticator: - """Return a new authenticator object.""" + """Return a new authenticator object. + + Returns: + An authenticator instance. + """ return BearerTokenAuthenticator.create_for_stream( self, - token=self.config.get("api_key") + token=self.config.get("auth_token", ""), ) {%- elif cookiecutter.auth_method == "Basic Auth" %} @property def authenticator(self) -> BasicAuthenticator: - """Return a new authenticator object.""" + """Return a new authenticator object. + + Returns: + An authenticator instance. + """ return BasicAuthenticator.create_for_stream( self, - username=self.config.get("username"), - password=self.config.get("password"), + username=self.config.get("username", ""), + password=self.config.get("password", ""), ) {%- endif %} @property def http_headers(self) -> dict: - """Return the http headers needed.""" + """Return the http headers needed. + + Returns: + A dictionary of HTTP headers. + """ headers = {} if "user_agent" in self.config: headers["User-Agent"] = self.config.get("user_agent") {%- if cookiecutter.auth_method not in ("OAuth2", "JWT") %} # If not using an authenticator, you may also provide inline auth headers: - # headers["Private-Token"] = self.config.get("auth_token") + # headers["Private-Token"] = self.config.get("auth_token") # noqa: ERA001 {%- endif %} return headers - def get_next_page_token( - self, response: requests.Response, previous_token: Optional[Any] - ) -> Optional[Any]: - """Return a token for identifying next page or None if no more pages.""" - # TODO: If pagination is required, return a token which can be used to get the - # next page. If this is the final page, return "None" to end the - # pagination loop. 
- if self.next_page_token_jsonpath: - all_matches = extract_jsonpath( - self.next_page_token_jsonpath, response.json() - ) - first_match = next(iter(all_matches), None) - next_page_token = first_match - else: - next_page_token = response.headers.get("X-Next-Page", None) - - return next_page_token + def get_new_paginator(self) -> BaseAPIPaginator: + """Create a new pagination helper instance. + + If the source API can make use of the `next_page_token_jsonpath` + attribute, or it contains a `X-Next-Page` header in the response + then you can remove this method. + + If you need custom pagination that uses page numbers, "next" links, or + other approaches, please read the guide: https://sdk.meltano.com/en/v0.25.0/guides/pagination-classes.html. + + Returns: + A pagination helper instance. + """ + return super().get_new_paginator() def get_url_params( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> Dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization.""" + self, + context: dict | None, # noqa: ARG002 + next_page_token: Any | None, # noqa: ANN401 + ) -> dict[str, Any]: + """Return a dictionary of values to be used in URL parameterization. + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary of URL query parameters. + """ params: dict = {} if next_page_token: params["page"] = next_page_token @@ -124,21 +179,49 @@ class {{ cookiecutter.source_name }}Stream({{ cookiecutter.stream_type }}Stream) return params def prepare_request_payload( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> Optional[dict]: + self, + context: dict | None, # noqa: ARG002 + next_page_token: Any | None, # noqa: ARG002, ANN401 + ) -> dict | None: """Prepare the data payload for the REST API request. By default, no payload will be sent (return None). + + Args: + context: The stream context. + next_page_token: The next page index or value. + + Returns: + A dictionary with the JSON body for a POST requests. """ # TODO: Delete this method if no payload is required. (Most REST APIs.) return None def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result rows.""" + """Parse the response and return an iterator of result records. + + Args: + response: The HTTP ``requests.Response`` object. + + Yields: + Each record from the source. + """ # TODO: Parse response body and return a set of records. yield from extract_jsonpath(self.records_jsonpath, input=response.json()) - def post_process(self, row: dict, context: Optional[dict]) -> dict: - """As needed, append or transform raw data to match expected structure.""" + def post_process( + self, + row: dict, + context: dict | None = None, # noqa: ARG002 + ) -> dict | None: + """As needed, append or transform raw data to match expected structure. + + Args: + row: An individual record from the stream. + context: The stream context. + + Returns: + The updated record dictionary, or ``None`` to skip the record. + """ # TODO: Delete this method if not needed. 
return row diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' != cookiecutter.stream_type %}streams.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' != cookiecutter.stream_type %}streams.py{%endif%} index b6bb35daa..8272cbc24 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' != cookiecutter.stream_type %}streams.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' != cookiecutter.stream_type %}streams.py{%endif%} @@ -1,7 +1,9 @@ """Stream type classes for {{ cookiecutter.tap_id }}.""" +from __future__ import annotations + +import typing as t from pathlib import Path -from typing import Any, Dict, Optional, Union, List, Iterable from singer_sdk import typing as th # JSON Schema typing helpers @@ -18,25 +20,26 @@ SCHEMAS_DIR = Path(__file__).parent / Path("./schemas") class UsersStream({{ cookiecutter.source_name }}Stream): """Define custom stream.""" + name = "users" # Optionally, you may also use `schema_filepath` in place of `schema`: - # schema_filepath = SCHEMAS_DIR / "users.json" + # schema_filepath = SCHEMAS_DIR / "users.json" # noqa: ERA001 schema = th.PropertiesList( th.Property("name", th.StringType), th.Property( "id", th.StringType, - description="The user's system ID" + description="The user's system ID", ), th.Property( "age", th.IntegerType, - description="The user's age in years" + description="The user's age in years", ), th.Property( "email", th.StringType, - description="The user's email address" + description="The user's email address", ), th.Property( "address", @@ -46,13 +49,13 @@ class UsersStream({{ cookiecutter.source_name }}Stream): th.Property( "state", th.StringType, - description="State name in ISO 3166-2 format" + description="State name in ISO 3166-2 format", ), th.Property("zip", th.StringType), - ) + ), ), ).to_dict() - primary_keys = ["id"] + primary_keys: t.ClassVar[list[str]] = ["id"] replication_key = None graphql_query = """ users { @@ -72,13 +75,14 @@ class UsersStream({{ cookiecutter.source_name }}Stream): class GroupsStream({{ cookiecutter.source_name }}Stream): """Define custom stream.""" + name = "groups" schema = th.PropertiesList( th.Property("name", th.StringType), th.Property("id", th.StringType), th.Property("modified", th.DateTimeType), ).to_dict() - primary_keys = ["id"] + primary_keys: t.ClassVar[list[str]] = ["id"] replication_key = "modified" graphql_query = """ groups { @@ -96,37 +100,38 @@ class GroupsStream({{ cookiecutter.source_name }}Stream): class UsersStream({{ cookiecutter.source_name }}Stream): """Define custom stream.""" + name = "users" {%- if cookiecutter.stream_type == "REST" %} path = "/users" {%- endif %} - primary_keys = ["id"] + primary_keys: t.ClassVar[list[str]] = ["id"] replication_key = None # Optionally, you may also use `schema_filepath` in place of `schema`: - # schema_filepath = SCHEMAS_DIR / "users.json" + # schema_filepath = SCHEMAS_DIR / "users.json" # noqa: ERA001 schema = th.PropertiesList( th.Property("name", th.StringType), th.Property( "id", th.StringType, - description="The user's system ID" + description="The user's system ID", ), th.Property( "age", th.IntegerType, - description="The user's age in years" + description="The user's age in years", ), th.Property( "email", th.StringType, - description="The user's email address" + description="The user's email address", ), 
th.Property("street", th.StringType), th.Property("city", th.StringType), th.Property( "state", th.StringType, - description="State name in ISO 3166-2 format" + description="State name in ISO 3166-2 format", ), th.Property("zip", th.StringType), ).to_dict() @@ -134,11 +139,12 @@ class UsersStream({{ cookiecutter.source_name }}Stream): class GroupsStream({{ cookiecutter.source_name }}Stream): """Define custom stream.""" + name = "groups" {%- if cookiecutter.stream_type == "REST" %} path = "/groups" {%- endif %} - primary_keys = ["id"] + primary_keys: t.ClassVar[list[str]] = ["id"] replication_key = "modified" schema = th.PropertiesList( th.Property("name", th.StringType), diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' == cookiecutter.stream_type %}client.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' == cookiecutter.stream_type %}client.py{%endif%} index 8e227bc38..a34cee4d0 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' == cookiecutter.stream_type %}client.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if 'SQL' == cookiecutter.stream_type %}client.py{%endif%} @@ -3,16 +3,26 @@ This includes {{ cookiecutter.source_name }}Stream and {{ cookiecutter.source_name }}Connector. """ -import sqlalchemy +from __future__ import annotations +from typing import Any, Iterable + +import sqlalchemy # noqa: TCH002 from singer_sdk import SQLConnector, SQLStream class {{ cookiecutter.source_name }}Connector(SQLConnector): """Connects to the {{ cookiecutter.source_name }} SQL source.""" - def get_sqlalchemy_url(cls, config: dict) -> str: - """Concatenate a SQLAlchemy URL for use in connecting to the source.""" + def get_sqlalchemy_url(self, config: dict) -> str: + """Concatenate a SQLAlchemy URL for use in connecting to the source. + + Args: + config: A dict with connection parameters + + Returns: + SQLAlchemy connection string + """ # TODO: Replace this with a valid connection string for your source: return ( f"awsathena+rest://{config['aws_access_key_id']}:" @@ -23,15 +33,26 @@ class {{ cookiecutter.source_name }}Connector(SQLConnector): ) @staticmethod - def to_jsonschema_type(sql_type: sqlalchemy.types.TypeEngine) -> dict: + def to_jsonschema_type( + from_type: str + | sqlalchemy.types.TypeEngine + | type[sqlalchemy.types.TypeEngine], + ) -> dict: """Returns a JSON Schema equivalent for the given SQL type. Developers may optionally add custom logic before calling the default implementation inherited from the base class. + + Args: + from_type: The SQL type as a string or as a TypeEngine. If a TypeEngine is + provided, it may be provided as a class or a specific object instance. + + Returns: + A compatible JSON Schema type definition. """ - # Optionally, add custom logic before calling the super(). + # Optionally, add custom logic before calling the parent SQLConnector method. # You may delete this method if overrides are not needed. - return super().to_jsonschema_type(sql_type) + return SQLConnector.to_jsonschema_type(from_type) @staticmethod def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: @@ -39,10 +60,16 @@ class {{ cookiecutter.source_name }}Connector(SQLConnector): Developers may optionally add custom logic before calling the default implementation inherited from the base class. 
+ + Args: + jsonschema_type: A dict + + Returns: + SQLAlchemy type """ - # Optionally, add custom logic before calling the super(). + # Optionally, add custom logic before calling the parent SQLConnector method. # You may delete this method if overrides are not needed. - return super().to_sql_type(jsonschema_type) + return SQLConnector.to_sql_type(jsonschema_type) class {{ cookiecutter.source_name }}Stream(SQLStream): @@ -50,8 +77,8 @@ class {{ cookiecutter.source_name }}Stream(SQLStream): connector_class = {{ cookiecutter.source_name }}Connector - def get_records(self, partition: Optional[dict]) -> Iterable[Dict[str, Any]]: - """Return a generator of row-type dictionary objects. + def get_records(self, partition: dict | None) -> Iterable[dict[str, Any]]: + """Return a generator of record-type dictionary objects. Developers may optionally add custom logic before calling the default implementation inherited from the base class. diff --git a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if cookiecutter.auth_method in ('OAuth2', 'JWT')%}auth.py{%endif%} b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if cookiecutter.auth_method in ('OAuth2', 'JWT')%}auth.py{%endif%} index f4ff1b16c..762cca155 100644 --- a/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if cookiecutter.auth_method in ('OAuth2', 'JWT')%}auth.py{%endif%} +++ b/cookiecutter/tap-template/{{cookiecutter.tap_id}}/{{cookiecutter.library_name}}/{%if cookiecutter.auth_method in ('OAuth2', 'JWT')%}auth.py{%endif%} @@ -1,9 +1,13 @@ """{{ cookiecutter.source_name }} Authentication.""" -{% if cookiecutter.auth_method not in ("Basic Auth", "OAuth2", "JWT") %} +from __future__ import annotations + +{%- if cookiecutter.auth_method not in ("Basic Auth", "OAuth2", "JWT") %} + # TODO: Delete this file or add custom authentication logic as needed. {%- elif cookiecutter.auth_method == "OAuth2" %} + from singer_sdk.authenticators import OAuthAuthenticator, SingletonMeta @@ -14,25 +18,38 @@ class {{ cookiecutter.source_name }}Authenticator(OAuthAuthenticator, metaclass= @property def oauth_request_body(self) -> dict: - """Define the OAuth request body for the {{ cookiecutter.source_name }} API.""" + """Define the OAuth request body for the AutomaticTestTap API. + + Returns: + A dict with the request body + """ # TODO: Define the request body needed for the API. return { - 'resource': 'https://analysis.windows.net/powerbi/api', - 'scope': self.oauth_scopes, - 'client_id': self.config["client_id"], - 'username': self.config["username"], - 'password': self.config["password"], - 'grant_type': 'password', + "resource": "https://analysis.windows.net/powerbi/api", + "scope": self.oauth_scopes, + "client_id": self.config["client_id"], + "username": self.config["username"], + "password": self.config["password"], + "grant_type": "password", } @classmethod - def create_for_stream(cls, stream) -> "{{ cookiecutter.source_name }}Authenticator": + def create_for_stream(cls, stream) -> {{ cookiecutter.source_name }}Authenticator: # noqa: ANN001 + """Instantiate an authenticator for a specific Singer stream. + + Args: + stream: The Singer stream instance. + + Returns: + A new authenticator. 
+ """ return cls( stream=stream, auth_endpoint="TODO: OAuth Endpoint URL", oauth_scopes="TODO: OAuth Scopes", ) {%- elif cookiecutter.auth_method == "JWT" %} + from singer_sdk.authenticators import OAuthJWTAuthenticator @@ -40,10 +57,21 @@ class {{ cookiecutter.source_name }}Authenticator(OAuthJWTAuthenticator): """Authenticator class for {{ cookiecutter.source_name }}.""" @classmethod - def create_for_stream(cls, stream) -> "{{ cookiecutter.source_name }}Authenticator": + def create_for_stream( + cls, + stream, # noqa: ANN001 + ) -> {{ cookiecutter.source_name }}Authenticator: + """Instantiate an authenticator for a specific Singer stream. + + Args: + stream: The Singer stream instance. + + Returns: + A new authenticator. + """ return cls( stream=stream, auth_endpoint="TODO: OAuth Endpoint URL", oauth_scopes="TODO: OAuth Scopes", ) -{% endif %} +{%- endif %} diff --git a/cookiecutter/target-template/cookiecutter.json b/cookiecutter/target-template/cookiecutter.json index c07a7ac6a..c7c31835a 100644 --- a/cookiecutter/target-template/cookiecutter.json +++ b/cookiecutter/target-template/cookiecutter.json @@ -1,7 +1,21 @@ { - "destination_name": "MyDestinationName", - "admin_name": "FirstName LastName", - "target_id": "target-{{ cookiecutter.destination_name.lower() }}", - "library_name": "{{ cookiecutter.target_id.replace('-', '_') }}", - "serialization_method": ["Per record", "Per batch", "SQL"] -} \ No newline at end of file + "destination_name": "MyDestinationName", + "admin_name": "FirstName LastName", + "admin_email": "firstname.lastname@example.com", + "target_id": "target-{{ cookiecutter.destination_name.lower() }}", + "library_name": "{{ cookiecutter.target_id.replace('-', '_') }}", + "variant": "None (Skip)", + "serialization_method": ["Per record", "Per batch", "SQL"], + "include_ci_files": ["GitHub", "None (Skip)"], + "license": ["Apache-2.0"], + "__prompts__": { + "name": "The name of the mapper, in CamelCase", + "admin_name": "Provide your [bold yellow]full name[/]", + "admin_email": "Provide your [bold yellow]email[/]", + "mapper_id": "The ID of the tap, in kebab-case", + "library_name": "The name of the library, in snake_case. 
This is how the library will be imported in Python.", + "serialization_method": "The serialization method to use for loading data", + "include_ci_files": "Whether to include CI files for a common CI services", + "license": "The license for the project" + } +} diff --git a/cookiecutter/target-template/cookiecutter.tests.yml b/cookiecutter/target-template/cookiecutter.tests.yml index 514f220bf..e0747fe8f 100644 --- a/cookiecutter/target-template/cookiecutter.tests.yml +++ b/cookiecutter/target-template/cookiecutter.tests.yml @@ -8,3 +8,7 @@ tests: - destination_name: SQLSinkTest target_id: target-sqlsink-test serialization_method: SQL + - destination_name: TargetRecordSinkTest + target_id: target-recordsink-test + variant: meltanolabs + serialization_method: Per record \ No newline at end of file diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} b/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} new file mode 100644 index 000000000..4544911a6 --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/workflows/{% if cookiecutter.include_ci_files == 'GitHub' %}test.yml{%endif%} @@ -0,0 +1,30 @@ +### A CI workflow template that runs linting and python testing +### TODO: Modify as needed or as desired. + +name: Test {{cookiecutter.target_id}} + +on: [push] + +jobs: + pytest: + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: {{ '${{secrets.GITHUB_TOKEN}}' }} + strategy: + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python {{ '${{ matrix.python-version }}' }} + uses: actions/setup-python@v4 + with: + python-version: {{ '${{ matrix.python-version }}' }} + - name: Install Poetry + run: | + pip install poetry + - name: Install dependencies + run: | + poetry install + - name: Test with pytest + run: | + poetry run pytest diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} b/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} new file mode 100644 index 000000000..933e6b1c2 --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/.github/{% if cookiecutter.include_ci_files == 'GitHub' %}dependabot.yml{%endif%} @@ -0,0 +1,26 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: "daily" + commit-message: + prefix: "chore(deps): " + prefix-development: "chore(deps-dev): " + - package-ecosystem: pip + directory: "/.github/workflows" + schedule: + interval: daily + commit-message: + prefix: "ci: " + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "ci: " diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/.pre-commit-config.yaml b/cookiecutter/target-template/{{cookiecutter.target_id}}/.pre-commit-config.yaml new file mode 100644 index 000000000..d8bad86cf --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +ci: + autofix_prs: true + autoupdate_schedule: weekly + autoupdate_commit_msg: 'chore: pre-commit autoupdate' + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-json + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + +- repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.23.3 + hooks: + - id: check-dependabot + - id: check-github-workflows + +- repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.282 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + +- repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black + +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.4.1 + hooks: + - id: mypy + additional_dependencies: + {%- if cookiecutter.serialization_method != "SQL" %} + - sqlalchemy-stubs + {%- else %} + - types-requests + {%- endif %} diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/README.md b/cookiecutter/target-template/{{cookiecutter.target_id}}/README.md index 0fb9f105e..983be1ce5 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/README.md +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/README.md @@ -4,19 +4,39 @@ Build with the [Meltano Target SDK](https://sdk.meltano.com). +<!-- + +Developer TODO: Update the below as needed to correctly describe the install procedure. For instance, if you do not have a PyPi repo, or if you want users to directly install from your git repo, you can modify this step as appropriate. + ## Installation -- [ ] `Developer TODO:` Update the below as needed to correctly describe the install procedure. For instance, if you do not have a PyPi repo, or if you want users to directly install from your git repo, you can modify this step as appropriate. +Install from PyPi: ```bash pipx install {{ cookiecutter.target_id }} ``` +Install from GitHub: + +```bash +pipx install git+https://github.com/ORG_NAME/{{ cookiecutter.target_id }}.git@main +``` + +--> + ## Configuration ### Accepted Config Options -- [ ] `Developer TODO:` Provide a list of config options accepted by the target. +<!-- +Developer TODO: Provide a list of config options accepted by the target. 
+ +This section can be created by copy-pasting the CLI output from: + +``` +{{ cookiecutter.target_id }} --about --format=markdown +``` +--> A full list of supported settings and capabilities for this target is available by running: @@ -33,7 +53,9 @@ environment variable is set either in the terminal context or in the `.env` file ### Source Authentication and Authorization -- [ ] `Developer TODO:` If your target requires special access on the source system, or any special authentication requirements, provide those here. +<!-- +Developer TODO: If your target requires special access on the destination system, or any special authentication requirements, provide those here. +--> ## Usage @@ -50,7 +72,7 @@ tap-carbon-intensity | {{ cookiecutter.target_id }} --config /path/to/{{ cookiec ## Developer Resources -- [ ] `Developer TODO:` As a first step, scan the entire project for the text "`TODO:`" and complete any recommended steps, deleting the "TODO" references once completed. +Follow these instructions to contribute to this project. ### Initialize your Development Environment @@ -61,7 +83,7 @@ poetry install ### Create and Run Tests -Create tests within the `{{ cookiecutter.library_name }}/tests` subfolder and +Create tests within the `tests` subfolder and then run: ```bash @@ -79,8 +101,11 @@ poetry run {{cookiecutter.target_id}} --help _**Note:** This target will work in any Singer environment and does not require Meltano. Examples here are for convenience and to streamline end-to-end orchestration scenarios._ -Your project comes with a custom `meltano.yml` project file already created. Open the `meltano.yml` and follow any _"TODO"_ items listed in +<!-- +Developer TODO: +Your project comes with a custom `meltano.yml` project file already created. Open the `meltano.yml` and follow any "TODO" items listed in the file. +--> Next, install Meltano (if you haven't already) and any needed plugins: @@ -98,10 +123,10 @@ Now you can test and orchestrate using Meltano: # Test invocation: meltano invoke {{ cookiecutter.target_id }} --version # OR run a test `elt` pipeline with the Carbon Intensity sample tap: -meltano elt tap-carbon-intensity {{ cookiecutter.target_id }} +meltano run tap-carbon-intensity {{ cookiecutter.target_id }} ``` ### SDK Dev Guide -See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html) for more instructions on how to use the Meltano SDK to +See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html) for more instructions on how to use the Meltano Singer SDK to develop your own Singer taps and targets. diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/meltano.yml b/cookiecutter/target-template/{{cookiecutter.target_id}}/meltano.yml index 000712603..da876d4df 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/meltano.yml +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/meltano.yml @@ -1,6 +1,9 @@ version: 1 send_anonymous_usage_stats: true project_id: "{{cookiecutter.target_id}}" +default_environment: test +environments: +- name: test plugins: extractors: [] loaders: @@ -8,9 +11,9 @@ plugins: namespace: "{{cookiecutter.library_name}}" pip_url: -e . 
capabilities: - - state - - catalog - - discover + - about + - stream-maps + - record-flattening config: start_date: '2010-01-01T00:00:00Z' settings: diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/pyproject.toml b/cookiecutter/target-template/{{cookiecutter.target_id}}/pyproject.toml index ac8526c05..523561b8d 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/pyproject.toml +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/pyproject.toml @@ -1,33 +1,56 @@ [tool.poetry] +{%- if cookiecutter.variant != "None (Skip)" %} +name = "{{cookiecutter.variant}}-{{cookiecutter.target_id}}" +{%- else %} name = "{{cookiecutter.target_id}}" +{%- endif %} version = "0.0.1" -description = "`{{cookiecutter.target_id}}` is a Singer target for {{cookiecutter.destination_name}}, built with the Meltano SDK for Singer Targets." -authors = ["{{ cookiecutter.admin_name }}"] +description = "`{{cookiecutter.target_id}}` is a Singer target for {{cookiecutter.destination_name}}, built with the Meltano Singer SDK." +readme = "README.md" +authors = ["{{ cookiecutter.admin_name }} <{{ cookiecutter.admin_email }}>"] keywords = [ "ELT", "{{cookiecutter.destination_name}}", ] -license = "Apache 2.0" +license = "Apache-2.0" +{%- if cookiecutter.variant != "None (Skip)" %} +packages = [ + { include = "{{cookiecutter.library_name}}" }, +] +{%- endif %} [tool.poetry.dependencies] -python = "<3.11,>=3.7.1" -requests = "^2.25.1" -singer-sdk = "^0.8.0" +python = "<3.12,>=3.7.1" +singer-sdk = { version="^0.31.1" } +fs-s3fs = { version = "^1.1.1", optional = true } +{%- if cookiecutter.serialization_method != "SQL" %} +requests = "^2.31.0" +{%- endif %} [tool.poetry.dev-dependencies] -pytest = "^6.2.5" -tox = "^3.24.4" -flake8 = "^3.9.2" -black = "^21.9b0" -pydocstyle = "^6.1.1" -mypy = "^0.910" -types-requests = "^2.26.1" -isort = "^5.10.1" - -[tool.isort] -profile = "black" -multi_line_output = 3 # Vertical Hanging Indent -src_paths = "{{cookiecutter.library_name}}" +pytest = "^7.4.0" +singer-sdk = { version="^0.31.1", extras = ["testing"] } + +[tool.poetry.extras] +s3 = ["fs-s3fs"] + +[tool.ruff] +ignore = [ + "ANN101", # missing-type-self + "ANN102", # missing-type-cls +] +select = ["ALL"] +src = ["{{cookiecutter.library_name}}"] +target-version = "py37" + +[tool.ruff.flake8-annotations] +allow-star-arg-any = true + +[tool.ruff.isort] +known-first-party = ["{{cookiecutter.library_name}}"] + +[tool.ruff.pydocstyle] +convention = "google" [build-system] requires = ["poetry-core>=1.0.8"] diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/tests/__init__.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/__init__.py similarity index 100% rename from cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/tests/__init__.py rename to cookiecutter/target-template/{{cookiecutter.target_id}}/tests/__init__.py diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/conftest.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/conftest.py new file mode 100644 index 000000000..6bb3ec2d7 --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/conftest.py @@ -0,0 +1,3 @@ +"""Test Configuration.""" + +pytest_plugins = ("singer_sdk.testing.pytest_plugin",) diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/{{ 'test' }}_core.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/{{ 'test' }}_core.py new file 
mode 100644 index 000000000..2403b2a3a --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/tests/{{ 'test' }}_core.py @@ -0,0 +1,39 @@ +"""Tests standard target features using the built-in SDK tests library.""" + +from __future__ import annotations + +import typing as t + +import pytest +from singer_sdk.testing import get_target_test_class + +from {{ cookiecutter.library_name }}.target import Target{{ cookiecutter.destination_name }} + +# TODO: Initialize minimal target config +SAMPLE_CONFIG: dict[str, t.Any] = {} + + +# Run standard built-in target tests from the SDK: +StandardTargetTests = get_target_test_class( + target_class=Target{{ cookiecutter.destination_name }}, + config=SAMPLE_CONFIG, +) + + +class TestTarget{{ cookiecutter.destination_name }}(StandardTargetTests): # type: ignore[misc, valid-type] # noqa: E501 + """Standard Target Tests.""" + + @pytest.fixture(scope="class") + def resource(self): # noqa: ANN201 + """Generic external resource. + + This fixture is useful for setup and teardown of external resources, + such output folders, tables, buckets etc. for use during testing. + + Example usage can be found in the SDK samples test suite: + https://github.com/meltano/sdk/tree/main/tests/samples + """ + return "resource" + + +# TODO: Create additional tests as appropriate for your target. diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/tox.ini b/cookiecutter/target-template/{{cookiecutter.target_id}}/tox.ini new file mode 100644 index 000000000..70b9e4ac7 --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/tox.ini @@ -0,0 +1,19 @@ +# This file can be used to customize tox tests as well as other test frameworks like flake8 and mypy + +[tox] +envlist = py37, py38, py39, py310, py311 +isolated_build = true + +[testenv] +allowlist_externals = poetry +commands = + poetry install -v + poetry run pytest + +[testenv:pytest] +# Run the python tests. +# To execute, run `tox -e pytest` +envlist = py37, py38, py39, py310, py311 +commands = + poetry install -v + poetry run pytest diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} b/cookiecutter/target-template/{{cookiecutter.target_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} new file mode 100644 index 000000000..fa2bea100 --- /dev/null +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/{%if 'Apache-2.0' == cookiecutter.license %}LICENSE{%endif%} @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {% now 'utc', '%Y' %} {{ cookiecutter.admin_name }} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/__init__.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/__init__.py index e69de29bb..a5f25ee1b 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/__init__.py +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/__init__.py @@ -0,0 +1 @@ +"""Target for {{ cookiecutter.destination_name }}.""" diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/sinks.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/sinks.py index bf1bfb664..4e84d1284 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/sinks.py +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/sinks.py @@ -12,7 +12,15 @@ {%- set sinkclass = sinkclass_mapping[cookiecutter.serialization_method] %} -from singer_sdk.sinks import {% if sinkclass == "SQLSink" %}SQLConnector, {% endif %}{{ sinkclass }} +{%- if sinkclass == "SQLSink" %} + +from singer_sdk.connectors import SQLConnector +from singer_sdk.sinks import {{ sinkclass }} +{%- else %} + +from singer_sdk.sinks import {{ sinkclass }} +{%- endif %} + {%- if sinkclass == "SQLSink" %} @@ -44,10 +52,15 @@ class {{ cookiecutter.destination_name }}Sink({{ sinkclass }}): {% if sinkclass == "RecordSink" -%} def process_record(self, record: dict, context: dict) -> None: - """Process the record.""" + """Process the record. + + Args: + record: Individual record in the stream. + context: Stream partition or context dictionary. + """ # Sample: # ------ - # client.write(record) + # client.write(record) # noqa: ERA001 {%- elif sinkclass == "BatchSink" -%} @@ -58,6 +71,9 @@ def start_batch(self, context: dict) -> None: Developers may optionally add additional markers to the `context` dict, which is unique to this batch. + + Args: + context: Stream partition or context dictionary. """ # Sample: # ------ @@ -69,6 +85,10 @@ def process_record(self, record: dict, context: dict) -> None: Developers may optionally read or write additional markers within the passed `context` dict from the current batch. + + Args: + record: Individual record in the stream. + context: Stream partition or context dictionary. """ # Sample: # ------ @@ -76,7 +96,11 @@ def process_record(self, record: dict, context: dict) -> None: # csvfile.write(record) def process_batch(self, context: dict) -> None: - """Write out any prepped records and return once fully written.""" + """Write out any prepped records and return once fully written. + + Args: + context: Stream partition or context dictionary. 
+ """ # Sample: # ------ # client.upload(context["file_path"]) # Upload file diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/target.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/target.py index 883375cde..f28bb4e94 100644 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/target.py +++ b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/target.py @@ -4,8 +4,8 @@ {%- set target_class = "SQLTarget" if cookiecutter.serialization_method == "SQL" else "Target" %} -from singer_sdk.target_base import {{ target_class }} from singer_sdk import typing as th +from singer_sdk.target_base import {{ target_class }} from {{ cookiecutter.library_name }}.sinks import ( {{ cookiecutter.destination_name }}Sink, @@ -16,25 +16,37 @@ class Target{{ cookiecutter.destination_name }}({{ target_class }}): """Sample target for {{ cookiecutter.destination_name }}.""" name = "{{ cookiecutter.target_id }}" + config_jsonschema = th.PropertiesList( {%- if cookiecutter.serialization_method == "SQL" %} th.Property( "sqlalchemy_url", th.StringType, + secret=True, # Flag config as protected. description="SQLAlchemy connection string", ), {%- else %} th.Property( "filepath", th.StringType, - description="The path to the target output file" + description="The path to the target output file", ), th.Property( "file_naming_scheme", th.StringType, - description="The scheme with which output files will be named" + description="The scheme with which output files will be named", + ), + th.Property( + "auth_token", + th.StringType, + secret=True, # Flag config as protected. + description="The path to the target output file", ), {%- endif %} ).to_dict() default_sink_class = {{ cookiecutter.destination_name }}Sink + + +if __name__ == "__main__": + Target{{ cookiecutter.destination_name }}.cli() diff --git a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py b/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py deleted file mode 100644 index 608fb86c3..000000000 --- a/cookiecutter/target-template/{{cookiecutter.target_id}}/{{cookiecutter.library_name}}/tests/{{ 'test' }}_core.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Tests standard target features using the built-in SDK tests library.""" - -import datetime - -from typing import Dict, Any - -from singer_sdk.testing import get_standard_target_tests - -from {{ cookiecutter.library_name }}.target import Target{{ cookiecutter.destination_name }} - -SAMPLE_CONFIG: Dict[str, Any] = { - # TODO: Initialize minimal target config -} - - -# Run standard built-in target tests from the SDK: -def test_standard_target_tests(): - """Run standard target tests from the SDK.""" - tests = get_standard_target_tests( - Target{{ cookiecutter.destination_name }}, - config=SAMPLE_CONFIG, - ) - for test in tests: - test() - - -# TODO: Create additional tests as appropriate for your target. diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index f78e6aa8f..89eadbbba 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# Contributing to the SDK +# Contributing Guide _**Note:** The SDK currently works with Python versions 3.7 through 3.10.x. Python 3.6 is no longer supported._ @@ -65,7 +65,11 @@ For example: - Run pre-commit hooks: `pre-commit run --all`. 
- We use `black`, `flake8`, `isort`, `mypy` and `pyupgrade`. The project-wide max line length is `88`. + We use [Ruff](https://github.com/charliermarsh/ruff), + [black](https://black.readthedocs.io/en/stable/index.html), + [flake8](https://flake8.pycqa.org/en/latest/) and + [mypy](https://mypy.readthedocs.io/en/stable/). + The project-wide max line length is `88`. - Build documentation: `nox -rs docs` @@ -97,6 +101,63 @@ To view the code coverage report in HTML format: nox -rs coverage -- html && open ./htmlcov/index.html ``` +### Platform-specific Testing + +To mark a test as platform-specific, use the `@pytest.mark.<platform>` decorator: + +```python +import pytest + +@pytest.mark.windows +def test_windows_only(): + pass +``` + +Supported platform markers are `windows`, `darwin`, and `linux`. + +### Snapshot Testing + +We use [pytest-snapshot](https://pypi.org/project/pytest-snapshot/) for snapshot testing. + +#### Adding a new snapshot + +To add a new snapshot, use the `snapshot` fixture and mark the test with the +`@pytest.mark.snapshot` decorator. The fixture will create a new snapshot file +if one does not already exist. If a snapshot file already exists, the fixture +will compare the snapshot to the actual value and fail the test if they do not +match. + +The `tests/snapshots` directory is where snapshot files should be stored and +it's available as the `snapshot_dir` fixture. + +```python +@pytest.mark.snapshot +def test_snapshot(snapshot, snapshot_dir): + # Configure the snapshot directory + snapshot.snapshot_dir = snapshot_dir.joinpath("test_snapshot_subdir") + + snapshot_name = "test_snapshot" + expected_content = "Hello, World!" + snapshot.assert_match(expected_content, snapshot_name) +``` + +#### Generating or updating snapshots + +To update or generate snapshots, run the nox `update_snapshots` session + +```bash +nox -rs update_snapshots +``` + +or use the `--snapshot-update` flag + +```bash +poetry run pytest --snapshot-update -m 'snapshot' +``` + +This will run all tests with the `snapshot` marker and update any snapshots that have changed. +Commit the updated snapshots to your branch if they are expected to change. + ## Testing Updates to Docs Documentation runs on Sphinx, using ReadtheDocs style template, and hosting from @@ -118,7 +179,7 @@ This repo uses the [semantic-prs](https://github.com/Ezard/semantic-prs) GitHub Pull requests should be named according to the conventional commit syntax to streamline changelog and release notes management. We encourage (but do not require) the use of conventional commits in commit messages as well. -In general, PR titles should follow the format "<type>: <desc>", where type is any one of these: +In general, PR titles should follow the format `<type>: <desc>`, where type is any one of these: - `ci` - `chore` @@ -157,8 +218,7 @@ Type hints allow us to spend less time reading documentation. Public modules are All public modules in the SDK are checked for the presence of docstrings in classes and functions. We follow the [Google Style convention](https://www.sphinx-doc.org/en/master/usage/extensions/example_google.html) for Python docstrings so functions are required to have a description of every argument and the return value, if applicable. - ### What is Poetry and why do we need it? For more info on `Poetry` and `Pipx`, please see the topic in our -[python tips](python_tips.md) guide. +[python tips](./python_tips.md) guide. 
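The type-hint and docstring requirements described in the contributing guide above can be summarized with a short sketch. The function below is purely illustrative (its name and parameters are not part of the SDK); it only shows a fully annotated function whose Google-style docstring documents every argument and the return value, as the guide requires:

```python
"""Illustrative module showing the SDK's docstring and typing conventions."""

from __future__ import annotations


def count_records(stream_name: str, records: list[dict]) -> int:
    """Count the records emitted for a stream.

    Args:
        stream_name: Name of the stream being counted.
        records: Record dictionaries emitted for the stream.

    Returns:
        The number of records emitted for ``stream_name``.
    """
    # Every argument and the return value are documented, which is what the
    # flake8 docstring plugins configured for this repo check for.
    return len(records)
```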
diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css index b53275d1a..bdb170b90 100644 --- a/docs/_static/css/custom.css +++ b/docs/_static/css/custom.css @@ -1,134 +1,25 @@ -body { - font-family: "IBM Plex Sans", -apple-system, BlinkMacSystemFont, "Segoe UI", - Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", - "Helvetica Neue", sans-serif; - color: #2c3e50; - -moz-osx-font-smoothing: grayscale; +@font-face { + font-family: 'Hanken Grotesk'; + src: url('../fonts/HankenGrotesk-Regular.woff2') format('woff2'), + url('../fonts/HankenGrotesk-Regular.woff') format('woff'); + font-weight: normal; + font-style: normal; + font-display: swap; } -a, -a:visited { - color: #3438bf; -} -a:hover { - color: #3438bf; - text-decoration: underline; -} - -.wy-nav-top { - background-color: #3438bf; -} - -.wy-nav-top a { - color: white; - text-decoration: none !important; -} - -.wy-nav-side { - background-color: #3438bf; -} - -.wy-side-nav-search { - background-color: #3438bf; -} - -.wy-side-nav-search > a { - color: #fcfcfc; - text-decoration: none !important; - font-size: 24px; - font-weight: bold; -} - -.wy-side-nav-search > a img.logo { - display: inline-block; - height: 2.3rem; - padding: 0; - vertical-align: text-bottom; - position: relative; - top: 1px; -} - -.wy-side-nav-search > a::after { - display: inline-block; - content: "SDK"; - letter-spacing: -1px; -} - -.wy-side-nav-search > div.version { - color: hsla(0, 0%, 100%, 0.7); -} - -.wy-side-nav-search input[type="text"] { - border-color: #3438bf; -} - -.wy-menu-vertical a:hover { - background-color: #3438bf; -} - -.wy-menu-vertical a, -.wy-menu-vertical a:hover, -.wy-menu-vertical a:visited { - color: white; -} - -.wy-menu-vertical li.current > a, -.wy-menu-vertical li.on a { - background-color: white; -} - -.wy-nav-content-wrap, -.wy-nav-content { - background-color: white; -} - -.rst-versions, -.rst-versions .rst-current-version { - background-color: #3438bf; -} - -.rst-versions .rst-current-version { - color: #fbbf52; -} - -.rst-versions .rst-other-versions { - color: white; -} - -.rst-versions a { - color: white; - text-decoration: underline; -} - -.rst-versions .rst-other-versions dl { - margin: 5px 0; -} - -.rst-versions .rst-other-versions dt { - margin-bottom: 3px; -} - -.rst-versions .rst-other-versions dd a { - border: 1px solid white; - border-radius: 5px; - padding: 3px 6px; - text-decoration: none; -} - -.rst-versions .rst-other-versions dd a:hover { - color: #3438bf; - background-color: white; +@font-face { + font-family: 'Plus Jakarta Sans'; + src: url('../fonts/PlusJakartaSans-Regular.woff2') format('woff2'), + url('../fonts/PlusJakartaSans-Regular.woff') format('woff'); + font-weight: normal; + font-style: normal; + font-display: swap; } -.rst-content code.literal { - color: #62626e; +h1 { + color: var(--color-brand-content); } -h1, -h2, -h3, -h4, -h5, -h6 { - font-family: inherit; +h1, h2, h3, h4, h5, h6 { + font-family: "Plus Jakarta Sans"; } diff --git a/docs/_static/fonts/HankenGrotesk-Regular.woff b/docs/_static/fonts/HankenGrotesk-Regular.woff new file mode 100644 index 000000000..3c37d1086 Binary files /dev/null and b/docs/_static/fonts/HankenGrotesk-Regular.woff differ diff --git a/docs/_static/fonts/HankenGrotesk-Regular.woff2 b/docs/_static/fonts/HankenGrotesk-Regular.woff2 new file mode 100644 index 000000000..b590f738b Binary files /dev/null and b/docs/_static/fonts/HankenGrotesk-Regular.woff2 differ diff --git a/docs/_static/fonts/PlusJakartaSans-Regular.woff 
b/docs/_static/fonts/PlusJakartaSans-Regular.woff new file mode 100644 index 000000000..fe04e8de5 Binary files /dev/null and b/docs/_static/fonts/PlusJakartaSans-Regular.woff differ diff --git a/docs/_static/fonts/PlusJakartaSans-Regular.woff2 b/docs/_static/fonts/PlusJakartaSans-Regular.woff2 new file mode 100644 index 000000000..c4d6797c8 Binary files /dev/null and b/docs/_static/fonts/PlusJakartaSans-Regular.woff2 differ diff --git a/docs/_static/img/favicon.png b/docs/_static/img/favicon.png new file mode 100644 index 000000000..4a111130a Binary files /dev/null and b/docs/_static/img/favicon.png differ diff --git a/docs/_static/img/logo-light.svg b/docs/_static/img/logo-light.svg new file mode 100644 index 000000000..da640c865 --- /dev/null +++ b/docs/_static/img/logo-light.svg @@ -0,0 +1,15 @@ +<svg width="218" height="37" viewBox="0 0 218 37" fill="none" xmlns="http://www.w3.org/2000/svg"> +<path d="M91.526 28.1817C91.0512 27.6427 90.7072 27.0262 90.4979 26.3285C90.2848 25.6308 90.1839 24.6932 90.1839 23.5119V5.81453H94.113V23.0947C94.113 24.4348 94.2588 25.4684 94.5579 26.1993C94.857 26.9118 95.242 27.5209 95.7168 28.0267V28.1854H91.5222H91.526V28.1817Z" fill="#080216"/> +<path d="M100.382 28.1817C99.9749 27.7129 99.6534 27.1481 99.4067 26.4873C99.1786 25.8265 99.0628 24.8335 99.0628 23.5119V15.6156H96.8982V12.5332H99.1151V8.20297H102.943V12.5332H105.553V15.6156H102.966V23.0689C102.966 24.4569 103.126 25.5201 103.441 26.251C103.758 26.9635 104.136 27.5541 104.577 28.0267V28.1854H100.382V28.1817Z" fill="#080216"/> +<path d="M46.1744 12.5184H49.9092V14.4233H50.1185C50.8101 13.2309 52.2569 12.1308 54.4252 12.1308C56.5936 12.1308 58.2198 13.0537 59.1245 14.5118H59.3675C60.4517 12.8765 62.0181 12.1308 64.0668 12.1308C67.5623 12.1308 69.8502 14.486 69.8502 17.7863V28.178H65.936V19.1263C65.936 16.9815 64.9416 15.7596 63.0761 15.7596C61.2106 15.7596 60.0031 17.0996 60.0031 19.2149V28.178H56.0851V18.8864C56.0851 16.9815 54.9412 15.7596 53.1355 15.7596C51.3298 15.7596 50.0961 17.1882 50.0961 19.1817V28.178H46.1819V12.5184H46.1744Z" fill="#080216"/> +<path d="M72.1083 20.4368C72.1083 15.4938 75.4505 12.1271 80.0899 12.1271C85.178 12.1271 87.9482 15.8519 87.9482 20.2596V21.4778H75.8991C76.0187 23.8589 77.6786 25.4684 80.2058 25.4684C82.1312 25.4684 83.6116 24.5714 84.1537 23.2941H87.7987C87.0174 26.4836 84.1836 28.5656 80.0862 28.5656C75.4168 28.5656 72.1045 25.1103 72.1045 20.4331H72.1083V20.4368ZM84.2172 18.7424C84.0378 16.6271 82.3816 15.228 80.0899 15.228C77.7983 15.228 76.2057 16.7748 75.9627 18.7424H84.2172Z" fill="#080216"/> +<path d="M107.377 20.4368C107.377 15.5233 110.39 12.1271 114.727 12.1271C117.467 12.1271 119.004 13.7366 119.546 14.5377H119.755V12.511H123.673V28.1743H119.815V26.1809H119.606C119.153 26.838 117.736 28.5619 114.847 28.5619C110.42 28.5619 107.377 25.2875 107.377 20.4294V20.4368ZM119.849 20.3814C119.849 17.4319 118.043 15.6452 115.572 15.6452C113.101 15.6452 111.355 17.6091 111.355 20.3814C111.355 23.1538 113.071 25.0845 115.602 25.0845C118.133 25.0845 119.845 23.0024 119.845 20.3814H119.849Z" fill="#080216"/> +<path d="M126.929 12.5184H130.664V14.4233H130.874C131.505 13.2604 133.075 12.1308 135.334 12.1308C138.885 12.1308 141.053 14.6041 141.053 17.9376V28.1817H137.139V18.9491C137.139 17.0443 135.906 15.7596 134.096 15.7596C132.201 15.7596 130.844 17.251 130.844 19.2149V28.178H126.929V12.5184Z" fill="#080216"/> +<path d="M143.311 20.3482C143.311 15.7005 146.747 12.1271 151.745 12.1271C156.744 12.1271 160.18 15.7005 160.18 20.3482C160.18 24.9959 156.684 28.5693 
151.745 28.5693C146.807 28.5693 143.311 25.0254 143.311 20.3482ZM156.205 20.3482C156.205 17.6386 154.336 15.6452 151.745 15.6452C149.155 15.6452 147.285 17.6386 147.285 20.3482C147.285 23.0578 149.155 25.0513 151.745 25.0513C154.336 25.0513 156.205 23.0873 156.205 20.3482Z" fill="#080216"/> +<path d="M39.1087 7.39083C37.6058 9.41011 34.3757 9.86417 34.0169 8.66072C33.6505 7.45727 35.5309 6.62298 36.5179 6.50854C30.8018 4.96178 31.8373 9.07787 28.6783 9.47287C30.4242 10.3293 32.2411 11.7579 33.4673 11.9314C35.5721 12.2342 38.5217 11.0787 39.1087 7.38713" fill="#080216"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M16.5619 2.22264C16.4535 1.584 16.0048 1.38096 15.2945 1.09671H15.2908C19.9302 -1.07393 22.8911 0.491295 24.0874 1.46956C24.7492 2.01222 24.9996 2.52904 25.2389 3.01632C25.4296 3.40763 25.609 3.78048 25.9866 4.13487C26.5511 4.66645 27.6054 5.42322 28.0278 5.72593C28.0689 5.75546 28.1063 5.7813 28.1362 5.80345C28.4503 6.03233 28.5848 6.6599 28.297 7.09181C28.0091 7.52003 27.2539 8.40231 26.3417 8.25465C25.4333 8.10699 23.4407 7.76367 21.7957 7.48311C20.1546 7.19886 19.0143 6.28705 19.0143 6.28705C18.8087 7.43512 19.5041 8.4503 20.8125 8.78993C21.0667 8.85638 21.3696 8.92282 21.7135 9.00035C23.2538 9.34735 25.5567 9.86786 27.4633 11.0787C30.0503 12.7214 32.099 15.3203 32.899 19.3109C33.6991 23.3015 32.7009 31.7551 25.5978 35.2252C18.4947 38.6953 11.7654 36.0078 11.7654 36.0078C18.132 35.7125 21.8593 33.1432 23.2837 31.434C24.2818 30.2342 25.2239 28.0783 24.652 25.5939C24.0239 22.8695 21.7098 20.8023 19.3545 21.1493C16.9993 21.4963 16.0011 24.0287 15.56 25.258C15.1188 26.4873 14.4309 27.3954 13.829 27.4951C13.2271 27.5947 12.3635 27.628 11.6046 25.808C10.8457 23.9881 8.8232 20.7284 6.13897 21.954C3.45474 23.1759 2.96874 28.8425 2.96874 28.8425C-0.328607 23.6152 -1.16603 16.9151 1.9257 10.4068C5.01743 3.89861 11.4663 1.50278 11.4663 1.50278C10.3111 2.99048 9.0363 7.4905 10.3336 11.5401C11.6308 15.5898 14.8309 17.6201 16.6665 17.8933C18.5021 18.1702 20.6892 17.0886 20.5695 14.8662C20.4723 13.0426 18.8947 11.7469 17.2236 10.9864C16.031 10.4437 14.8496 9.196 14.7674 7.28008C14.6851 5.36047 15.53 4.14963 15.9413 3.66604C16.2665 3.28581 16.6479 2.76161 16.5544 2.21895L16.5619 2.22264ZM21.2163 4.06473C19.863 4.06473 19.2461 4.74767 19.2461 4.74767C19.2498 4.71444 19.2536 4.67384 19.2611 4.62954C19.3171 4.14594 19.4368 3.13815 19.934 2.64717C20.4761 2.11189 21.392 1.70951 22.6818 2.64717C23.9715 3.58482 24.3753 5.29402 24.3753 5.29402C23.9229 4.91379 22.5734 4.06473 21.22 4.06473H21.2163Z" fill="#080216"/> +<path d="M168.224 0H166.153V36.8307H168.224V0Z" fill="#18C3FA"/> +<path d="M177.444 23.1122C177.512 24.3709 178.514 25.3112 180.334 25.3112C181.837 25.3112 182.723 24.7387 182.723 23.7302C182.723 20.7503 174.411 23.408 174.411 16.8111C174.411 14.1307 176.483 12 180.149 12C183.473 12 185.933 13.7175 186.069 17.1069H182.47C182.448 16.0074 181.626 15.2264 180.149 15.2264C178.672 15.2264 178.009 15.8671 178.009 16.7164C178.009 19.556 186.479 16.9704 186.479 23.4535C186.479 26.4296 184.226 28.6323 180.217 28.6323C176.208 28.6323 173.978 26.4107 174 23.1122H177.444Z" fill="#18C3FA"/> +<path d="M188.216 12.2995H193.887C198.989 12.2995 202.082 15.5752 202.082 20.2498C202.082 25.2202 198.985 28.3366 193.887 28.3366H188.216V12.2995ZM193.725 25.1974C196.046 25.1974 198.326 23.8932 198.326 20.2498C198.326 16.6064 196.072 15.4387 193.725 15.4387H191.811V25.1974H193.725Z" fill="#18C3FA"/> +<path d="M212.873 28.3366L209.162 21.5313L207.568 
23.4345V28.3366H203.97V12.2995H207.568V18.4148H207.704L212.508 12.2995H216.946L211.641 18.6006L217.037 28.3366H212.87H212.873Z" fill="#18C3FA"/> +</svg> diff --git a/docs/_static/img/logo.svg b/docs/_static/img/logo.svg index 62c65875b..3ac255aaa 100644 --- a/docs/_static/img/logo.svg +++ b/docs/_static/img/logo.svg @@ -1 +1,15 @@ -<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 600 200"><defs><style>.cls-1{fill:#fff;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_2-2" data-name="Layer 2-2"><path class="cls-1" d="M271.59,154.2H259.86V123.78c0-8.67-4.59-12.41-10.37-12.41-5.44,0-10.54,2-13.6,6.8a22.78,22.78,0,0,1,.17,3.4V154H224.34V122.59c0-7.82-4.59-11-10.37-11a15.75,15.75,0,0,0-14.11,7.65v35.18H188.13V102.88h11.73v5.43l.68.34a21.36,21.36,0,0,1,16.32-6.79A18.49,18.49,0,0,1,233,110a24.74,24.74,0,0,1,19.37-8.15c10.54,0,19,7.47,19,22.09ZM330.73,125v6.12H292.49c.85,9.52,6.29,14.11,17.68,14.45a39,39,0,0,0,15.3-3.06l2.21,8.84a39.08,39.08,0,0,1-17.51,4.07c-17.51,0-29.4-10-29.4-26.68,0-14.11,9.85-26.68,25.83-26.85C320.71,101.69,329.88,109.84,330.73,125ZM293,122.76h26c-.51-7.65-5.61-11.39-12.24-11.39C299.29,111.37,294.53,116.13,293,122.76ZM359,155.22c-12.07,0-18.53-5.6-18.53-16.48V79.25h11.73v58.64c0,4.76,3.06,7,8,7.14Zm1.7-52.34h8V86.39h11.72v16.49h14.11v9.17H380.36v24c0,6.12,3.4,8.67,8.16,8.67a17.88,17.88,0,0,0,7.31-1.19l2,9a23.59,23.59,0,0,1-10.71,2c-10.53,0-18.52-6.62-18.52-18.35V112.05h-8ZM441,106.62l.51-.17v-3.57h11.73V154.2H441.55v-5.26l-.68-.17a20.67,20.67,0,0,1-16.15,6.45c-13.93,0-25.15-11.89-25.15-26.68a26.11,26.11,0,0,1,25.83-26.68h.34c7.14-.17,11.9,1.87,15.3,4.76Zm.51,31.1v-22.1a23.9,23.9,0,0,0-14.28-4.25c-8.66,0-16,7.48-16,17s7.14,17,16,17C433.22,145.54,438.66,143.5,441.55,137.72Zm24.82-35h11.72V109l.68.17c3.74-4.41,10-7.47,18.19-7.47,10.37,0,18.87,7.3,18.87,21.58V154.2H504.1V123.78c0-8.67-5.1-12.41-11.39-12.41a17,17,0,0,0-14.62,7V154H466.37Zm85.83-1c16,0,28,11.89,28,26.68s-12.07,26.68-28,26.68-27.88-11.89-27.88-26.68,12.07-26.68,27.88-26.68Zm0,43.85c9.35,0,16.31-7.48,16.31-17s-7.13-17-16.31-17-16.32,7.48-16.32,17,7.14,17,16.32,17Z"/><path class="cls-1" d="M238.27,52.91c-11.38,15.46-36,18.86-38.75,9.68s11.56-15.46,19-16.48c-43.51-11.73-35.69,19.54-59.83,22.6,13.26,6.63,27.2,17.34,36.54,18.7C211.25,90,233.69,81.12,238.27,52.91Z"/><path class="cls-1" d="M130.18,62.59c-2.55-1.36-5.1-2.89-7.48-4.41a8.07,8.07,0,0,1-3.06-9l1-2c.34.34,2.72,5.27,12.75,9.86,3.4,1.53,16,7.64,16.48,7.81,4.59,1,12.07-3.91,10-7.64-1.53-2.21-8-9.18-8.67-13.26-.68-3.23-.51-8.33-7.48-14.79-6.62-6.12-21.07-7.82-31.27-6.46,2.89,2.55,7.82,7.14,3.06,9.69a59,59,0,0,0-7.82,4.59c-7.82,6.29-8.5,19.89.68,28,7,6.12,15.47,15.3,5.44,25.84-7,7.31-23.79,3.57-34.33-7.14-11.73-11.9-12.92-34.5-1.53-52l1.53-2.38a6,6,0,0,0-1.7.34c-2.72,1-5.44,1.87-8,3.06A76.53,76.53,0,0,0,32.45,67.35c-3.91,7.48-18.7,41.81,4.08,76.32.51-11.22,4.08-22.1,8-27.88,4.24-6.12,13.42-11.55,22.09,0,5.61,7.31,6.8,18.53,13.77,18.53s8.5-11.22,13.25-18.53c7.82-11.89,18.36-7.14,22.27-1a40.86,40.86,0,0,1,4.76,28.9c-5.1,25-50.82,24.64-65.95,19.54,0,0,16,14.62,45.89,15,10.71.17,46.06-4.25,60-42.15C177.09,91.15,137.15,66.33,130.18,62.59Zm-2.72-27.53c7-7.48,17.33,2.55,16.82,14.79-9-9.52-13.42-10.37-20.56-7.48C123.55,40.16,126.44,36.08,127.46,35.06Z"/></g></g></svg> \ No newline at end of file +<svg width="218" height="37" viewBox="0 0 218 37" fill="none" xmlns="http://www.w3.org/2000/svg"> +<path d="M91.526 28.1817C91.0512 27.6428 90.7072 27.0263 90.4979 26.3286C90.2848 25.6309 90.1839 24.6932 90.1839 
23.5119V5.81456H94.113V23.0948C94.113 24.4348 94.2588 25.4684 94.5579 26.1994C94.857 26.9118 95.242 27.5209 95.7168 28.0267V28.1854H91.5222H91.526V28.1817Z" fill="white"/> +<path d="M100.382 28.1817C99.9749 27.7129 99.6534 27.1481 99.4067 26.4873C99.1786 25.8265 99.0628 24.8335 99.0628 23.5119V15.6157H96.8982V12.5332H99.1151V8.203H102.943V12.5332H105.553V15.6157H102.966V23.0689C102.966 24.457 103.126 25.5201 103.441 26.251C103.758 26.9635 104.136 27.5542 104.577 28.0267V28.1854H100.382V28.1817Z" fill="white"/> +<path d="M46.1744 12.5184H49.9092V14.4233H50.1185C50.8101 13.2309 52.2569 12.1308 54.4252 12.1308C56.5936 12.1308 58.2198 13.0537 59.1245 14.5119H59.3675C60.4517 12.8765 62.0181 12.1308 64.0668 12.1308C67.5623 12.1308 69.8502 14.486 69.8502 17.7863V28.178H65.936V19.1263C65.936 16.9815 64.9416 15.7596 63.0761 15.7596C61.2106 15.7596 60.0031 17.0997 60.0031 19.2149V28.178H56.0851V18.8864C56.0851 16.9815 54.9412 15.7596 53.1355 15.7596C51.3298 15.7596 50.0961 17.1883 50.0961 19.1817V28.178H46.1819V12.5184H46.1744Z" fill="white"/> +<path d="M72.1083 20.4368C72.1083 15.4938 75.4505 12.1271 80.0899 12.1271C85.178 12.1271 87.9482 15.8519 87.9482 20.2596V21.4779H75.8991C76.0187 23.8589 77.6786 25.4684 80.2058 25.4684C82.1312 25.4684 83.6116 24.5714 84.1537 23.2941H87.7987C87.0174 26.4836 84.1836 28.5657 80.0862 28.5657C75.4168 28.5657 72.1045 25.1104 72.1045 20.4331H72.1083V20.4368ZM84.2172 18.7424C84.0378 16.6271 82.3816 15.228 80.0899 15.228C77.7983 15.228 76.2057 16.7748 75.9627 18.7424H84.2172Z" fill="white"/> +<path d="M107.377 20.4368C107.377 15.5234 110.39 12.1271 114.727 12.1271C117.467 12.1271 119.004 13.7367 119.546 14.5377H119.755V12.5111H123.673V28.1744H119.815V26.1809H119.606C119.153 26.838 117.736 28.562 114.847 28.562C110.42 28.562 107.377 25.2876 107.377 20.4295V20.4368ZM119.849 20.3815C119.849 17.4319 118.043 15.6452 115.572 15.6452C113.101 15.6452 111.355 17.6091 111.355 20.3815C111.355 23.1538 113.071 25.0845 115.602 25.0845C118.133 25.0845 119.845 23.0025 119.845 20.3815H119.849Z" fill="white"/> +<path d="M126.929 12.5184H130.664V14.4233H130.874C131.505 13.2604 133.075 12.1308 135.334 12.1308C138.885 12.1308 141.053 14.6042 141.053 17.9376V28.1817H137.139V18.9491C137.139 17.0443 135.906 15.7596 134.096 15.7596C132.201 15.7596 130.844 17.251 130.844 19.2149V28.178H126.929V12.5184Z" fill="white"/> +<path d="M143.311 20.3482C143.311 15.7006 146.747 12.1271 151.745 12.1271C156.744 12.1271 160.18 15.7006 160.18 20.3482C160.18 24.9959 156.684 28.5693 151.745 28.5693C146.807 28.5693 143.311 25.0254 143.311 20.3482ZM156.205 20.3482C156.205 17.6386 154.336 15.6452 151.745 15.6452C149.155 15.6452 147.285 17.6386 147.285 20.3482C147.285 23.0578 149.155 25.0513 151.745 25.0513C154.336 25.0513 156.205 23.0874 156.205 20.3482Z" fill="white"/> +<path d="M39.1087 7.39086C37.6058 9.41014 34.3757 9.8642 34.0169 8.66075C33.6505 7.4573 35.5309 6.62301 36.5179 6.50857C30.8018 4.96181 31.8373 9.0779 28.6783 9.4729C30.4242 10.3293 32.2411 11.758 33.4673 11.9315C35.5721 12.2342 38.5217 11.0787 39.1087 7.38716" fill="white"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M16.5619 2.22267C16.4535 1.58403 16.0048 1.38099 15.2945 1.09674H15.2908C19.9302 -1.0739 22.8911 0.491325 24.0874 1.46959C24.7492 2.01225 24.9996 2.52907 25.2389 3.01635C25.4296 3.40766 25.609 3.78051 25.9866 4.1349C26.5511 4.66648 27.6054 5.42325 28.0278 5.72596C28.0689 5.75549 28.1063 5.78133 28.1362 5.80348C28.4503 6.03236 28.5848 6.65993 28.297 7.09184C28.0091 7.52006 27.2539 8.40234 26.3417 8.25468C25.4333 8.10702 
23.4407 7.7637 21.7957 7.48315C20.1546 7.19889 19.0143 6.28708 19.0143 6.28708C18.8087 7.43515 19.5041 8.45033 20.8125 8.78996C21.0667 8.85641 21.3696 8.92285 21.7135 9.00038C23.2538 9.34738 25.5567 9.86789 27.4633 11.0787C30.0503 12.7215 32.099 15.3203 32.899 19.3109C33.6991 23.3015 32.7009 31.7552 25.5978 35.2252C18.4947 38.6953 11.7654 36.0078 11.7654 36.0078C18.132 35.7125 21.8593 33.1432 23.2837 31.434C24.2818 30.2342 25.2239 28.0784 24.652 25.594C24.0239 22.8696 21.7098 20.8023 19.3545 21.1493C16.9993 21.4963 16.0011 24.0287 15.56 25.258C15.1188 26.4873 14.4309 27.3954 13.829 27.4951C13.2271 27.5948 12.3635 27.628 11.6046 25.8081C10.8457 23.9881 8.8232 20.7285 6.13897 21.9541C3.45474 23.176 2.96874 28.8425 2.96874 28.8425C-0.328607 23.6153 -1.16603 16.9151 1.9257 10.4069C5.01743 3.89864 11.4663 1.50281 11.4663 1.50281C10.3111 2.99051 9.0363 7.49053 10.3336 11.5402C11.6308 15.5898 14.8309 17.6202 16.6665 17.8934C18.5021 18.1702 20.6892 17.0886 20.5695 14.8663C20.4723 13.0426 18.8947 11.7469 17.2236 10.9864C16.031 10.4438 14.8496 9.19603 14.7674 7.28011C14.6851 5.3605 15.53 4.14966 15.9413 3.66607C16.2665 3.28584 16.6479 2.76164 16.5544 2.21898L16.5619 2.22267ZM21.2163 4.06476C19.863 4.06476 19.2461 4.7477 19.2461 4.7477C19.2498 4.71447 19.2536 4.67387 19.2611 4.62957C19.3171 4.14597 19.4368 3.13818 19.934 2.6472C20.4761 2.11192 21.392 1.70954 22.6818 2.6472C23.9715 3.58485 24.3753 5.29405 24.3753 5.29405C23.9229 4.91382 22.5734 4.06476 21.22 4.06476H21.2163Z" fill="white"/> +<path d="M168.224 0H166.153V36.8307H168.224V0Z" fill="#18C3FA"/> +<path d="M177.444 23.1122C177.512 24.3709 178.514 25.3112 180.334 25.3112C181.837 25.3112 182.723 24.7387 182.723 23.7302C182.723 20.7503 174.411 23.408 174.411 16.8111C174.411 14.1307 176.483 12 180.149 12C183.473 12 185.933 13.7175 186.069 17.1069H182.47C182.448 16.0074 181.626 15.2264 180.149 15.2264C178.672 15.2264 178.009 15.8671 178.009 16.7164C178.009 19.556 186.479 16.9704 186.479 23.4535C186.479 26.4296 184.226 28.6323 180.217 28.6323C176.208 28.6323 173.978 26.4107 174 23.1122H177.444Z" fill="#18C3FA"/> +<path d="M188.216 12.2995H193.887C198.989 12.2995 202.082 15.5752 202.082 20.2498C202.082 25.2202 198.985 28.3366 193.887 28.3366H188.216V12.2995ZM193.725 25.1974C196.046 25.1974 198.326 23.8932 198.326 20.2498C198.326 16.6064 196.072 15.4387 193.725 15.4387H191.811V25.1974H193.725Z" fill="#18C3FA"/> +<path d="M212.873 28.3366L209.162 21.5313L207.568 23.4345V28.3366H203.97V12.2995H207.568V18.4148H207.704L212.508 12.2995H216.946L211.641 18.6006L217.037 28.3366H212.87H212.873Z" fill="#18C3FA"/> +</svg> diff --git a/docs/_static/img/melty-apple-touch-icon.png b/docs/_static/img/melty-apple-touch-icon.png deleted file mode 100644 index 7b64991cb..000000000 Binary files a/docs/_static/img/melty-apple-touch-icon.png and /dev/null differ diff --git a/docs/_static/img/melty-favicon-16x16.png b/docs/_static/img/melty-favicon-16x16.png deleted file mode 100644 index d726fe28c..000000000 Binary files a/docs/_static/img/melty-favicon-16x16.png and /dev/null differ diff --git a/docs/_static/img/melty-favicon-32x32.png b/docs/_static/img/melty-favicon-32x32.png deleted file mode 100644 index a480a5798..000000000 Binary files a/docs/_static/img/melty-favicon-32x32.png and /dev/null differ diff --git a/docs/_templates/base.html b/docs/_templates/base.html new file mode 100644 index 000000000..27af40b1f --- /dev/null +++ b/docs/_templates/base.html @@ -0,0 +1,16 @@ +{% extends '!base.html' %} +{% block extrahead %} + <link rel="stylesheet" 
href="https://fonts.googleapis.com/css?family=IBM+Plex+Sans&display=swap"> + <link rel="apple-touch-icon" sizes="180x180" href="{{ pathto('_static/img/favicon.png', 1) }}"> + <link rel="icon" type="image/png" sizes="32x32" href="{{ pathto('_static/img/favicon.png', 1) }}"> + <link rel="icon" type="image/png" sizes="192x192" href="{{ pathto('_static/img/favicon.png', 1) }}"> + <!-- Google tag (gtag.js) --> + <script async src="https://www.googletagmanager.com/gtag/js?id=GTM-WHJMBX2"></script> + <script> + window.dataLayer = window.dataLayer || []; + function gtag(){dataLayer.push(arguments);} + gtag('js', new Date()); + + gtag('config', 'GTM-WHJMBX2'); + </script> +{% endblock %} diff --git a/docs/_templates/class.rst b/docs/_templates/class.rst index f1b00ed50..4ebc9be04 100644 --- a/docs/_templates/class.rst +++ b/docs/_templates/class.rst @@ -5,4 +5,4 @@ .. autoclass:: {{ name }} :members: - :special-members: __init__ + :special-members: __init__, __call__ diff --git a/docs/_templates/footer.html b/docs/_templates/footer.html deleted file mode 100644 index 2ce7aaa27..000000000 --- a/docs/_templates/footer.html +++ /dev/null @@ -1,9 +0,0 @@ -{% extends '!footer.html' %} -{% block extrafooter %} - - The SDK is built with love by the <a href="https://meltano.com">Meltano</a> core team and contributors, with - contributions from developers across the <a href="https://singer.io">Singer</a> open source community. - - <!-- Docs built using <a href="https://www.sphinx-doc.org">Sphinx</a> and <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a> - from <a href="https://readthedocs.org">Read the Docs</a>. --> -{% endblock %} diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html deleted file mode 100644 index e7de610f0..000000000 --- a/docs/_templates/layout.html +++ /dev/null @@ -1,7 +0,0 @@ -{% extends '!layout.html' %} -{% block extrahead %} - <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=IBM+Plex+Sans&display=swap"> - <link rel="apple-touch-icon" sizes="180x180" href="{{ pathto('_static/img/melty-apple-touch-icon.png', 1) }}"> - <link rel="icon" type="image/png" sizes="32x32" href="{{ pathto('_static/img/melty-favicon-32x32.png', 1) }}"> - <link rel="icon" type="image/png" sizes="16x16" href="{{ pathto('_static/img/melty-favicon-16x16.png', 1) }}"> -{% endblock %} diff --git a/docs/_templates/plugin_class.rst b/docs/_templates/plugin_class.rst new file mode 100644 index 000000000..f0f50324a --- /dev/null +++ b/docs/_templates/plugin_class.rst @@ -0,0 +1,10 @@ +{{ fullname }} +{{ "=" * fullname|length }} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ name }} + :members: + :show-inheritance: + :inherited-members: + :special-members: __init__ diff --git a/docs/batch.md b/docs/batch.md new file mode 100644 index 000000000..df6ef1178 --- /dev/null +++ b/docs/batch.md @@ -0,0 +1,119 @@ +# Batch Messages + +```{warning} +The `BATCH` message functionality is currently in preview and is subject to change. +You can [open an issue](https://github.com/meltano/sdk/issues) or [join the discussion](https://github.com/meltano/sdk/discussions/963) on GitHub to provide feedback during the preview period. +``` + +[The Singer message specification](https://github.com/singer-io/getting-started/blob/master/docs/SPEC.md#output) defines the three basic types of messages: `RECORD`, `STATE`, and `SCHEMA`. The `RECORD` message is used to send data from the tap to the target. The `STATE` message is used to send state data from the tap to the target. 
The `SCHEMA` message is used to send schema data from the tap to the target, and for example, create tables with the correct column types. + +However, the Singer specification can be extended to support additional types of messages. For example, the [`ACTIVATE_VERSION`](https://sdk.meltano.com/en/latest/capabilities.html#singer_sdk.helpers.capabilities.PluginCapabilities.ACTIVATE_VERSION) message is used to manage hard deletes in the target. + +This library's implementation of the `BATCH` message is used to send records in bulk from the tap to the target, using an intermediate filesystem to store _batch_ files. This is useful, for example + +- when the tap outputs records at a much higher rate than the target can consume them, creating backpressure +- when the source system can directly export data in bulk (e.g. a database dump) + +Currently only a local filesystem or AWS S3 are supported, but other filesystems like FTP, etc. could be supported in the future. + +## The `BATCH` Message + +Local +```json +{ + "type": "BATCH", + "stream": "users", + "encoding": { + "format": "jsonl", + "compression": "gzip" + }, + "manifest": [ + "file://path/to/batch/file/1", + "file://path/to/batch/file/2" + ] +} +``` + +AWS S3 +```json +{ + "type": "BATCH", + "stream": "users", + "encoding": { + "format": "jsonl", + "compression": "gzip" + }, + "manifest": [ + "s3://path/to/batch/file/1", + "s3://path/to/batch/file/2" + ] +} +``` + +### `encoding` + +The `encoding` field is used to specify the format and compression of the batch files. Currently only `jsonl` and `gzip` are supported, respectively. + +### `manifest` + +The `manifest` field is used to specify the paths to the batch files. The paths are relative to the `root` directory specified in the [`batch_config`](#batch-configuration) storage configuration. + +## Batch configuration + +When local storage is used, targets do no require special configuration to process `BATCH` messages. Use of AWS S3 assumes S3/AWS credentials are already discoverable via the underlying S3 libraries (`AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` / `AWS_DEFAULT_REGION`) + +Taps may be configured to specify a root storage `root` directory, file path `prefix`, and `encoding` for batch files using a configuration like the below: + + +In `config.json`: + +```js +{ + // ... + "batch_config": { + "encoding": { + "format": "jsonl", + "compression": "gzip", + }, + "storage": { + "root": "file://tests/core/resources", + "prefix": "test-batch-", + } + } +} +``` + +## Custom batch file creation and processing + +### Tap side + +Taps can optionally customize the batch file creation by implementing the [`get_batches`](singer_sdk.Stream.get_batches). This method should return a _tuple_ of an encoding and a list of batch files: + +```python +class MyStream(Stream): + def get_batches(self, records): + return ( + ParquetEncoding(compression="snappy"), + [ + "s3://my-bucket/my-batch-file-1.parquet", + "s3://my-bucket/my-batch-file-2.parquet", + ] + ) +``` + +### Target side + +Targets can optionally customize the batch file processing by implementing the [`process_batch_files`](singer_sdk.Sink.process_batch_files). + +```python +class MySink(Sink): + def process_batch_files(self, encoding, storage, files): + # process the batch files +``` + +## Known Limitations of `BATCH` + +1. Currently the built-in `BATCH` implementation does not support incremental bookmarks or `STATE` tracking. This work is tracked in [Issue #976](https://github.com/meltano/sdk/issues/976). +2. 
The `BATCH` implementation is not currently compatible with [Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html). This is certainly possible to implement in theory, but it would also introduce some performance penalties. This limitation is tracked in [Issue 1117#](https://github.com/meltano/sdk/issues/1117). + +If you are interested in contributing to one or both of these features, please add a comment in the respective issue. diff --git a/docs/classes/singer_sdk.BatchSink.rst b/docs/classes/singer_sdk.BatchSink.rst index 22933fcab..ea6f276fb 100644 --- a/docs/classes/singer_sdk.BatchSink.rst +++ b/docs/classes/singer_sdk.BatchSink.rst @@ -5,4 +5,4 @@ .. autoclass:: BatchSink :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.GraphQLStream.rst b/docs/classes/singer_sdk.GraphQLStream.rst index 8dc8d1062..41953196f 100644 --- a/docs/classes/singer_sdk.GraphQLStream.rst +++ b/docs/classes/singer_sdk.GraphQLStream.rst @@ -5,4 +5,4 @@ .. autoclass:: GraphQLStream :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.InlineMapper.rst b/docs/classes/singer_sdk.InlineMapper.rst index 978ef7fbb..a6e8b5e7b 100644 --- a/docs/classes/singer_sdk.InlineMapper.rst +++ b/docs/classes/singer_sdk.InlineMapper.rst @@ -5,4 +5,6 @@ .. autoclass:: InlineMapper :members: + :show-inheritance: + :inherited-members: :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.RESTStream.rst b/docs/classes/singer_sdk.RESTStream.rst index 3486f5cc9..9710c6303 100644 --- a/docs/classes/singer_sdk.RESTStream.rst +++ b/docs/classes/singer_sdk.RESTStream.rst @@ -5,4 +5,4 @@ .. autoclass:: RESTStream :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.RecordSink.rst b/docs/classes/singer_sdk.RecordSink.rst index b1767cf83..1f087079e 100644 --- a/docs/classes/singer_sdk.RecordSink.rst +++ b/docs/classes/singer_sdk.RecordSink.rst @@ -5,4 +5,4 @@ .. autoclass:: RecordSink :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.SQLConnector.rst b/docs/classes/singer_sdk.SQLConnector.rst index 76fe26721..d4d097f8f 100644 --- a/docs/classes/singer_sdk.SQLConnector.rst +++ b/docs/classes/singer_sdk.SQLConnector.rst @@ -5,4 +5,4 @@ .. autoclass:: SQLConnector :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.SQLSink.rst b/docs/classes/singer_sdk.SQLSink.rst index 53a2ac4a1..60b8b14a4 100644 --- a/docs/classes/singer_sdk.SQLSink.rst +++ b/docs/classes/singer_sdk.SQLSink.rst @@ -5,4 +5,4 @@ .. autoclass:: SQLSink :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.SQLStream.rst b/docs/classes/singer_sdk.SQLStream.rst index d178a0ed9..f72894088 100644 --- a/docs/classes/singer_sdk.SQLStream.rst +++ b/docs/classes/singer_sdk.SQLStream.rst @@ -5,4 +5,4 @@ .. 
autoclass:: SQLStream :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.SQLTap.rst b/docs/classes/singer_sdk.SQLTap.rst index 1c49d105a..84433750f 100644 --- a/docs/classes/singer_sdk.SQLTap.rst +++ b/docs/classes/singer_sdk.SQLTap.rst @@ -5,4 +5,6 @@ .. autoclass:: SQLTap :members: + :show-inheritance: + :inherited-members: :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.SQLTarget.rst b/docs/classes/singer_sdk.SQLTarget.rst index 36c32244f..5c16f3f3f 100644 --- a/docs/classes/singer_sdk.SQLTarget.rst +++ b/docs/classes/singer_sdk.SQLTarget.rst @@ -5,4 +5,6 @@ .. autoclass:: SQLTarget :members: + :show-inheritance: + :inherited-members: :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.Sink.rst b/docs/classes/singer_sdk.Sink.rst index 0f1c6c78e..79062c29e 100644 --- a/docs/classes/singer_sdk.Sink.rst +++ b/docs/classes/singer_sdk.Sink.rst @@ -5,4 +5,4 @@ .. autoclass:: Sink :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.Stream.rst b/docs/classes/singer_sdk.Stream.rst index b56744ba9..db028a912 100644 --- a/docs/classes/singer_sdk.Stream.rst +++ b/docs/classes/singer_sdk.Stream.rst @@ -5,4 +5,4 @@ .. autoclass:: Stream :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.Tap.rst b/docs/classes/singer_sdk.Tap.rst index ae4cb051b..cd6702fb5 100644 --- a/docs/classes/singer_sdk.Tap.rst +++ b/docs/classes/singer_sdk.Tap.rst @@ -5,4 +5,6 @@ .. autoclass:: Tap :members: + :show-inheritance: + :inherited-members: :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.Target.rst b/docs/classes/singer_sdk.Target.rst index c6aabb5af..153479355 100644 --- a/docs/classes/singer_sdk.Target.rst +++ b/docs/classes/singer_sdk.Target.rst @@ -5,4 +5,6 @@ .. autoclass:: Target :members: + :show-inheritance: + :inherited-members: :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.APIAuthenticatorBase.rst b/docs/classes/singer_sdk.authenticators.APIAuthenticatorBase.rst new file mode 100644 index 000000000..1b3b608c5 --- /dev/null +++ b/docs/classes/singer_sdk.authenticators.APIAuthenticatorBase.rst @@ -0,0 +1,8 @@ +singer_sdk.authenticators.APIAuthenticatorBase +============================================== + +.. currentmodule:: singer_sdk.authenticators + +.. autoclass:: APIAuthenticatorBase + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.APIKeyAuthenticator.rst b/docs/classes/singer_sdk.authenticators.APIKeyAuthenticator.rst index ca48e5d44..28b0d7954 100644 --- a/docs/classes/singer_sdk.authenticators.APIKeyAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.APIKeyAuthenticator.rst @@ -5,4 +5,4 @@ .. 
autoclass:: APIKeyAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.BasicAuthenticator.rst b/docs/classes/singer_sdk.authenticators.BasicAuthenticator.rst index 1c803715e..e944212a7 100644 --- a/docs/classes/singer_sdk.authenticators.BasicAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.BasicAuthenticator.rst @@ -5,4 +5,4 @@ .. autoclass:: BasicAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.BearerTokenAuthenticator.rst b/docs/classes/singer_sdk.authenticators.BearerTokenAuthenticator.rst index 5475fa2b2..add92a271 100644 --- a/docs/classes/singer_sdk.authenticators.BearerTokenAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.BearerTokenAuthenticator.rst @@ -5,4 +5,4 @@ .. autoclass:: BearerTokenAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.OAuthAuthenticator.rst b/docs/classes/singer_sdk.authenticators.OAuthAuthenticator.rst index 1f3b9bff6..7600d45dc 100644 --- a/docs/classes/singer_sdk.authenticators.OAuthAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.OAuthAuthenticator.rst @@ -5,4 +5,4 @@ .. autoclass:: OAuthAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.OAuthJWTAuthenticator.rst b/docs/classes/singer_sdk.authenticators.OAuthJWTAuthenticator.rst index 883cf01ac..4fa2d3ea7 100644 --- a/docs/classes/singer_sdk.authenticators.OAuthJWTAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.OAuthJWTAuthenticator.rst @@ -5,4 +5,4 @@ .. autoclass:: OAuthJWTAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.authenticators.SimpleAuthenticator.rst b/docs/classes/singer_sdk.authenticators.SimpleAuthenticator.rst index 40b2859df..e83ef6fa1 100644 --- a/docs/classes/singer_sdk.authenticators.SimpleAuthenticator.rst +++ b/docs/classes/singer_sdk.authenticators.SimpleAuthenticator.rst @@ -5,4 +5,4 @@ .. autoclass:: SimpleAuthenticator :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.batch.BaseBatcher.rst b/docs/classes/singer_sdk.batch.BaseBatcher.rst new file mode 100644 index 000000000..4b2588355 --- /dev/null +++ b/docs/classes/singer_sdk.batch.BaseBatcher.rst @@ -0,0 +1,8 @@ +singer_sdk.batch.BaseBatcher +============================ + +.. currentmodule:: singer_sdk.batch + +.. autoclass:: BaseBatcher + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.batch.JSONLinesBatcher.rst b/docs/classes/singer_sdk.batch.JSONLinesBatcher.rst new file mode 100644 index 000000000..e03fa7e07 --- /dev/null +++ b/docs/classes/singer_sdk.batch.JSONLinesBatcher.rst @@ -0,0 +1,8 @@ +singer_sdk.batch.JSONLinesBatcher +================================= + +.. currentmodule:: singer_sdk.batch + +.. 
autoclass:: JSONLinesBatcher + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.ConfigValidationError.rst b/docs/classes/singer_sdk.exceptions.ConfigValidationError.rst index db945cdde..79f316f90 100644 --- a/docs/classes/singer_sdk.exceptions.ConfigValidationError.rst +++ b/docs/classes/singer_sdk.exceptions.ConfigValidationError.rst @@ -5,4 +5,4 @@ .. autoclass:: ConfigValidationError :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.FatalAPIError.rst b/docs/classes/singer_sdk.exceptions.FatalAPIError.rst index 764ff0bb5..954b42f95 100644 --- a/docs/classes/singer_sdk.exceptions.FatalAPIError.rst +++ b/docs/classes/singer_sdk.exceptions.FatalAPIError.rst @@ -5,4 +5,4 @@ .. autoclass:: FatalAPIError :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.InvalidStreamSortException.rst b/docs/classes/singer_sdk.exceptions.InvalidStreamSortException.rst index 86a87f256..e9bac24af 100644 --- a/docs/classes/singer_sdk.exceptions.InvalidStreamSortException.rst +++ b/docs/classes/singer_sdk.exceptions.InvalidStreamSortException.rst @@ -5,4 +5,4 @@ .. autoclass:: InvalidStreamSortException :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.MapExpressionError.rst b/docs/classes/singer_sdk.exceptions.MapExpressionError.rst index 0b82e5c47..21e543480 100644 --- a/docs/classes/singer_sdk.exceptions.MapExpressionError.rst +++ b/docs/classes/singer_sdk.exceptions.MapExpressionError.rst @@ -5,4 +5,4 @@ .. autoclass:: MapExpressionError :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.MaxRecordsLimitException.rst b/docs/classes/singer_sdk.exceptions.MaxRecordsLimitException.rst index c05322484..8492707f6 100644 --- a/docs/classes/singer_sdk.exceptions.MaxRecordsLimitException.rst +++ b/docs/classes/singer_sdk.exceptions.MaxRecordsLimitException.rst @@ -5,4 +5,4 @@ .. autoclass:: MaxRecordsLimitException :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.RecordsWithoutSchemaException.rst b/docs/classes/singer_sdk.exceptions.RecordsWithoutSchemaException.rst new file mode 100644 index 000000000..10d11cef4 --- /dev/null +++ b/docs/classes/singer_sdk.exceptions.RecordsWithoutSchemaException.rst @@ -0,0 +1,8 @@ +singer_sdk.exceptions.RecordsWithoutSchemaException +=================================================== + +.. currentmodule:: singer_sdk.exceptions + +.. autoclass:: RecordsWithoutSchemaException + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.RecordsWitoutSchemaException.rst b/docs/classes/singer_sdk.exceptions.RecordsWitoutSchemaException.rst deleted file mode 100644 index b3cba6c4f..000000000 --- a/docs/classes/singer_sdk.exceptions.RecordsWitoutSchemaException.rst +++ /dev/null @@ -1,8 +0,0 @@ -singer_sdk.exceptions.RecordsWitoutSchemaException -================================================== - -.. 
currentmodule:: singer_sdk.exceptions - -.. autoclass:: RecordsWitoutSchemaException - :members: - :special-members: __init__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.RetriableAPIError.rst b/docs/classes/singer_sdk.exceptions.RetriableAPIError.rst index 5a0e7f2eb..38f5c18e5 100644 --- a/docs/classes/singer_sdk.exceptions.RetriableAPIError.rst +++ b/docs/classes/singer_sdk.exceptions.RetriableAPIError.rst @@ -5,4 +5,4 @@ .. autoclass:: RetriableAPIError :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.StreamMapConfigError.rst b/docs/classes/singer_sdk.exceptions.StreamMapConfigError.rst index 52f436a01..fc27ddec4 100644 --- a/docs/classes/singer_sdk.exceptions.StreamMapConfigError.rst +++ b/docs/classes/singer_sdk.exceptions.StreamMapConfigError.rst @@ -5,4 +5,4 @@ .. autoclass:: StreamMapConfigError :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.TapStreamConnectionFailure.rst b/docs/classes/singer_sdk.exceptions.TapStreamConnectionFailure.rst index c172787d2..4876e5018 100644 --- a/docs/classes/singer_sdk.exceptions.TapStreamConnectionFailure.rst +++ b/docs/classes/singer_sdk.exceptions.TapStreamConnectionFailure.rst @@ -5,4 +5,4 @@ .. autoclass:: TapStreamConnectionFailure :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.exceptions.TooManyRecordsException.rst b/docs/classes/singer_sdk.exceptions.TooManyRecordsException.rst index 3ebbb8d98..ab9ddeb82 100644 --- a/docs/classes/singer_sdk.exceptions.TooManyRecordsException.rst +++ b/docs/classes/singer_sdk.exceptions.TooManyRecordsException.rst @@ -5,4 +5,4 @@ .. autoclass:: TooManyRecordsException :members: - :special-members: __init__ \ No newline at end of file + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.BaseAPIPaginator.rst b/docs/classes/singer_sdk.pagination.BaseAPIPaginator.rst new file mode 100644 index 000000000..dfbfbc421 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.BaseAPIPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.BaseAPIPaginator +====================================== + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: BaseAPIPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.BaseHATEOASPaginator.rst b/docs/classes/singer_sdk.pagination.BaseHATEOASPaginator.rst new file mode 100644 index 000000000..1581fb722 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.BaseHATEOASPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.BaseHATEOASPaginator +========================================== + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: BaseHATEOASPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.BaseOffsetPaginator.rst b/docs/classes/singer_sdk.pagination.BaseOffsetPaginator.rst new file mode 100644 index 000000000..1321997c8 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.BaseOffsetPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.BaseOffsetPaginator +========================================= + +.. 
currentmodule:: singer_sdk.pagination + +.. autoclass:: BaseOffsetPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.BasePageNumberPaginator.rst b/docs/classes/singer_sdk.pagination.BasePageNumberPaginator.rst new file mode 100644 index 000000000..15821f06f --- /dev/null +++ b/docs/classes/singer_sdk.pagination.BasePageNumberPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.BasePageNumberPaginator +============================================= + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: BasePageNumberPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.HeaderLinkPaginator.rst b/docs/classes/singer_sdk.pagination.HeaderLinkPaginator.rst new file mode 100644 index 000000000..8820562b9 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.HeaderLinkPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.HeaderLinkPaginator +========================================= + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: HeaderLinkPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.JSONPathPaginator.rst b/docs/classes/singer_sdk.pagination.JSONPathPaginator.rst new file mode 100644 index 000000000..ded926d7f --- /dev/null +++ b/docs/classes/singer_sdk.pagination.JSONPathPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.JSONPathPaginator +======================================= + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: JSONPathPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.LegacyPaginatedStreamProtocol.rst b/docs/classes/singer_sdk.pagination.LegacyPaginatedStreamProtocol.rst new file mode 100644 index 000000000..153d83481 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.LegacyPaginatedStreamProtocol.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.LegacyPaginatedStreamProtocol +=================================================== + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: LegacyPaginatedStreamProtocol + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.LegacyStreamPaginator.rst b/docs/classes/singer_sdk.pagination.LegacyStreamPaginator.rst new file mode 100644 index 000000000..3510a4852 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.LegacyStreamPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.LegacyStreamPaginator +=========================================== + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: LegacyStreamPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.SimpleHeaderPaginator.rst b/docs/classes/singer_sdk.pagination.SimpleHeaderPaginator.rst new file mode 100644 index 000000000..494b92aac --- /dev/null +++ b/docs/classes/singer_sdk.pagination.SimpleHeaderPaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.SimpleHeaderPaginator +=========================================== + +.. currentmodule:: singer_sdk.pagination + +.. 
autoclass:: SimpleHeaderPaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/singer_sdk.pagination.SinglePagePaginator.rst b/docs/classes/singer_sdk.pagination.SinglePagePaginator.rst new file mode 100644 index 000000000..332e86de2 --- /dev/null +++ b/docs/classes/singer_sdk.pagination.SinglePagePaginator.rst @@ -0,0 +1,8 @@ +singer_sdk.pagination.SinglePagePaginator +========================================= + +.. currentmodule:: singer_sdk.pagination + +.. autoclass:: SinglePagePaginator + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.ArrayType.rst b/docs/classes/typing/singer_sdk.typing.ArrayType.rst new file mode 100644 index 000000000..4717acec8 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.ArrayType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.ArrayType +=========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: ArrayType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.BooleanType.rst b/docs/classes/typing/singer_sdk.typing.BooleanType.rst new file mode 100644 index 000000000..d0398a4d3 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.BooleanType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.BooleanType +============================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: BooleanType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.Constant.rst b/docs/classes/typing/singer_sdk.typing.Constant.rst new file mode 100644 index 000000000..248f7eb57 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.Constant.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.Constant +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: Constant + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.CustomType.rst b/docs/classes/typing/singer_sdk.typing.CustomType.rst new file mode 100644 index 000000000..9c53706e7 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.CustomType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.CustomType +============================ + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: CustomType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.DateTimeType.rst b/docs/classes/typing/singer_sdk.typing.DateTimeType.rst new file mode 100644 index 000000000..2f38e55c0 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.DateTimeType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.DateTimeType +============================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: DateTimeType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.DateType.rst b/docs/classes/typing/singer_sdk.typing.DateType.rst new file mode 100644 index 000000000..0ceb97934 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.DateType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.DateType +========================== + +.. currentmodule:: singer_sdk.typing + +.. 
autoclass:: DateType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.DiscriminatedUnion.rst b/docs/classes/typing/singer_sdk.typing.DiscriminatedUnion.rst new file mode 100644 index 000000000..132e2ca0a --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.DiscriminatedUnion.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.DiscriminatedUnion +==================================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: DiscriminatedUnion + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.DurationType.rst b/docs/classes/typing/singer_sdk.typing.DurationType.rst new file mode 100644 index 000000000..5c4473697 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.DurationType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.DurationType +============================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: DurationType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.EmailType.rst b/docs/classes/typing/singer_sdk.typing.EmailType.rst new file mode 100644 index 000000000..e5fb94668 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.EmailType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.EmailType +=========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: EmailType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.HostnameType.rst b/docs/classes/typing/singer_sdk.typing.HostnameType.rst new file mode 100644 index 000000000..25b44d844 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.HostnameType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.HostnameType +============================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: HostnameType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.IPv4Type.rst b/docs/classes/typing/singer_sdk.typing.IPv4Type.rst new file mode 100644 index 000000000..e164f2eeb --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.IPv4Type.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.IPv4Type +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: IPv4Type + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.IPv6Type.rst b/docs/classes/typing/singer_sdk.typing.IPv6Type.rst new file mode 100644 index 000000000..d67d4ce05 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.IPv6Type.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.IPv6Type +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: IPv6Type + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.IntegerType.rst b/docs/classes/typing/singer_sdk.typing.IntegerType.rst new file mode 100644 index 000000000..23475e039 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.IntegerType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.IntegerType +============================= + +.. currentmodule:: singer_sdk.typing + +.. 
autoclass:: IntegerType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.JSONPointerType.rst b/docs/classes/typing/singer_sdk.typing.JSONPointerType.rst new file mode 100644 index 000000000..c2913e780 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.JSONPointerType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.JSONPointerType +================================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: JSONPointerType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.NumberType.rst b/docs/classes/typing/singer_sdk.typing.NumberType.rst new file mode 100644 index 000000000..e71147bee --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.NumberType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.NumberType +============================ + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: NumberType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.ObjectType.rst b/docs/classes/typing/singer_sdk.typing.ObjectType.rst new file mode 100644 index 000000000..5ce49a706 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.ObjectType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.ObjectType +============================ + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: ObjectType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.OneOf.rst b/docs/classes/typing/singer_sdk.typing.OneOf.rst new file mode 100644 index 000000000..e9f159fe9 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.OneOf.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.OneOf +======================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: OneOf + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.PropertiesList.rst b/docs/classes/typing/singer_sdk.typing.PropertiesList.rst new file mode 100644 index 000000000..47a68ffca --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.PropertiesList.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.PropertiesList +================================ + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: PropertiesList + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.Property.rst b/docs/classes/typing/singer_sdk.typing.Property.rst new file mode 100644 index 000000000..f2e67d933 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.Property.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.Property +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: Property + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.RegexType.rst b/docs/classes/typing/singer_sdk.typing.RegexType.rst new file mode 100644 index 000000000..1a04b9d83 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.RegexType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.RegexType +=========================== + +.. currentmodule:: singer_sdk.typing + +.. 
autoclass:: RegexType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.RelativeJSONPointerType.rst b/docs/classes/typing/singer_sdk.typing.RelativeJSONPointerType.rst new file mode 100644 index 000000000..ff597970c --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.RelativeJSONPointerType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.RelativeJSONPointerType +========================================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: RelativeJSONPointerType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.StringType.rst b/docs/classes/typing/singer_sdk.typing.StringType.rst new file mode 100644 index 000000000..633dde7dc --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.StringType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.StringType +============================ + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: StringType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.TimeType.rst b/docs/classes/typing/singer_sdk.typing.TimeType.rst new file mode 100644 index 000000000..929143ef6 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.TimeType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.TimeType +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: TimeType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.URITemplateType.rst b/docs/classes/typing/singer_sdk.typing.URITemplateType.rst new file mode 100644 index 000000000..77c082b8b --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.URITemplateType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.URITemplateType +================================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: URITemplateType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.URIType.rst b/docs/classes/typing/singer_sdk.typing.URIType.rst new file mode 100644 index 000000000..0111c4c22 --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.URIType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.URIType +========================= + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: URIType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/classes/typing/singer_sdk.typing.UUIDType.rst b/docs/classes/typing/singer_sdk.typing.UUIDType.rst new file mode 100644 index 000000000..d4358e74c --- /dev/null +++ b/docs/classes/typing/singer_sdk.typing.UUIDType.rst @@ -0,0 +1,8 @@ +singer_sdk.typing.UUIDType +========================== + +.. currentmodule:: singer_sdk.typing + +.. autoclass:: UUIDType + :members: + :special-members: __init__, __call__ \ No newline at end of file diff --git a/docs/cli_commands.md b/docs/cli_commands.md index 97bf0df9a..d12d245f8 100644 --- a/docs/cli_commands.md +++ b/docs/cli_commands.md @@ -161,7 +161,7 @@ plugins: extractors: - name: my-tap namespace: my_tap - executable: ./my-tap.sh + executable: -e . capabilities: - state - catalog diff --git a/docs/code_samples.md b/docs/code_samples.md index 7338e77b6..0cd79f563 100644 --- a/docs/code_samples.md +++ b/docs/code_samples.md @@ -41,7 +41,7 @@ class TapCountries(Tap): config options and does not require authentication. 
""" name = "tap-countries" - config_jsonschema = PropertiesList([]).to_dict() + config_jsonschema = th.PropertiesList([]).to_dict() def discover_streams(self) -> List[Stream]: """Return a list containing the two stream types.""" @@ -127,11 +127,11 @@ class ParquetStream(Stream): """Dynamically detect the json schema for the stream. This is evaluated prior to any records being retrieved. """ - properties: List[Property] = [] - for header in FAKECSV.split("\n")[0].split(",") + properties: List[th.Property] = [] + for header in FAKECSV.split("\n")[0].split(","): # Assume string type for all fields - properties.add(header, StringType()) - return PropertiesList(*properties).to_dict() + properties.append(th.Property(header, th.StringType())) + return th.PropertiesList(*properties).to_dict() ``` Here is another example from the Parquet tap. This sample uses a @@ -148,7 +148,7 @@ class ParquetStream(Stream): """Dynamically detect the json schema for the stream. This is evaluated prior to any records being retrieved. """ - properties: List[Property] = [] + properties: List[th.Property] = [] # Get a schema object using the parquet and pyarrow libraries parquet_schema = pq.ParquetFile(self.filepath).schema_arrow @@ -160,10 +160,10 @@ class ParquetStream(Stream): dtype = get_jsonschema_type(str(parquet_schema.types[i])) # Add the new property to our list - properties.append(Property(name, dtype)) + properties.append(th.Property(name, dtype)) # Return the list as a JSON Schema dictionary object - return PropertiesList(*properties).to_dict() + return th.PropertiesList(*properties).to_dict() ``` ### Initialize a collection of tap streams with differing types @@ -255,6 +255,34 @@ class CachedAuthStream(RESTStream): return APIAuthenticatorBase(stream=self) ``` +### Use one of `requests`'s built-in authenticators + +```python +from requests.auth import HTTPDigestAuth +from singer_sdk.streams import RESTStream + +class DigestAuthStream(RESTStream): + """A stream with digest authentication.""" + + @property + def authenticator(self) -> HTTPDigestAuth: + """Stream authenticator.""" + return HTTPDigestAuth( + username=self.config["username"], + password=self.config["password"], + ) +``` + +[`HTTPBasicAuth`](https://requests.readthedocs.io/en/latest/api/#requests.auth.HTTPBasicAuth) and +[`HTTPProxyAuth`](https://requests.readthedocs.io/en/latest/api/#requests.auth.HTTPProxyAuth) +are also available in `requests.auth`. In addition to `requests.auth` classes, the community +has published a few packages with custom authenticator classes, which are compatible with the SDK. +For example: + +- [`requests-aws4auth`](https://github.com/tedder/requests-aws4auth): AWS v4 authentication +- [`requests_auth`](https://github.com/Colin-b/requests_auth): A collection of authenticators + for various services and protocols including Azure, Okta and NTLM. + ### Custom response validation Some APIs deviate from HTTP status codes to report failures. 
For those cases, @@ -302,6 +330,7 @@ Custom backoff and retry behaviour can be added by overriding the methods: - [`backoff_wait_generator`](singer_sdk.RESTStream.backoff_wait_generator) - [`backoff_max_tries`](singer_sdk.RESTStream.backoff_max_tries) - [`backoff_handler`](singer_sdk.RESTStream.backoff_handler) +- [`backoff_jitter`](singer_sdk.RESTStream.backoff_jitter) For example, to use a constant retry: ``` @@ -311,6 +340,11 @@ def backoff_wait_generator() -> Callable[..., Generator[int, Any, None]]: To utilise a response header as a wait value you can use [`backoff_runtime`](singer_sdk.RESTStream.backoff_runtime), and pass a method that returns a wait value: +**Note**: By default, jitter makes this function wait a bit longer than the value provided. +To disable jitter, override [`backoff_jitter`](singer_sdk.RESTStream.backoff_jitter). +In SDK versions <=0.21.0, the default jitter function will make the function below not work as you would expect unless jitter is disabled; +in those versions, disable jitter by overriding the `request_decorator` and passing `jitter=None` to the `backoff.on_exception` function (see [here](https://github.com/meltano/sdk/issues/1477) for more information). + ``` def backoff_wait_generator() -> Callable[..., Generator[int, Any, None]]: def _backoff_from_headers(retriable_api_error): diff --git a/docs/conf.py b/docs/conf.py index 53102ef6f..b0c33ba9e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,3 +1,5 @@ +# isort: dont-add-imports + # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full @@ -10,22 +12,21 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -import os + import sys +from pathlib import Path -sys.path.insert(0, os.path.abspath("..")) -# sys.path.insert(0, os.path.abspath("../singer_sdk")) -# sys.path.insert(0, os.path.abspath("/Users/ajsteers/Source/sdk")) +sys.path.insert(0, str(Path("..").resolve())) # -- Project information ----------------------------------------------------- -project = "Meltano SDK" -copyright = "2021, Meltano Core Team and Contributors" +project = "Meltano Singer SDK" +copyright = "2021, Meltano Core Team and Contributors" # noqa: A001 author = "Meltano Core Team and Contributors" # The full version, including alpha/beta/rc tags -release = "0.8.0" +release = "0.31.1" # -- General configuration --------------------------------------------------- @@ -38,9 +39,10 @@ "sphinx.ext.napoleon", "sphinx.ext.autosectionlabel", "sphinx.ext.autosummary", - "sphinx_rtd_theme", "sphinx_copybutton", "myst_parser", + "sphinx_reredirects", + "sphinx_inline_tabs", ] # Add any paths that contain templates here, relative to this directory. @@ -60,10 +62,49 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes.
# -html_theme = "sphinx_rtd_theme" -html_theme_options = {"logo_only": True, "analytics_id": "G-84WQMSLGTE"} - html_logo = "_static/img/logo.svg" +html_theme = "furo" +html_theme_options = { + # general + "source_repository": "https://github.com/meltano/sdk/", + "source_branch": "main", + "source_directory": "docs/", + "sidebar_hide_name": True, + # branding + "light_css_variables": { + "font-stack": "Hanken Grotesk,-apple-system,Helvetica,sans-serif", + "color-announcement-background": "#3A64FA", + "color-announcement-text": "#EEEBEE", + "color-foreground-primary": "#080216", + "color-background-primary": "#E9E5FB", + "color-link": "#3A64FA", + "color-link-underline": "transparent", + "color-link--hover": "#3A64FA", + "color-link-underline--hover": "#3A64FA", + # brand + "color-brand-primary": "#311772", + "color-brand-content": "#311772", + # sidebar + "color-sidebar-background": "#311772", + "color-sidebar-search-background": "#E9E5FB", + "color-sidebar-item-background--hover": "#18c3fa", + "color-sidebar-item-expander-background--hover": "#311772", + "color-sidebar-brand-text": "white", + "color-sidebar-caption-text": "rgba(255, 255, 255, 0.7)", + "color-sidebar-link-text": "white", + "color-sidebar-link-text--top-level": "white", + }, + "dark_css_variables": { + "color-background-primary": "#080216", + "color-link": "#18c3fa", + "color-link--hover": "#18c3fa", + "color-link-underline--hover": "#18c3fa", + # brand + "color-brand-content": "rgba(255, 255, 255, 0.7)", + # sidebar + "color-sidebar-search-background": "#080216", + }, +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -74,5 +115,8 @@ "css/custom.css", ] -# TODO: set this back to 3 after MyST-Parser 0.18.0 is released -myst_heading_anchors = 4 +myst_heading_anchors = 3 + +redirects = { + "porting.html": "guides/porting.html", +} diff --git a/docs/context_object.md b/docs/context_object.md new file mode 100644 index 000000000..09e5ed469 --- /dev/null +++ b/docs/context_object.md @@ -0,0 +1,18 @@ +# The Context Object + +Many of the methods in the [Stream](classes/singer_sdk.Stream) class and its subclasses accept +a `context` parameter, which is a dictionary that contains information about the stream +partition or parent stream. + +## Best practices for using context + +- The context object MUST NOT contain any sensitive information, such as API keys or secrets. + This is because the context is<br><br> + + 1) sent to the target, + 2) stored in the state file and + 3) logged to the console as a tag in metrics and logs.<br><br> + +- The context object SHOULD NOT be mutated during the stream's lifecycle. This is because the + context is stored in the state file, and mutating it will cause the state file to be + inconsistent with the actual state of the stream. diff --git a/docs/deprecation.md b/docs/deprecation.md new file mode 100644 index 000000000..b27e61b2b --- /dev/null +++ b/docs/deprecation.md @@ -0,0 +1,15 @@ +# Deprecation Timeline + +This page outlines when various features of the Singer SDK will be removed or changed in a backward +incompatible way, following their deprecation, as indicated in the +[deprecation policy](./release_process.md#deprecation-policy). + +## 1.0 + +- The `RESTStream.get_next_page_token` method will no longer be called + as part of the stream pagination process. It is replaced by the + [`RESTStream.get_new_paginator`](singer_sdk.RESTStream.get_new_paginator). 
+ + See the [migration guide](./guides/pagination-classes.md) for more information. + +- The `singer_sdk.testing.get_standard_tap_tests` and `singer_sdk.testing.get_standard_target_tests` functions will be removed. Replace them with the `singer_sdk.testing.get_tap_test_class` and `singer_sdk.testing.get_target_test_class` functions, respectively, to generate a richer test suite. diff --git a/docs/dev_guide.md b/docs/dev_guide.md index 5a6f89450..316cca8ad 100644 --- a/docs/dev_guide.md +++ b/docs/dev_guide.md @@ -1,4 +1,4 @@ -# SDK Dev Guide +# Getting Started ## Tap Development Overview @@ -8,16 +8,16 @@ Create taps with the SDK requires overriding just two or three classes: and stream discovery. 2. The stream class. You have different options for your base class depending on the type of data source you are working with: - - `Stream` - The **generic** base class for streams. - - `RESTStream` - The base class for **REST**-type streams. - - `GraphQLStream` - The base class for **GraphQL**-type streams. This class inherits - from `RESTStream`, since GraphQL is built upon REST. + - `Stream` - The **generic** base class for streams. + - `RESTStream` - The base class for **REST**-type streams. + - `GraphQLStream` - The base class for **GraphQL**-type streams. This class inherits + from `RESTStream`, since GraphQL is built upon REST. 3. An optional authenticator class. You can omit this class entirely if you do not require authentication or if you prefer to write custom authentication logic. The supported authenticator classes are: - - `SimpleAuthenticator` - This class is functionally equivalent to overriding - `http_headers` property in the stream class. - - `OAuthAuthenticator` - This class performs an OAuth 2.0 authentication flow. - - `OAuthJWTAuthenticator` - This class performs an JWT (JSON Web Token) authentication - flow. + - `SimpleAuthenticator` - This class is functionally equivalent to overriding + `http_headers` property in the stream class. + - `OAuthAuthenticator` - This class performs an OAuth 2.0 authentication flow. + - `OAuthJWTAuthenticator` - This class performs a JWT (JSON Web Token) authentication + flow. ## Target Development Overview @@ -27,28 +27,32 @@ Create targets with the SDK requires overriding just two classes: and stream discovery. 2. The `Sink` class. You have two different options depending on whether your target prefers writing one record at a time versus writing in batches: - - `RecordSink` writes one record at a time, via the `process_record()` - method. - - `BatchSink` writes one batch at a time. Important class members include: - - `start_batch()` to (optionally) initialize a new batch. - - `process_record()` to enqueue a record to be written. - - `process_batch()` to write any queued records and cleanup local resources. + - `RecordSink` writes one record at a time, via the `process_record()` + method. + - `BatchSink` writes one batch at a time. Important class members include: + - `start_batch()` to (optionally) initialize a new batch. + - `process_record()` to enqueue a record to be written. + - `process_batch()` to write any queued records and clean up local resources. Note: The `Sink` class can receive records from one stream or from many. See the [Sink documentation](./sinks.md) for more information on differences between a target's `Sink` class versus a tap's `Stream` class.
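To make the `RecordSink` / `BatchSink` distinction concrete, here is a minimal sketch of a `BatchSink` subclass (an illustration only, not part of the patch above); it assumes the destination is a local `output.jsonl` file and uses the `start_batch()`, `process_record()`, and `process_batch()` hooks described in the list above.

```python
import json

from singer_sdk.sinks import BatchSink


class MySink(BatchSink):
    """Sketch of a batch-oriented sink that appends each batch to a JSONL file."""

    max_size = 10000  # Maximum number of records to queue before writing a batch.

    def start_batch(self, context: dict) -> None:
        # Optionally initialize per-batch resources or state.
        context["records"] = []

    def process_record(self, record: dict, context: dict) -> None:
        # Enqueue the record to be written with the rest of the batch.
        context["records"].append(record)

    def process_batch(self, context: dict) -> None:
        # Write any queued records and clean up local resources.
        with open("output.jsonl", "a", encoding="utf-8") as out:
            for record in context["records"]:
                out.write(json.dumps(record) + "\n")
```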
## Building a New Tap or Target -First, install [cookiecutter](https://cookiecutter.readthedocs.io) if you haven't -done so already: +First, install [cookiecutter](https://cookiecutter.readthedocs.io), +[Poetry](https://python-poetry.org/docs/), and optionally [Tox](https://tox.wiki/): ```bash # Install pipx if you haven't already -pip3 install pipx +pip install pipx pipx ensurepath + # Restart your terminal here, if needed, to get the updated PATH pipx install cookiecutter pipx install poetry + +# Optional: Install Tox if you want to use it to run auto-formatters, linters, tests, etc. +pipx install tox ``` Now you can initialize your new project with the Cookiecutter template for taps: @@ -72,6 +76,24 @@ generated `README.md` file to complete your new tap or target. You can also refe [Meltano Tutorial](https://docs.meltano.com/tutorials/custom-extractor) for a more detailed guide. +````{admonition} Avoid repeating yourself + If you find yourself repeating the same inputs to the cookiecutter, you can create a + `cookiecutterrc` file in your home directory to set default values for the prompts. + + For example, if you want to set the default value for your name and email, and the + default stream type and authentication method, you can add the following to your + `~/.cookiecutterrc` file: + + ```yaml + # ~/.cookiecutterrc + default_context: + admin_name: Johnny B. Goode + admin_email: jbg@example.com + stream_type: REST + auth_method: Bearer Token + ``` +```` + ### Using an existing library In some cases, there may already be a library that connects to the API and all you need the SDK for @@ -97,27 +119,27 @@ Many APIs return the records in an array nested inside an JSON object key. - Response: - ```json - { - "data": { - "records": [ - {"id": 1, "value": "abc"}, - {"id": 2, "value": "def"} - ] - } + ```json + { + "data": { + "records": [ + { "id": 1, "value": "abc" }, + { "id": 2, "value": "def" } + ] } - ``` + } + ``` - Expression: `$.data.records[*]` - Result: - ```json - [ - {"id": 1, "value": "abc"}, - {"id": 2, "value": "def"} - ] - ``` + ```json + [ + { "id": 1, "value": "abc" }, + { "id": 2, "value": "def" } + ] + ``` #### Nested object values example @@ -125,37 +147,37 @@ Some APIs instead return the records as values inside an object where each key i - Response: - ```json - { - "data": { - "1": { - "id": 1, - "value": "abc" - }, - "2": { - "id": 2, - "value": "def" - } + ```json + { + "data": { + "1": { + "id": 1, + "value": "abc" + }, + "2": { + "id": 2, + "value": "def" } } - ``` + } + ``` - Expression: `$.data.*` - Result: - ```json - [ - {"id": 1, "value": "abc"}, - {"id": 2, "value": "def"} - ] - ``` + ```json + [ + { "id": 1, "value": "abc" }, + { "id": 2, "value": "def" } + ] + ``` ## Resources ### Detailed Class Reference -For a detailed reference, please see the [SDK Reference Guide](./reference.md) +For a detailed reference, please see the [SDK Reference Guide](./reference.rst) ### Implementation Details @@ -175,12 +197,59 @@ For a list of sample CLI commands you can run, [click here](./cli_commands.md). We've collected some [Python tips](python_tips.md) which may be helpful for new SDK users. -### VSCode Tips +### IDE Tips + +Using the debugger features of your IDE can help you develop and fix bugs easier and faster. +Also using breakpoints is a great way to become familiar with the internals of the SDK itself. + +#### VSCode Debugging -Ensure the intrepreter you're using in VSCode is set to use poetry. 
-You can change this by using the command pallete to go to intrepeter settings. +Ensure the interpreter you're using in VSCode is set to use poetry. +You can change this by using the command palette to go to interpreter settings. Doing this will also help with autocompletion. + +In order to launch your plugin via its CLI with the built-in debugger, VSCode requires a [Launch configuration](https://code.visualstudio.com/docs/editor/debugging#_launch-configurations). +An example launch configuration, added to your `launch.json`, might be as follows: + +```js +{ + // launch.json + "version": "0.2.0", + "configurations": [ + { + "name": "tap-snowflake discovery", + "type": "python", + "request": "launch", + "module": "tap_snowflake.tap", + "args": ["--config", "config.json", "--discover"], + "python": "${command:python.interpreterPath}", + // Set to true to debug third-party library code + "justMyCode": false, + } + ] +} +``` + +#### PyCharm Debugging + +See the JetBrains [PyCharm documentation](https://www.jetbrains.com/help/pycharm/run-debug-configuration.html) for more details. + +To launch the PyCharm debugger you can select "Edit Configuration" in the main menu to open the debugger configuration. +Click "Add new run configuration". Set the script path to the full path to your tap.py and parameters to something like `--config .secrets/config.json`. +You can pass in additional parameters like `--discover` or `--state my_state_file.json` to test the discovery or state workflows. + +#### Main Method + +The above debugging configurations rely on an equivalent to the following snippet being added to the end of your `tap.py` or `target.py` file: + +```python +if __name__ == "__main__": + TapSnowflake.cli() +``` + +This is automatically included in the most recent version of the tap and target cookiecutters. + ### Testing performance We've had success using [`viztracer`](https://github.com/gaogaotiantian/viztracer) to create flame graphs for SDK-based packages and find if there are any serious performance bottlenecks. @@ -188,13 +257,14 @@ We've had success using [`viztracer`](https://github.com/gaogaotiantian/viztrace You can start doing the same in your package. Start by installing `viztracer`. ```console -$ poetry add --dev viztracer +$ poetry add --group dev viztracer ``` Then simply run your package's CLI as normal, preceded by the `viztracer` command ```console $ poetry run viztracer my-tap +$ poetry run viztracer -- my-target --config=config.json --input=messages.json ``` That command will produce a `result.json` file which you can explore with the `vizviewer` tool. diff --git a/docs/faq.md b/docs/faq.md index e0b18c5e0..63dcb675e 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -6,12 +6,16 @@ Most likely you should delete the project and start over. ## What are all of the Property options? -The property types are documented in the [JSON Schema builder docs](./typing). +The property types are documented in the [JSON Schema helper classes](./reference.rst). However, if you're using an IDE such as VSCode, you should be able to set up the environment to give you autocompletion prompts or hints. Ensure your interpreter is set to poetry if you've followed the [Dev Guide](./dev_guide.md). Checkout this [gif](https://visualstudiomagazine.com/articles/2021/04/20/~/media/ECG/visualstudiomagazine/Images/2021/04/poetry.ashx) for how to change your interpreter. -## I'm having trouble getting the base class to __init__.
+### Handling credentials and other secrets in config + +As of SDK version `0.13.0`, developers can use the `secret=True` indication in the `Property` class constructor to flag secrets such as API tokens and passwords. We recommend all developers use this option where applicable so that orchestrators may consider this designation when determining how to store the user's provided config. + +## I'm having trouble getting the base class to **init**. Ensure you're using the `super()` method to inherit methods from the base class. diff --git a/docs/guides/custom-clis.md b/docs/guides/custom-clis.md new file mode 100644 index 000000000..f17cabb5b --- /dev/null +++ b/docs/guides/custom-clis.md @@ -0,0 +1,38 @@ +# Custom CLIs + +## Overview + +By default, packages created with the Singer SDK will have a single command, e.g. `tap-my-source`, which will run the application in a Singer-compatible way. However, you may want to add additional commands to your package. For example, you may want to add a command to initialize the database or platform with certain attributes required by the application to run properly. + +## Adding a custom command + +To add a custom command, you will need to add a new method to your plugin class that returns an instance of [`click.Command`](https://click.palletsprojects.com/en/8.1.x/api/#commands) (or a subclass of it) and decorate it with the `singer_sdk.cli.plugin_cli` decorator. Then you will need to add the command to the `[tool.poetry.scripts]` section of your `pyproject.toml` file. + +```python +# tap_shortcut/tap.py + +class ShortcutTap(Tap): + """Shortcut tap class.""" + + @plugin_cli + def update_schema(cls) -> click.Command: + """Update the OpenAPI schema for this tap.""" + @click.command() + def update(): + response = requests.get( + "https://developer.shortcut.com/api/rest/v3/shortcut.swagger.json", + timeout=5, + ) + with Path("tap_shortcut/openapi.json").open("w") as f: + f.write(response.text) + + return update +``` + +```toml +# pyproject.toml + +[tool.poetry.scripts] +tap-shortcut = "tap_shortcut.tap:ShortcutTap.cli" +tap-shortcut-update-schema = "tap_shortcut.tap:ShortcutTap.update_schema" +``` diff --git a/docs/guides/index.md b/docs/guides/index.md new file mode 100644 index 000000000..e86aa149c --- /dev/null +++ b/docs/guides/index.md @@ -0,0 +1,11 @@ +# In-depth Guides + +The following pages contain useful information for developers building on top of the Singer SDK. + +```{toctree} +:maxdepth: 2 + +porting +pagination-classes +custom-clis +``` diff --git a/docs/guides/pagination-classes.md b/docs/guides/pagination-classes.md new file mode 100644 index 000000000..897712423 --- /dev/null +++ b/docs/guides/pagination-classes.md @@ -0,0 +1,104 @@ +# Migrate your REST tap to use the new pagination classes! + +This guide will walk you through the process of migrating your REST tap to use the new pagination classes. + +## What's changed? + +In a future release of the SDK, the `RESTStream.get_next_page_token` method will no longer be +called as part of the stream pagination process. It is replaced by the +[`RESTStream.get_new_paginator`](singer_sdk.RESTStream.get_new_paginator). + +In the meantime, the `get_next_page_token` method will still be called if you define it +in your stream class, but a deprecation warning will be logged, if +[warnings are enabled](https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS). 
+ +```{note} +If you have not overridden the `get_next_page_token` method in your stream class, you do not need +to make any changes. +``` + +## Why is this change being made? + +The new pagination classes are designed to be more flexible, easier to understand, and easier to test. They also align better with the [Single Responsibility Principle](https://en.wikipedia.org/wiki/Single-responsibility_principle). + +## How to migrate + +### Example: HATEOAS pagination, a.k.a. "next" links + +A common pattern in REST APIs is to use a `next` field in the response to indicate the +URL of the next page of results. The [`BaseHATEOASPaginator`](../../classes/singer_sdk.pagination.BaseHATEOASPaginator) +class can be used to handle this pattern. + +```python +# Original implementation +from urllib.parse import parse_qsl + +class MyStream(RESTStream): + def get_next_page_token(self, response, previous_token): + data = response.json() + return data.get("next") + + def get_url_params(self, context, next_page_token): + params = {} + + if next_page_token: + params.update(parse_qsl(next_page_token.query)) + + return params +``` + +```python +# New implementation + +from singer_sdk.pagination import BaseHATEOASPaginator + +class MyPaginator(BaseHATEOASPaginator): + def get_next_url(self, response): + data = response.json() + return data.get("next") + + +class MyStream(RESTStream): + def get_new_paginator(self): + return MyPaginator() + + def get_url_params(self, context, next_page_token): + params = {} + + # Next page token is a URL, so we can parse it to extract the query string + if next_page_token: + params.update(parse_qsl(next_page_token.query)) + + return params +``` + +### Example: Offset pagination + +Another common pattern is to use an `offset` parameter to indicate the starting point of the next +page of results. The [`BaseOffsetPaginator`](../../classes/singer_sdk.pagination.BaseOffsetPaginator) +class can be used to handle this pattern. + +```python +# New implementation + +from singer_sdk.pagination import BaseOffsetPaginator + +class MyPaginator(BaseOffsetPaginator): + def has_more(self, response): + data = response.json() + return data.get("has_more", False) + + +class MyStream(RESTStream): + def get_new_paginator(self): + return BaseOffsetPaginator(start_value=0, page_size=250) + + def get_url_params(self, context, next_page_token): + params = {} + + # Next page token is an offset + if next_page_token: + params["offset"] = next_page_token + + return params +``` diff --git a/docs/porting.md b/docs/guides/porting.md similarity index 75% rename from docs/porting.md rename to docs/guides/porting.md index 91e27796f..aec02ea7b 100644 --- a/docs/porting.md +++ b/docs/guides/porting.md @@ -2,7 +2,7 @@ This guide walks you through the process of migrating an existing Singer Tap over to the SDK. -_Want to follow along in a real world porting example? See our recorded pair coding session for the `tap-gitlab` port `[here](http://www.youtube.com/watch?v=XNm5BN_zluw)`._ +_Want to follow along in a real world porting example? See our recorded pair coding session for the `tap-gitlab` port [here](http://www.youtube.com/watch?v=XNm5BN_zluw)._ ## A Clear Slate @@ -11,7 +11,7 @@ When porting over an existing tap, most developers find it easier to start from 1. Within your existing repo, create a new branch. 1. Move _all_ of the files in the old branch into a subfolder called "archive". 1. Commit and push the result to your new branch.
(You'll do this several times along the way, which creates a fresh tree and a fresh diff for subsequent commits.) -1. Now follow the steps in the [dev guide](dev_guide.md#building-a-new-tap-or-target) to create a new project using the Tap cookiecutter. +1. Now follow the steps in the [dev guide](../dev_guide.md#building-a-new-tap-or-target) to create a new project using the Tap cookiecutter. 1. Copy all the files from the cookiecutter output into your main repo and commit the result. ## Settings and Readme @@ -103,10 +103,82 @@ _Important: If you've gotten this far, this is a good time to commit your code b Pagination is generally unique for almost every API. There's no single method that solves for very different API's approach to pagination. -Most likely you will use `get_next_page_token` to parse and return whatever the "next page" token is for your source, and you'll use `get_url_params` to define how to pass the "next page" token back to the API when asking for subsequent pages. +Most likely you will use [get_new_paginator](singer_sdk.RESTStream.get_new_paginator) to instantiate a [pagination class](./../classes/singer_sdk.pagination.BaseAPIPaginator.rst) for your source, and you'll use `get_url_params` to define how to pass the "next page" token back to the API when asking for subsequent pages. When you think you have it right, run `poetry run tap-mysource` again, and debug until you are confident the result is including multiple pages back from the API. +You can also add unit tests for your pagination implementation for additional confidence: + +```python +import json + +from requests import Response + +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import BaseHATEOASPaginator, first + + +class CustomHATEOASPaginator(BaseHATEOASPaginator): + """Paginator for HATEOAS APIs - or "Hypermedia as the Engine of Application State". + + This paginator expects responses to have a key "next" with a value + like "https://api.com/link/to/next-item". + """ + + def get_next_url(self, response: Response) -> str | None: + """Get a parsed HATEOAS link for the next page, if the response has one.""" + + try: + return first( + extract_jsonpath("$.links[?(@.rel=='next')].href", response.json()) + ) + except StopIteration: + return None + + +def test_paginator_custom_hateoas(): + """Validate my custom HATEOAS paginator.""" + + resource_path = "/path/to/resource" + response = Response() + paginator = CustomHATEOASPaginator() + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response._content = json.dumps( + { + "links": [ + { + "rel": "next", + "href": f"{resource_path}?page=2&limit=100", + } + ] + } + ).encode() + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=2&limit=100" + assert paginator.count == 1 + + response._content = json.dumps( + { + "links": [ + { + "rel": "next", + "href": f"{resource_path}?page=3&limit=100", + } + ] + } + ).encode() + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=3&limit=100" + assert paginator.count == 2 + + response._content = json.dumps({"links": []}).encode() + paginator.advance(response) + assert paginator.finished + assert paginator.count == 3 +``` + Note: Depending on how well the API is designed, this could take 5 minutes or multiple hours.
If you need help, sometimes [PostMan](https://postman.com) or [Thunder Client](https://marketplace.visualstudio.com/items?itemName=rangav.vscode-thunder-client) can be helpful in debugging the APIs specific quirks. ## Run pytest @@ -125,7 +197,7 @@ Notes: - As should be expected, you are free to subclass streams in order to have their behavior be inherited from other stream classes. - For instance, if 3 streams use one pagination method, and 5 other streams use a different method, you can have each stream created as a subclass of a stream that has desired behavior. -- If you have streams which invoke each other in a nested layout, please refer to the `parent_stream_class` property and its [related documentation](parent_streams.md). +- If you have streams which invoke each other in a nested layout, please refer to the `parent_stream_class` property and its [related documentation](../stream_maps.md). - As before, if you do not already have a full JSON Schema file for each stream type, it is generally a good practice to start with just 5-8 properties per stream. You don't have to define all properties up front and before doing so, it is generally more valuable to test that each stream is getting data. ## Run pytest again, add stream properties, and repeat @@ -146,7 +218,7 @@ poetry run pytest The SDK will automatically handle `STATE` for you 99% of the time. However, it is very likely that the legacy version of the tap has a different `STATE` format in comparison with the SDK format. If you want to seamlessly support both old and new STATE formats, you'll need to define a conversion operation. -To handle the conversion operation, you'll override [`Tap.load_state()`](singer_sdk.Tap.load_state). The exact process of converting state is outside of this guide, but please check the [STATE implementation docs](implementation/state.md) for an explanation of general format expectations. +To handle the conversion operation, you'll override [`Tap.load_state()`](singer_sdk.Tap.load_state). The exact process of converting state is outside of this guide, but please check the [STATE implementation docs](../implementation/state.md) for an explanation of general format expectations. ### Leverage Auto Generated README diff --git a/docs/implementation/at_least_once.md b/docs/implementation/at_least_once.md index d349d4729..8643202f9 100644 --- a/docs/implementation/at_least_once.md +++ b/docs/implementation/at_least_once.md @@ -1,4 +1,4 @@ -# [SDK Implementation Details](./index.md) - "At Least Once" Delivery Promise +# "At Least Once" Delivery Promise The Singer Spec promises that each record in the source system will be processed successfully in the target _at least once_. This promises that no record will ever go missing or be omitted, but it _does not_ guarantee that all records will be received _exactly once_. diff --git a/docs/implementation/catalog_metadata.md b/docs/implementation/catalog_metadata.md index f94bb12f6..b255a5530 100644 --- a/docs/implementation/catalog_metadata.md +++ b/docs/implementation/catalog_metadata.md @@ -1,4 +1,4 @@ -# [SDK Implementation Details](./index.md) - Catalog Metadata +# Catalog Metadata The SDK automatically generates catalog metadata during catalog discovery. Selection rules overrided by a user will be respected. 
diff --git a/docs/implementation/discovery.md b/docs/implementation/discovery.md index 03ed9258c..67aaea901 100644 --- a/docs/implementation/discovery.md +++ b/docs/implementation/discovery.md @@ -1,4 +1,4 @@ -# [SDK Implementation Details](./index.md) - Catalog Discovery +# Catalog Discovery All taps developed using the SDK will automatically support `discovery` as a base capability, which is the process of generating and emitting a catalog that describes the diff --git a/docs/implementation/index.md b/docs/implementation/index.md index eca49569c..06a2cec9a 100644 --- a/docs/implementation/index.md +++ b/docs/implementation/index.md @@ -1,4 +1,4 @@ -# SDK Implementation Details +# Singer Implementation Details This section documents certain behaviors and expectations of the SDK framework. diff --git a/docs/implementation/logging.md b/docs/implementation/logging.md index 3f6740333..da20a5780 100644 --- a/docs/implementation/logging.md +++ b/docs/implementation/logging.md @@ -1,5 +1,79 @@ # Logging -Logs are configurable by the environment variables `<PLUGIN_NAME>_LOGLEVEL` (preferred) or `LOGLEVEL`. Use `LOGLEVEL` when you intend to control the log output for all taps and targets running within the environment. In contrast, we recommend setting `<PLUGIN_NAME>_LOGLEVEL` for more granual control of each tap or target individually. +## Logging levels -From most verbose to least verbose, the accepted values for logging level are `debug`, `info`, `warning`, and `error`. Logging level inputs are case insensitive. +Logging levels are configurable by the environment variables `<PLUGIN_NAME>_LOGLEVEL` (preferred) +or `LOGLEVEL`. Use `LOGLEVEL` when you intend to control the log output for all taps +and targets running within the environment. In contrast, we recommend setting +`<PLUGIN_NAME>_LOGLEVEL` for more granular control of each tap or target individually. + +From most verbose to least verbose, the accepted values for logging level are `debug`, +`info`, `warning`, and `error`. Logging level inputs are case-insensitive. + +To use different logging levels for different loggers, see the [custom logging configuration](#custom-logging-configuration) section below. + +## Default log format + +The default log format is `"{asctime:23s} | {levelname:8s} | {name:20s} | {message}"`. + +This produces logs that look like this: + +``` +2022-12-05 19:46:46,744 | INFO | my_tap | Added 'child' as child stream to 'my_stream' +2022-12-05 19:46:46,744 | INFO | my_tap | Beginning incremental sync of 'my_stream'... +2022-12-05 19:46:46,744 | INFO | my_tap | Tap has custom mapper. Using 1 provided map(s). +2022-12-05 19:46:46,745 | INFO | my_tap | Beginning full_table sync of 'child' with context: {'parent_id': 1}... +2022-12-05 19:46:46,745 | INFO | my_tap | Tap has custom mapper. Using 1 provided map(s). +2022-12-05 19:46:46,746 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.0005319118499755859, "tags": {"stream": "child", "context": {"parent_id": 1}, "status": "succeeded"}} +2022-12-05 19:46:46,747 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 3, "tags": {"stream": "child", "context": {"parent_id": 1}}} +2022-12-05 19:46:46,747 | INFO | my_tap | Beginning full_table sync of 'child' with context: {'parent_id': 2}...
+2022-12-05 19:46:46,748 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.0004410743713378906, "tags": {"stream": "child", "context": {"parent_id": 2}, "status": "succeeded"}} +2022-12-05 19:46:46,748 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 3, "tags": {"stream": "child", "context": {"parent_id": 2}}} +2022-12-05 19:46:46,749 | INFO | my_tap | Beginning full_table sync of 'child' with context: {'parent_id': 3}... +2022-12-05 19:46:46,749 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.0004508495330810547, "tags": {"stream": "child", "context": {"parent_id": 3}, "status": "succeeded"}} +2022-12-05 19:46:46,750 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 3, "tags": {"stream": "child", "context": {"parent_id": 3}}} +2022-12-05 19:46:46,750 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.0052759647369384766, "tags": {"stream": "my_stream", "context": {}, "status": "succeeded"}} +2022-12-05 19:46:46,750 | INFO | singer_sdk.metrics | INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 3, "tags": {"stream": "my_stream", "context": {}}} +``` + +To use a different log format, see the [custom logging configuration](#custom-logging-configuration) section below. + +## Custom logging configuration + +Users of a tap can configure the SDK logging by setting the `SINGER_SDK_LOG_CONFIG` +environment variable. The value of this variable should be a path to a YAML file in the +[Python logging dict format](https://docs.python.org/3/library/logging.config.html#dictionary-schema-details). 
+ +For example, to send [metrics](./metrics.md) (with logger name `singer_sdk.metrics`) to a file, you could use the following config: + +```yaml +version: 1 +disable_existing_loggers: false +formatters: + metrics: + format: "{asctime} {levelname} {message}" + style: "{" +handlers: + metrics: + class: logging.FileHandler + formatter: metrics + filename: metrics.log +loggers: + singer_sdk.metrics: + level: INFO + handlers: [ metrics ] + propagate: yes +``` + +This will send metrics to a `metrics.log`: + +``` +2022-09-29 00:48:52,746 INFO METRIC: {"metric_type": "timer", "metric": "http_request_duration", "value": 0.501743, "tags": {"stream": "continents", "endpoint": "", "http_status_code": 200, "status": "succeeded"}} +2022-09-29 00:48:52,775 INFO METRIC: {"metric_type": "counter", "metric": "http_request_count", "value": 1, "tags": {"stream": "continents", "endpoint": ""}} +2022-09-29 00:48:52,776 INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.7397160530090332, "tags": {"stream": "continents", "context": {}, "status": "succeeded"}} +2022-09-29 00:48:52,776 INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 7, "tags": {"stream": "continents", "context": {}}} +2022-09-29 00:48:53,225 INFO METRIC: {"metric_type": "timer", "metric": "http_request_duration", "value": 0.392148, "tags": {"stream": "countries", "endpoint": "", "http_status_code": 200, "status": "succeeded"}} +2022-09-29 00:48:53,302 INFO METRIC: {"metric_type": "counter", "metric": "http_request_count", "value": 1, "tags": {"stream": "countries", "endpoint": ""}} +2022-09-29 00:48:53,302 INFO METRIC: {"metric_type": "timer", "metric": "sync_duration", "value": 0.5258760452270508, "tags": {"stream": "countries", "context": {}, "status": "succeeded"}} +2022-09-29 00:48:53,303 INFO METRIC: {"metric_type": "counter", "metric": "record_count", "value": 250, "tags": {"stream": "countries", "context": {}}} +``` diff --git a/docs/implementation/metrics.md b/docs/implementation/metrics.md index c6a0a13f2..87678cbe5 100644 --- a/docs/implementation/metrics.md +++ b/docs/implementation/metrics.md @@ -1,18 +1,22 @@ -# [SDK Implementation Details](./index.md) - Tap Metrics +# Tap Metrics Metrics logging is specified in the [Singer Spec](https://hub.meltano.com/singer/spec#metrics). The SDK will automatically -emit two types of metrics `record_count` and `http_request_duration`. +emit metrics for `record_count`, `http_request_duration` and `sync_duration`. -Customization options: +## Customization options -Developers may optionally add a `metrics_log_level` config option to their taps, -which will automatically allow this metrics logging to be customized at runtime. +### `metrics_log_level` -When `metrics_log_level` is supported, users can then -set one of these values (case insensitive), `INFO`, `DEBUG`, `NONE`, to override the -default logging level for metrics. This can be helpful for REST-type sources which use -make a large number of REST calls can therefor have very noisy metrics. +Metrics are logged at the `INFO` level. Developers may optionally add a +`metrics_log_level` config option to their taps, which users can then set to `WARNING` or `ERROR` to disable +metrics logging. + +### `SINGER_SDK_LOG_CONFIG` + +Metrics are written by the `singer_sdk.metrics` logger, so the end user can set +`SINGER_SDK_LOG_CONFIG` to a logging config file that defines the format and output +for metrics. See the [logging docs](./logging.md) for an example file.
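To make the `metrics_log_level` option above concrete, here is a minimal sketch of how a tap developer might declare it alongside their other settings using the SDK's typing helpers. The tap name and the `api_url` setting are placeholders, not part of the SDK.

```python
from singer_sdk import Tap
from singer_sdk import typing as th  # JSON Schema typing helpers


class TapExample(Tap):
    """Hypothetical tap that exposes the optional metrics_log_level setting."""

    name = "tap-example"

    config_jsonschema = th.PropertiesList(
        # Placeholder for the tap's real settings.
        th.Property("api_url", th.StringType, description="Base URL for the API"),
        # Optional: users can set this to WARNING or ERROR to silence metrics logging.
        th.Property("metrics_log_level", th.StringType, default="INFO"),
    ).to_dict()

    def discover_streams(self) -> list:
        # Stream discovery is omitted in this sketch.
        return []
```

With a setting like this declared, a user could set `metrics_log_level: WARNING` in their config to suppress the per-stream metric lines shown above.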
## Additional Singer Metrics References diff --git a/docs/implementation/record_metadata.md b/docs/implementation/record_metadata.md index 1decf5285..6072a8cd1 100644 --- a/docs/implementation/record_metadata.md +++ b/docs/implementation/record_metadata.md @@ -1,4 +1,4 @@ -# [SDK Implementation Details](./index.md) - Record Metadata +# Record Metadata The SDK can automatically generate `_sdc_` ("Singer Data Capture") metadata properties when performing data loads in SDK-based targets. diff --git a/docs/implementation/state.md b/docs/implementation/state.md index a43a18332..641d3d67c 100644 --- a/docs/implementation/state.md +++ b/docs/implementation/state.md @@ -1,4 +1,4 @@ -# [SDK Implementation Details](./index.md) - Stream State +# Stream State The SDK automatically handles state management and bookmarks. diff --git a/docs/incremental_replication.md b/docs/incremental_replication.md new file mode 100644 index 000000000..757f15802 --- /dev/null +++ b/docs/incremental_replication.md @@ -0,0 +1,62 @@ +# Incremental Replication + +With incremental replication, a Singer tap emits only the data that was created or updated since the previous import, rather than the full table. + +To support incremental replication, the tap must first define how its replication state will be tracked, e.g. the id of the newest record or the maximal update timestamp in the previous import. + +You'll either have to manage your own [state file](https://hub.meltano.com/singer/spec#state-files-1), or use Meltano. The Singer SDK makes the tap state available through the [context object](./context_object.md) on subsequent runs. Using the state, the tap should then skip returning rows where the replication key comes _strictly before_ the previous maximal replication key value stored in the state. + +## Example Code: Timestamp-Based Incremental Replication + +```py +class CommentsStream(RESTStream): + + replication_key = "date_gmt" + is_sorted = True + + schema = th.PropertiesList( + th.Property("date_gmt", th.DateTimeType, description="date"), + ).to_dict() + + def get_url_params(self, context, next_page_token): + params = {} + + starting_date = self.get_starting_timestamp(context) + if starting_date: + params["after"] = starting_date.isoformat() + + if next_page_token is not None: + params["page"] = next_page_token + + self.logger.info("QUERY PARAMS: %s", params) + return params +``` + +1. First we inform the SDK of the `replication_key`, which automatically triggers incremental import mode. + +2. Second, optionally, set `is_sorted` to true if the records are monotonically increasing (i.e. newer records always come later). With this setting, the sync will be resumable if it's interrupted at any point and the state file will reflect this. Otherwise, the tap has to run to completion so the state can safely reflect the largest replication value seen. + +3. Last, we have to adapt the query to the remote system, in this example by adding a query parameter with the ISO timestamp. + + +```{note} +- The SDK will throw an error if records come out of order when `is_sorted` is true. +- Unlike a `primary_key`, a `replication_key` does not have to be unique. +- In incremental replication, it is OK and usually recommended to resend rows where the replication key is equal to the previous highest key. Targets are expected to update rows that are re-synced.
+``` + +## Manually testing incremental import during development + +To test the tap in standalone mode, manually create a state file and run the tap: + +```shell +$ echo '{"bookmarks": {"documents": {"replication_key": "date_gmt", "replication_key_value": "2023-01-15T12:00:00.120000"}}}' > state_test.json + +$ tap-my-example --config tap_config_test.json --state state_test.json +``` + +## Additional References + +- [Tap SDK State](./implementation/state.md) +- [Context Object](./context_object.md) +- [Example tap with get_starting_replication_key_value](https://github.com/flexponsive/tap-eu-ted/blob/main/tap_eu_ted/client.py) diff --git a/docs/index.rst b/docs/index.rst index 19eed21ef..081c56f24 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,14 +1,16 @@ -.. Meltano SDK documentation master file, created by +.. Meltano Singer SDK documentation master file, created by sphinx-quickstart on Thu Jun 3 14:38:15 2021. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Meltano SDK for Taps and Targets -======================================= +Meltano Singer SDK +================== -The Meltano_ SDK for Taps and Targets is the fastest way to build custom +The Meltano_ Singer SDK for Taps and Targets is the fastest way to build custom data extractors and loaders! Taps and targets built on the SDK are automatically compliant with the -`Singer Spec`_, the de-facto open source standard for extract and load pipelines. +`Singer Spec`_, the de-facto open source standard for extract and load pipelines, and therefore Meltano_. + +If you're looking to add support to Meltano for a new data tool that would be listed on the `Meltano Hub`_ as a utility, check out the `Meltano EDK`_ (Extension Development Kit) instead. Future-proof extractors and loaders, with less code --------------------------------------------------- @@ -33,46 +35,57 @@ read the `source code`_, `log an issue or feature request`_, and `contribute bac to the SDK. We also regularly discuss SDK topics in our `Meltano Slack`_ community, within the `#singer-tap-development`_ and `#singer-target-development`_ Slack channels. -Developer Resources -------------------- .. toctree:: + :caption: Developer Resources :maxdepth: 2 + :hidden: dev_guide - python_tips - code_samples + guides/index + Python Tips <python_tips> + Code Samples <code_samples> cli_commands - capabilities faq -Reference ---------- - .. toctree:: + :caption: Reference :maxdepth: 2 + :hidden: reference - - -Advanced Topics ---------------- + implementation/index + typing + capabilities .. toctree:: - :maxdepth: 1 + :caption: Advanced Concepts + :maxdepth: 2 + :hidden: + incremental_replication parent_streams partitioning + context_object stream_maps - porting + batch sinks + testing + +.. toctree:: + :caption: SDK Development + :maxdepth: 1 + :hidden: + CONTRIBUTING - implementation/index - typing + release_process + deprecation .. _Singer: https://singer.io .. _Singer Spec: https://hub.meltano.com/singer/spec .. _Meltano: https://www.meltano.com +.. _Meltano EDK: https://edk.meltano.com +.. _Meltano Hub: https://hub.meltano.com/utilities/ .. _integrated with Meltano: https://docs.meltano.com/tutorials/custom-extractor#add-the-plugin-to-your-meltano-project .. _contribute back: https://github.com/meltano/sdk/issues?q=is%3Aopen+is%3Aissue+label%3A%22accepting+merge+requests%22 .. 
_source code: https://github.com/meltano/sdk diff --git a/docs/partitioning.md b/docs/partitioning.md index 37a2f99e2..95103dc45 100644 --- a/docs/partitioning.md +++ b/docs/partitioning.md @@ -5,20 +5,23 @@ which each have their own state and their own distinct queryable domain. ## If you do not require partitioning -In general, developers can simply ignore the `context` arguments in methods like -`Stream.get_records()` if partitioning is not required. +In general, developers can simply ignore the [`context`](./context_object.md) arguments +in methods like [`Stream.get_records()`](singer_sdk.Stream.get_records) if partitioning +is not required. ## If you do want to utilize partitioning -To take advantage of partitioning, first override the `Stream.partitions` property, -returning a list of dictionaries, where each dictionary uniquely defines the construct of -a partition. For instance, a regionally partitioned stream may return the following: +To take advantage of partitioning, first override the +[`Stream.partitions`](singer_sdk.Stream.partitions) property, returning a list of +dictionaries, where each dictionary uniquely defines the construct of a partition. +For instance, a regionally partitioned stream may return the following: `[{"region": "us-east"}, {"region": "us-west"}, ...]` -For any streams which define the `partitions` property, the individual partitions will be -passed one at a time through the `partition` argument of methods which reference the -partition, such as `Stream.get_records()`. +For any streams which define the [`partitions`](singer_sdk.Stream.partitions) property, +the individual partitions will be passed one at a time through the `context` argument +of methods which reference the partition, such as +[`Stream.get_records()`](singer_sdk.Stream.get_records). ## If you are unsure if partitioning will be needed @@ -28,9 +31,9 @@ work regardless of whether partition is an actual partition context or `None`, m no partition is specified. When dealing with state, for example, developers may always call -`Stream.get_context_state(context)` even if `context` is not set. -The method will automatically return the state that is appropriate, either for the partition -or for the stream. +[`Stream.get_context_state(context)`](singer_sdk.Stream.get_context_state) even if +`context` is not set. The method will automatically return the state that is appropriate, +either for the partition or for the stream. ## Additional State Partitioning References diff --git a/docs/reference.rst b/docs/reference.rst index 1641d357c..b59bd6651 100644 --- a/docs/reference.rst +++ b/docs/reference.rst @@ -1,5 +1,5 @@ -SDK Reference -============= +Classes Reference +================= .. currentmodule:: singer_sdk @@ -8,7 +8,7 @@ Plugin Classes .. autosummary:: :toctree: classes - :template: class.rst + :template: plugin_class.rst Tap Target @@ -56,6 +56,7 @@ Authenticator Classes :toctree: classes :template: class.rst + authenticators.APIAuthenticatorBase authenticators.APIKeyAuthenticator authenticators.BasicAuthenticator authenticators.BearerTokenAuthenticator @@ -75,7 +76,7 @@ Exception Types exceptions.InvalidStreamSortException exceptions.MapExpressionError exceptions.MaxRecordsLimitException - exceptions.RecordsWitoutSchemaException + exceptions.RecordsWithoutSchemaException exceptions.RetriableAPIError exceptions.StreamMapConfigError exceptions.TapStreamConnectionFailure @@ -85,6 +86,60 @@ JSON Schema builder classes --------------------------- .. 
autosummary:: - :template: module.rst + :toctree: classes/typing + :template: class.rst + + typing.PropertiesList + typing.ArrayType + typing.BooleanType + typing.Constant + typing.CustomType + typing.DateTimeType + typing.DateType + typing.DiscriminatedUnion + typing.DurationType + typing.EmailType + typing.HostnameType + typing.IntegerType + typing.IPv4Type + typing.IPv6Type + typing.JSONPointerType + typing.NumberType + typing.ObjectType + typing.OneOf + typing.Property + typing.RegexType + typing.RelativeJSONPointerType + typing.StringType + typing.TimeType + typing.URITemplateType + typing.URIType + typing.UUIDType + +Pagination +---------- + +.. autosummary:: + :toctree: classes + :template: class.rst + + pagination.BaseAPIPaginator + pagination.SinglePagePaginator + pagination.BaseHATEOASPaginator + pagination.HeaderLinkPaginator + pagination.JSONPathPaginator + pagination.SimpleHeaderPaginator + pagination.BasePageNumberPaginator + pagination.BaseOffsetPaginator + pagination.LegacyPaginatedStreamProtocol + pagination.LegacyStreamPaginator + +Batch +----- + +.. autosummary:: + :toctree: classes + :template: class.rst - typing + batch.BaseBatcher + batch.JSONLinesBatcher diff --git a/docs/release_process.md b/docs/release_process.md new file mode 100644 index 000000000..2163e4db8 --- /dev/null +++ b/docs/release_process.md @@ -0,0 +1,29 @@ +# Release Process + +## PyPI releases + +Releases are published to PyPI by a GitHub Actions workflow, triggered when a GitHub [Release](https://github.com/meltano/sdk/releases) is published. + +### Feature releases + +Feature releases are the primary way that new features are added to the Singer SDK. They are released on a roughly monthly cadence. + +### Patch releases + +Patch releases are published as needed to fix bugs or security issues, rather than on a fixed schedule. + +## Release cadence + +Starting with Singer SDK 1.0, version numbers will use a loose form of [semantic versioning](https://semver.org/). + +SemVer makes it easier to see at a glance how compatible releases are with each other. It also helps to anticipate when compatibility shims will be removed. + +## Deprecation policy + +A [feature release](#feature-releases) may deprecate a feature, but it will not remove it until the next major release. A deprecation will be clearly documented in the changelog and in the code. + +All deprecated features will emit a `SingerSDKDeprecationWarning` when used, so users can raise them as exceptions when running their tests to ensure that they are not using any deprecated features: + +```console +$ pytest -W error::singer_sdk.utils.deprecation.SingerSDKDeprecationWarning +``` diff --git a/docs/stream_maps.md b/docs/stream_maps.md index 967830dd5..e4119d640 100644 --- a/docs/stream_maps.md +++ b/docs/stream_maps.md @@ -4,6 +4,10 @@ SDK-based taps, targets, and mappers automatically support the custom inline mappings feature. Stream mappings can be applied to solve the following real-world applications: +### Note on `null` values + +In all examples below where `null` is used as a value, the special string `"__NULL__"` can be used instead. + ### Stream-Level Mapping Applications - **Stream aliasing:** streams can be aliased to provide custom naming downstream.
@@ -69,7 +73,7 @@ Developers simply enable the feature using the instructions below, and then user ## Enabling Stream Maps in SDK-Based Plugins To support inline mapping functions, the developer only needs to declare two plugin settings, -called `stream_maps` and `stream_map_settings`, and declare both settings as `object` type. (For example: +called `stream_maps` and `stream_map_config`, and declare both settings as `object` type. (For example: `Property("stream_maps, ObjectType())` if using the python helper classes or `"stream_maps": {"type": "object"}` if using native JSON Schema declarations.) @@ -103,23 +107,41 @@ The `stream_maps` config expects a mapping of stream names to a structured trans Here is a sample `stream_maps` transformation which removes all references to `email` and adds `email_domain` and `email_hash` as new properties: -`config.json`: +`meltano.yml` or `config.json`: + +````{tab} meltano.yml +```yaml +stream_maps: + # Apply these transforms to the stream called 'customers' + customers: + # drop the PII field from RECORD and SCHEMA messages + email: __NULL__ + # capture just the email domain + email_domain: owner_email.split('@')[-1] + # for uniqueness checks + email_hash: md5(config['hash_seed'] + owner_email) +stream_map_config: + # hash outputs are not able to be replicated without the original seed: + hash_seed: 01AWZh7A6DzGm6iJZZ2T +``` +```` -```js +````{tab} JSON +```json { "stream_maps": { - "customers": { // Apply these transforms to the stream called 'customers' - "email": null, // drop the PII field from RECORD and SCHEMA messages - "email_domain": "owner_email.split('@')[-1]", // capture just the email domain - "email_hash": "md5(config['hash_seed'] + owner_email)", // for uniqueness checks + "customers": { + "email": null, + "email_domain": "owner_email.split('@')[-1]", + "email_hash": "md5(config['hash_seed'] + owner_email)" } }, "stream_map_config": { - // hash outputs are not able to be replicated without the original seed: "hash_seed": "01AWZh7A6DzGm6iJZZ2T" } } ``` +```` If map expressions should have access to special config, such as in the one-way hash algorithm above, define those config arguments within the optional @@ -160,6 +182,8 @@ can be referenced directly by mapping expressions. of the hash's hex digest. - This is defined by the SDK internally with native python: `hashlib.md5(<input>.encode("utf-8")).hexdigest()`. +- `datetime` - This is the datetime module object from the Python standard library. You can access + datetime.datetime, datetime.timedelta, etc. #### Built-in Variable Names @@ -191,26 +215,47 @@ The following logic is applied in determining the SCHEMA of the transformed stre To remove a stream, declare the stream within `stream_maps` config and assign it the value `null`. For example: -```js +````{tab} meltano.yml +```yaml +stream_maps: + # don't sync the stream called 'addresses' + addresses: __NULL__ +``` +```` + +````{tab} JSON +```json { "stream_maps": { - "addresses": null // don't sync the stream called 'addresses' - }, + "addresses": null + } } ``` +```` To remove a property, declare the property within the designated stream's map entry and assign it the value `null`. 
For example: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + # don't sync the 'email' stream property + email: __NULL__ +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { - "email": null, // don't sync the 'email' stream property + "email": null } - }, + } } ``` +```` ### Remove all undeclared streams or properties @@ -224,49 +269,111 @@ below. To remove all streams except the `customers` stream: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: {} + __else__: __NULL__ +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": {}, "__else__": null - }, + } } ``` +```` To remove all fields from the `customers` stream except `customer_id`: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + customer_id: customer_id + __else__: __NULL__ +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { "customer_id": "customer_id", "__else__": null - }, - }, + } + } } ``` +```` ### Unset or modify the stream's primary key behavior To override the stream's default primary key properties, add the `__key_properties__` operation within the stream map definition. -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + # Remove the original Customer ID column + customer_id: __NULL__ + # Add a new (and still unique) ID column + customer_id_hashed: md5(customer_id) + # Updated key to reflect the new name + __key_properties__: + - customer_id_hashed +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { - "customer_id": null, // Remove the original Customer ID column - "customer_id_hashed": "md5(customer_id)", // Add a new (and still unique) ID column - "__key_properties__": ["customer_id_hashed"] // Updated key to reflect the new name - }, - }, + "customer_id": null, + "customer_id_hashed": "md5(customer_id)", + "__key_properties__": ["customer_id_hashed"] + } + } } ``` +```` Notes: - To sync the stream as if it did not contain a primary key, simply set `__key_properties__` to `null`. - Key properties _must_ be present in the transformed stream result. Otherwise, an error will be raised. +### Add a property with a string literal value + +Some applications, such as multi-tenant, may benefit from adding a property with a hardcoded string literal value. +These values need to be wrapped in double quotes to differentiate them from property names: + +````{tab} meltano.yml +```yaml +stream_maps: + customers: + a_new_field: '\"client-123\"' +``` +```` + +````{tab} JSON +```json +{ + "stream_maps": { + "customers": { + "a_new_field": "\"client-123\"" + } + } +} +``` +```` + #### Q: What is the difference between `primary_keys` and `key_properties`? **A:** These two are _generally_ identical - and will only differ in cases like the above where `key_properties` is manually @@ -287,15 +394,25 @@ To alias a stream, simply add the operation `"__alias__": "new_name"` to the str definition. For example, to alias the `customers` stream as `customer_v2`, use the following: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + __alias__: customers_v2 +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { "__alias__": "customers_v2" - }, - }, + } + } } ``` +```` ## Duplicating or splitting a stream using `__source__` @@ -303,26 +420,43 @@ To create a new stream as a copy of the original, specify the operation `"__source__": "stream_name"`. 
For example, you can create a copy of the `customers` stream which only contains PII properties using the following: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + # Exclude these since we're capturing them in the pii stream + email: __NULL__ + full_name: __NULL__ + customers_pii: + __source__: customers + # include just the PII and the customer_id + customer_id: customer_id + email: email + full_name: full_name + # exclude anything not declared + __else__: __NULL__ +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { - // exclude these since we're capturing them in the pii stream "email": null, "full_name": null }, "customers_pii": { "__source__": "customers", - // include just the PII and the customer_id "customer_id": "customer_id", "email": "email", "full_name": "full_name", - // exclude anything not declared - "__else__": null, - }, - }, + "__else__": null + } + } } ``` +```` ## Filtering out records from a stream using `__filter__` operation @@ -331,15 +465,25 @@ The `__filter__` operation accept a string expression which must evaluate to `tr For example, to only include customers with emails from the `example.com` company domain: -```js +````{tab} meltano.yml +```yaml +stream_maps: + customers: + __filter__: email.endswith('@example.com') +``` +```` + +````{tab} JSON +```json { "stream_maps": { "customers": { "__filter__": "email.endswith('@example.com')" } - }, + } } ``` +```` ### Understanding Filters' Affects on Parent-Child Streams diff --git a/docs/testing.md b/docs/testing.md new file mode 100644 index 000000000..bc2dca8c7 --- /dev/null +++ b/docs/testing.md @@ -0,0 +1,139 @@ +# Testing Taps & Targets + +The Meltano SDK includes suites of standard tests for both Taps and Targets to help you get started. +These suites cover most common cases out-of-the-box, and tests are added to the standard suites as new errors are encountered by users in their deployments. + +## Test Framework + +The Meltano SDK test framework consists of 4 main components: + +1. A runner class (`TapTestRunner` and `TargetTestRunner`), responsible for executing Taps/Targets and capturing their output. +1. A suite dataclass, containing a list of tests. +1. A test template classes (`TapTestTemplate`, `StreamTestTemplate`, `AttributeTestTemplate` and `TargetTestTemplate`), with methods to `.setup()`, `.test()`, `.validate()` and `.teardown()` (called in that order using `.run()`). +1. `get_tap_test_class` and `get_target_test_class` factory methods. These wrap a `get_test_class` factory method, which takes a runner and a list of suites and return a `pytest` test class. + +## Example Usage + +If you created your Tap/Target using the provided cookiecutter templates, you will find the following snippets in `tests/test_core.py`. +You will also find a `conftest.py` file containing configuration of the SDK as a `pytest` plugin. 
+This is required for tests to collect correctly: + +```python +# register the singer_sdk pytest plugin +pytest_plugins = ("singer_sdk.testing.pytest_plugin",) +``` + +### Testing Taps + +```python +import datetime + +from singer_sdk.testing import get_tap_test_class + +from example.tap import TapExample + +SAMPLE_CONFIG = { + "start_date": datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d") +} + + +# Run standard built-in tap tests from the SDK: +TestTapExample = get_tap_test_class( + tap_class=TapExample, + config=SAMPLE_CONFIG +) +``` + +### Testing Targets + +```python +import pytest +from typing import Dict, Any + +from singer_sdk.testing import get_target_test_class + +from example.target import TargetExample + +SAMPLE_CONFIG: Dict[str, Any] = { + # TODO: Initialize minimal target config +} + +# Run standard built-in target tests from the SDK: +StandardTargetTests = get_target_test_class( + target_class=TargetExample, + config=SAMPLE_CONFIG +) + + +class TestTargetExample(StandardTargetTests): + """Standard Target Tests.""" + + @pytest.fixture() + def resource(self): + """Generic external resource. + + This fixture is useful for setup and teardown of external resources, + such output folders, tables, buckets etc. for use during testing. + + Example usage can be found in the SDK samples test suite: + https://github.com/meltano/sdk/tree/main/tests/samples + """ + yield "resource" +``` + +## Configuring Tests + +Test suite behaviors can be configured by passing a `SuiteConfig` instance to the `get_test_class` functions: + +```python +from singer_sdk.testing import SuiteConfig, get_tap_test_class + +from tap_stackexchange.tap import TapStackExchange + +SAMPLE_CONFIG = { + "site": "stackoverflow", + "tags": [ + "meltano", + "singer-io", + ], + "metrics_log_level": "debug", +} + +TEST_SUITE_CONFIG = SuiteConfig( + ignore_no_records_for_streams=["tag_synonyms"] +) + +TestTapStackExchange = get_tap_test_class( + tap_class=TapStackExchange, config=SAMPLE_CONFIG, suite_config=TEST_SUITE_CONFIG +) +``` + +Check out [`singer_sdk/testing/config.py`](https://github.com/meltano/sdk/tree/main/singer_sdk/testing/config.py) for available config options. + +## Writing New Tests + +Writing new tests is as easy as subclassing the appropriate class. +Check out [`singer_sdk/testing/tap_tests.py`](https://github.com/meltano/sdk/tree/main/singer_sdk/testing/tap_tests.py) and [`singer_sdk/testing/target_tests.py`](https://github.com/meltano/sdk/tree/main/singer_sdk/testing/target_tests.py) for inspiration. + +```python +class TapCLIPrintsTest(TapTestTemplate): + "Test that the tap is able to print standard metadata." + name = "cli_prints" + + def test(self): + self.tap.print_version() + self.tap.print_about() + self.tap.print_about(format="json") +``` + +Once you have created some tests, add them to a suite: + +```python +my_custom_tap_tests = TestSuite( + kind="tap", tests=[TapCLIPrintsTest] +) +``` + +This suite can now be passed to `get_tap_test_class` or `get_target_test_class` in a list of `custom_suites` along with any other suites, to generate your custom test class. + +If your new test covers a common or general case, consider contributing to the standard test library via a pull request to [meltano/sdk](https://github.com/meltano/sdk). diff --git a/docs/typing.rst b/docs/typing.rst index 931be0da4..d2b689165 100644 --- a/docs/typing.rst +++ b/docs/typing.rst @@ -1,5 +1,6 @@ -JSON Schema builder +JSON Schema Helpers =================== .. 
automodule:: singer_sdk.typing + :noindex: :members: diff --git a/e2e-tests/cookiecutters/README.md b/e2e-tests/cookiecutters/README.md new file mode 100644 index 000000000..7adc8522c --- /dev/null +++ b/e2e-tests/cookiecutters/README.md @@ -0,0 +1,17 @@ +# CI for Empty Cookiecutters + +Cookiecutters for taps and targets include two kinds of tests: linting and end-to-end testing with pytest. When a new project is created with the cookiecutter we expect: + +- linting tests should pass +- integration tests may fail (because no integration has been implemented yet) + +To automate creation of cookiecutter test projects, we use a [replay file](https://cookiecutter.readthedocs.io/en/stable/advanced/replay.html) generated by cookiecutter. + +## Running Manually + +Run a test against tap-template cookiecutter against the `tap-rest-api_key-github.json` replay file, execute: + +```bash +bash test_cookiecutter.sh ../../cookiecutter/tap-template ./tap-rest-api_key-github.json +bash test_cookiecutter.sh ../../cookiecutter/target-template ./target-per_record.json +``` diff --git a/e2e-tests/cookiecutters/mapper-base.json b/e2e-tests/cookiecutters/mapper-base.json new file mode 100644 index 000000000..390e8a7ba --- /dev/null +++ b/e2e-tests/cookiecutters/mapper-base.json @@ -0,0 +1,14 @@ +{ + "cookiecutter": { + "name": "MyMapperName", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "mapper_id": "mapper-base", + "library_name": "mapper_base", + "variant": "None (Skip)", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../mapper-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-graphql-jwt.json b/e2e-tests/cookiecutters/tap-graphql-jwt.json new file mode 100644 index 000000000..5daf4ab8f --- /dev/null +++ b/e2e-tests/cookiecutters/tap-graphql-jwt.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "GraphQLJWTTemplateTest", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-graphql-jwt", + "library_name": "tap_graphql_jwt", + "variant": "None (Skip)", + "stream_type": "GraphQL", + "auth_method": "JWT", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-other-custom.json b/e2e-tests/cookiecutters/tap-other-custom.json new file mode 100644 index 000000000..3ea01eaf4 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-other-custom.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-other-custom", + "library_name": "tap_other_custom", + "variant": "None (Skip)", + "stream_type": "Other", + "auth_method": "Custom or N/A", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." 
+ } +} diff --git a/e2e-tests/cookiecutters/tap-rest-api_key-github.json b/e2e-tests/cookiecutters/tap-rest-api_key-github.json new file mode 100644 index 000000000..01570aba8 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-api_key-github.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-api_key-github", + "library_name": "tap_rest_api_key_github", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "API Key", + "include_ci_files": "GitHub", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-rest-basic_auth.json b/e2e-tests/cookiecutters/tap-rest-basic_auth.json new file mode 100644 index 000000000..6c7d7fa19 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-basic_auth.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-basic_auth", + "library_name": "tap_rest_basic_auth", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "Basic Auth", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-rest-bearer_token.json b/e2e-tests/cookiecutters/tap-rest-bearer_token.json new file mode 100644 index 000000000..157457462 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-bearer_token.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-bearer_token", + "library_name": "tap_rest_bearer_token", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "Bearer Token", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-rest-custom.json b/e2e-tests/cookiecutters/tap-rest-custom.json new file mode 100644 index 000000000..831135b7a --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-custom.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-custom", + "library_name": "tap_rest_custom", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "Custom or N/A", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-rest-jwt.json b/e2e-tests/cookiecutters/tap-rest-jwt.json new file mode 100644 index 000000000..b46807d49 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-jwt.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-jwt", + "library_name": "tap_rest_jwt", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "JWT", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." 
+ } +} diff --git a/e2e-tests/cookiecutters/tap-rest-oauth2.json b/e2e-tests/cookiecutters/tap-rest-oauth2.json new file mode 100644 index 000000000..4a41b80e3 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-rest-oauth2.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-rest-oauth2", + "library_name": "tap_rest_oauth2", + "variant": "None (Skip)", + "stream_type": "REST", + "auth_method": "OAuth2", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/tap-sql-custom.json b/e2e-tests/cookiecutters/tap-sql-custom.json new file mode 100644 index 000000000..3c5996860 --- /dev/null +++ b/e2e-tests/cookiecutters/tap-sql-custom.json @@ -0,0 +1,16 @@ +{ + "cookiecutter": { + "source_name": "AutomaticTestTap", + "admin_name": "Automatic Tester", + "admin_email": "auto.tester@example.com", + "tap_id": "tap-sql-custom", + "library_name": "tap_sql_custom", + "variant": "None (Skip)", + "stream_type": "SQL", + "auth_method": "Custom or N/A", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "../tap-template/", + "_output_dir": "." + } +} diff --git a/e2e-tests/cookiecutters/target-per_record.json b/e2e-tests/cookiecutters/target-per_record.json new file mode 100644 index 000000000..f5dde1cef --- /dev/null +++ b/e2e-tests/cookiecutters/target-per_record.json @@ -0,0 +1,15 @@ +{ + "cookiecutter": { + "destination_name": "MyDestinationName", + "admin_name": "FirstName LastName", + "admin_email": "firstname.lastname@example.com", + "target_id": "target-per_record", + "library_name": "target_per_record", + "variant": "None (Skip)", + "serialization_method": "Per record", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "./sdk/cookiecutter/target-template", + "_output_dir": "." + } + } diff --git a/e2e-tests/cookiecutters/target-sql.json b/e2e-tests/cookiecutters/target-sql.json new file mode 100644 index 000000000..63691d718 --- /dev/null +++ b/e2e-tests/cookiecutters/target-sql.json @@ -0,0 +1,15 @@ +{ + "cookiecutter": { + "destination_name": "MyDestinationName", + "admin_name": "FirstName LastName", + "admin_email": "firstname.lastname@example.com", + "target_id": "target-sql", + "library_name": "target_sql", + "variant": "None (Skip)", + "serialization_method": "SQL", + "include_ci_files": "None (Skip)", + "license": "Apache-2.0", + "_template": "./sdk/cookiecutter/target-template", + "_output_dir": "." 
+ } + } diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index a70293c00..000000000 --- a/mypy.ini +++ /dev/null @@ -1,43 +0,0 @@ -[mypy] -python_version = 3.8 -warn_unused_configs = True -warn_return_any = True -exclude = singer_sdk/tests -plugins = sqlalchemy.ext.mypy.plugin - -[mypy-singer.*] -# Library 'pipelinewise-singer-tools' does not have type hints: -# - https://github.com/transferwise/pipelinewise-singer-python/issues/25 -ignore_missing_imports = True - -[mypy-singer_sdk.samples.*] -# Missing symbols for samples is okay -ignore_missing_imports = True - -[mypy-backoff.*] -# Frozen due to pipelinewise-singer-tools dependency -ignore_missing_imports = True - -[mypy-bcrypt.*] -ignore_missing_imports = True - -[mypy-joblib.*] -ignore_missing_imports = True - -[mypy-pyarrow.*] -ignore_missing_imports = True - -[mypy-pandas.*] -ignore_missing_imports = True - -[mypy-jsonschema.*] -ignore_missing_imports = True - -[mypy-jsonpath_ng.*] -ignore_missing_imports = True - -[mypy-samples.*] -ignore_missing_imports = True - -[mypy-sqlalchemy.*] -ignore_missing_imports = True diff --git a/noxfile.py b/noxfile.py index 44d50b586..cbb331faf 100644 --- a/noxfile.py +++ b/noxfile.py @@ -5,6 +5,7 @@ import os import shutil import sys +import tempfile from pathlib import Path from textwrap import dedent @@ -19,15 +20,42 @@ {sys.executable} -m pip install nox-poetry""" raise SystemExit(dedent(message)) from None +RUFF_OVERRIDES = """\ +extend = "./pyproject.toml" +extend-ignore = ["TD002", "TD003", "FIX002"] +""" + +COOKIECUTTER_REPLAY_FILES = list(Path("./e2e-tests/cookiecutters").glob("*.json")) + package = "singer_sdk" -python_versions = ["3.10", "3.9", "3.8", "3.7"] +python_versions = ["3.11", "3.10", "3.9", "3.8", "3.7"] main_python_version = "3.10" locations = "singer_sdk", "tests", "noxfile.py", "docs/conf.py" nox.options.sessions = ( "mypy", "tests", "doctest", + "test_cookiecutter", ) +test_dependencies = [ + "coverage[toml]", + "pytest", + "pytest-snapshot", + "pytest-durations", + "freezegun", + "pandas", + "pyarrow", + "requests-mock", + # Cookiecutter tests + "black", + "cookiecutter", + "PyYAML", + "darglint", + "flake8", + "flake8-annotations", + "flake8-docstrings", + "mypy", +] @session(python=python_versions) @@ -37,9 +65,15 @@ def mypy(session: Session) -> None: session.install(".") session.install( "mypy", + "pytest", + "importlib-resources", "sqlalchemy2-stubs", + "types-jsonschema", "types-python-dateutil", + "types-pytz", "types-requests", + "types-simplejson", + "types-PyYAML", ) session.run("mypy", *args) if not session.posargs: @@ -49,24 +83,16 @@ def mypy(session: Session) -> None: @session(python=python_versions) def tests(session: Session) -> None: """Execute pytest tests and compute coverage.""" - session.install(".") - session.install( - "coverage[toml]", - "pytest", - "freezegun", - "pandas", - "pyarrow", - "requests-mock", - # Cookiecutter tests - "black", - "cookiecutter", - "PyYAML", - "darglint", - "flake8", - "flake8-annotations", - "flake8-docstrings", - "mypy", - ) + session.install(".[s3]") + session.install(*test_dependencies) + + sqlalchemy_version = os.environ.get("SQLALCHEMY_VERSION") + if sqlalchemy_version: + # Bypass nox-poetry use of --constraint so we can install a version of + # SQLAlchemy that doesn't match what's in poetry.lock. 
+ session.poetry.session.install( # type: ignore[attr-defined] + f"sqlalchemy=={sqlalchemy_version}", + ) try: session.run( @@ -75,8 +101,8 @@ def tests(session: Session) -> None: "--parallel", "-m", "pytest", - "-x", "-v", + "--durations=10", *session.posargs, ) finally: @@ -84,6 +110,16 @@ def tests(session: Session) -> None: session.notify("coverage", posargs=[]) +@session(python=main_python_version) +def update_snapshots(session: Session) -> None: + """Update pytest snapshots.""" + args = session.posargs or ["-m", "snapshot"] + + session.install(".") + session.install(*test_dependencies) + session.run("pytest", "--snapshot-update", *args) + + @session(python=python_versions) def doctest(session: Session) -> None: """Run examples with xdoctest.""" @@ -148,3 +184,67 @@ def docs_serve(session: Session) -> None: shutil.rmtree(build_dir) session.run("sphinx-autobuild", *args) + + +@nox.parametrize("replay_file_path", COOKIECUTTER_REPLAY_FILES) +@session(python=main_python_version) +def test_cookiecutter(session: Session, replay_file_path) -> None: + """Uses the tap template to build an empty cookiecutter. + + Runs the lint task on the created test project. + """ + cc_build_path = tempfile.gettempdir() + folder_base_path = "./cookiecutter" + + if Path(replay_file_path).name.startswith("tap"): + folder = "tap-template" + elif Path(replay_file_path).name.startswith("target"): + folder = "target-template" + else: + folder = "mapper-template" + template = Path(folder_base_path + "/" + folder).resolve() + replay_file = Path(replay_file_path).resolve() + + if not Path(template).exists(): + return + + if not Path(replay_file).is_file(): + return + + sdk_dir = Path(Path(template).parent).parent + cc_output_dir = Path(replay_file_path).name.replace(".json", "") + cc_test_output = cc_build_path + "/" + cc_output_dir + + if Path(cc_test_output).exists(): + session.run("rm", "-fr", cc_test_output, external=True) + + session.install(".") + session.install("cookiecutter", "pythonsed") + + session.run( + "cookiecutter", + "--replay-file", + str(replay_file), + str(template), + "-o", + cc_build_path, + ) + session.chdir(cc_test_output) + + with Path("ruff.toml").open("w") as ruff_toml: + ruff_toml.write(RUFF_OVERRIDES) + + session.run( + "pythonsed", + "-i.bak", + 's|singer-sdk =.*|singer-sdk = \\{ path = "' + + str(sdk_dir) + + '", develop = true \\}|', + "pyproject.toml", + ) + session.run("poetry", "lock", external=True) + session.run("poetry", "install", external=True) + + session.run("git", "init", external=True) + session.run("git", "add", ".", external=True) + session.run("pre-commit", "run", "--all-files", external=True) diff --git a/poetry.lock b/poetry.lock index a9d5952f5..fe68114e5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,151 +1,417 @@ +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. + [[package]] name = "alabaster" -version = "0.7.12" +version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -category = "main" optional = true +python-versions = ">=3.6" +files = [ + {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, + {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, +] + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "argcomplete" +version = "3.0.8" +description = "Bash tab completion for argparse" +optional = false +python-versions = ">=3.6" +files = [ + {file = "argcomplete-3.0.8-py3-none-any.whl", hash = "sha256:e36fd646839933cbec7941c662ecb65338248667358dd3d968405a4506a60d9b"}, + {file = "argcomplete-3.0.8.tar.gz", hash = "sha256:b9ca96448e14fa459d7450a4ab5a22bbf9cee4ba7adddf03e65c398b5daeea28"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=0.23,<7", markers = "python_version == \"3.7\""} + +[package.extras] +test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] [[package]] name = "arrow" -version = "1.2.1" +version = "1.2.3" description = "Better dates & times for Python" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, +] [package.dependencies] python-dateutil = ">=2.7.0" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} -[[package]] -name = "atomicwrites" -version = "1.4.0" -description = "Atomic file writes." -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - [[package]] name = "attrs" -version = "21.4.0" +version = "23.1.0" description = "Classes Without Boilerplate" -category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "babel" -version = "2.9.1" +version = "2.12.1" description = "Internationalization utilities" -category = "main" optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.7" +files = [ + {file = "Babel-2.12.1-py3-none-any.whl", 
hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, +] [package.dependencies] -pytz = ">=2015.7" +pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} [[package]] name = "backoff" -version = "1.8.0" +version = "2.2.1" description = "Function decoration for backoff and retry" -category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +description = "Screen-scraping library" +optional = true +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] [[package]] name = "binaryornot" version = "0.4.4" description = "Ultra-lightweight pure Python package to check if a file is binary or text." -category = "dev" optional = false python-versions = "*" +files = [ + {file = "binaryornot-0.4.4-py2.py3-none-any.whl", hash = "sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4"}, + {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, +] [package.dependencies] chardet = ">=3.0.2" [[package]] name = "black" -version = "22.6.0" +version = "23.3.0" description = "The uncompromising code formatter." 
-category = "dev" optional = false -python-versions = ">=3.6.2" +python-versions = ">=3.7" +files = [ + {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, + {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, + {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, + {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, + {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, + {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, + {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, + {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, + {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, + {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, + {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, + {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, + {file = "black-23.3.0-py3-none-any.whl", hash = 
"sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, + {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, +] [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" +packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] -uvloop = ["uvloop (>=0.15.2)"] -jupyter = ["tokenize-rt (>=3.2.0)", "ipython (>=7.8.0)"] -d = ["aiohttp (>=3.7.4)"] colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "boto3" +version = "1.26.157" +description = "The AWS SDK for Python" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.26.157-py3-none-any.whl", hash = "sha256:718b236aafc3f106d17cd5c4f513fc2f40bfa995c0cb730ecc893e9c808c0385"}, + {file = "boto3-1.26.157.tar.gz", hash = "sha256:7a8117dfe9ba1f203d73b3df32a4ebdb895813189635f126fa256e1dea37ee8d"}, +] + +[package.dependencies] +botocore = ">=1.29.157,<1.30.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.29.157" +description = "Low-level, data-driven core of boto 3." +optional = true +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.29.157-py3-none-any.whl", hash = "sha256:ccbf948c040d68b6a22570e73dd63cb3b07ce33f4032e9b1d502d2fae55c3b80"}, + {file = "botocore-1.29.157.tar.gz", hash = "sha256:af2a7b6417bf3bbf00ab22aa61a2d7d839a8a8a62e7975c18c80c55c88dc7fcf"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.9)"] [[package]] name = "certifi" -version = "2021.10.8" +version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false -python-versions = "*" +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, + {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, +] [[package]] name = "cffi" -version = "1.15.0" +version = "1.15.1" description = "Foreign Function Interface for Python calling C code." 
-category = "main" optional = false python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] [package.dependencies] pycparser = "*" [[package]] name = "chardet" -version = "4.0.0" -description = "Universal encoding detector for Python 2 and 3" -category = "dev" +version = "5.1.0" +description = "Universal encoding detector for Python 3" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" +files = [ + {file = "chardet-5.1.0-py3-none-any.whl", hash = "sha256:362777fb014af596ad31334fde1e8c327dfdb076e1960d1694662d46a6917ab9"}, + {file = "chardet-5.1.0.tar.gz", hash = "sha256:0d62712b956bc154f85fb0a266e2a3c5913c2967e00348701b32411d6def31e5"}, +] [[package]] name = "charset-normalizer" -version = "2.0.10" +version = "3.1.0" description = "The Real First Universal Charset 
Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" -optional = false -python-versions = ">=3.5.0" - -[package.extras] -unicode_backport = ["unicodedata2"] - -[[package]] -name = "ciso8601" -version = "2.2.0" -description = "Fast ISO8601 date time parser for Python written in C" -category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = 
"sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] [[package]] name = "click" -version = "8.1.3" +version = "8.1.7" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -153,36 +419,148 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.6" description = "Cross-platform colored terminal text." 
-category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "commitizen" +version = "3.4.0" +description = "Python commitizen client tool" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "commitizen-3.4.0-py3-none-any.whl", hash = "sha256:5c58052099a6512da66a893f09e98e1f0d94ed02720a4e8d5747d4d409d59cfb"}, + {file = "commitizen-3.4.0.tar.gz", hash = "sha256:ab17db8c4f7258d9cdcc620046aa63d2139756ef78b2174cfa9f9c5e383eaf27"}, +] + +[package.dependencies] +argcomplete = ">=1.12.1,<3.1" +charset-normalizer = ">=2.1.0,<4" +colorama = ">=0.4.1,<0.5.0" +decli = ">=0.6.0,<0.7.0" +importlib_metadata = ">=4.13,<7" +jinja2 = ">=2.10.3" +packaging = ">=19" +pyyaml = ">=3.08" +questionary = ">=1.4.0,<2.0.0" +termcolor = ">=1.1,<3" +tomlkit = ">=0.5.3,<1.0.0" +typing-extensions = {version = ">=4.0.1,<5.0.0", markers = "python_version < \"3.8\""} + +[[package]] +name = "commitizen-version-bump" +version = "0.1.0" +description = "Commitizen customized for Meltano projects (https://commitizen-tools.github.io/commitizen/customization)" +optional = false +python-versions = "^3.7" +files = [] +develop = false + +[package.dependencies] +commitizen = ">=3.0.0,<4.0.0" +PyGithub = "^1.57" + +[package.source] +type = "git" +url = "https://github.com/meltano/commitizen-version-bump.git" +reference = "main" +resolved_reference = "e2e6d5d13d39eae1f37e3a275c0d3d3e38c18439" [[package]] name = "cookiecutter" -version = "2.1.1" +version = "2.3.0" description = "A command-line utility that creates projects from project templates, e.g. creating a Python package project from a Python package project template." 
-category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "cookiecutter-2.3.0-py3-none-any.whl", hash = "sha256:7e87944757c6e9f8729cf89a4139b6a35ab4d6dcbc6ae3e7d6360d44ad3ad383"}, + {file = "cookiecutter-2.3.0.tar.gz", hash = "sha256:942a794981747f6d7f439d6e49d39dc91a9a641283614160c93c474c72c29621"}, +] [package.dependencies] +arrow = "*" binaryornot = ">=0.4.4" click = ">=7.0,<9.0.0" Jinja2 = ">=2.7,<4.0.0" -jinja2-time = ">=0.2.0" python-slugify = ">=4.0.0" pyyaml = ">=5.3.1" requests = ">=2.23.0" +rich = "*" [[package]] name = "coverage" -version = "6.4.4" +version = "7.2.7" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, + {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"}, + {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"}, + {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", 
hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"}, + {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"}, + {file = "coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"}, + {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"}, + {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"}, + {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"}, + {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"}, + {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"}, + {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"}, + {file = 
"coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"}, + {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"}, + {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"}, + {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"}, + {file = "coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"}, + {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"}, + {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"}, +] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} @@ -192,54 +570,134 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "37.0.4" +version = "41.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"}, + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"}, + {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"}, + {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"}, + {file = "cryptography-41.0.3.tar.gz", 
hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"}, +] [package.dependencies] cffi = ">=1.12" [package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] -pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] -sdist = ["setuptools_rust (>=0.11.4)"] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] [[package]] name = "darglint" version = "1.8.1" description = "A utility for ensuring Google-style docstrings stay up to date with the source code." -category = "dev" optional = false python-versions = ">=3.6,<4.0" +files = [ + {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"}, + {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"}, +] + +[[package]] +name = "decli" +version = "0.6.1" +description = "Minimal, easy-to-use, declarative cli tool" +optional = false +python-versions = ">=3.7" +files = [ + {file = "decli-0.6.1-py3-none-any.whl", hash = "sha256:7815ac58617764e1a200d7cadac6315fcaacc24d727d182f9878dd6378ccf869"}, + {file = "decli-0.6.1.tar.gz", hash = "sha256:ed88ccb947701e8e5509b7945fda56e150e2ac74a69f25d47ac85ef30ab0c0f0"}, +] [[package]] name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "docutils" -version = "0.16" +version = "0.19" description = "Docutils -- Python Documentation Utilities" -category = "main" optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" +files = [ + {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, + {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] + +[package.extras] +test = ["pytest (>=6)"] [[package]] name = "flake8" version = "3.9.2" description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, + {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, +] [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} @@ -251,9 +709,12 @@ pyflakes = ">=2.3.0,<2.4.0" name = "flake8-annotations" version = "2.9.1" description = "Flake8 Type Annotation Checks" -category = "dev" optional = false python-versions = ">=3.7,<4.0" +files = [ + {file = "flake8-annotations-2.9.1.tar.gz", hash = "sha256:11f09efb99ae63c8f9d6b492b75fe147fbc323179fddfe00b2e56eefeca42f57"}, + {file = "flake8_annotations-2.9.1-py3-none-any.whl", hash = "sha256:a4385158a7a9fc8af1d8820a2f4c8d03387997006a83f5f8bfe5bc6085bdf88a"}, +] [package.dependencies] attrs = ">=21.4" @@ -262,11 +723,14 @@ typed-ast = {version = ">=1.4,<2.0", markers = "python_version < \"3.8\""} [[package]] name = "flake8-docstrings" -version = "1.6.0" +version = "1.7.0" description = "Extension for flake8 which uses pydocstyle to check docstrings" -category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.7" +files = [ + {file = "flake8_docstrings-1.7.0-py2.py3-none-any.whl", hash = "sha256:51f2344026da083fc084166a9353f5082b01f72901df422f74b4d953ae88ac75"}, + {file = "flake8_docstrings-1.7.0.tar.gz", hash = "sha256:4c8cc748dc16e6869728699e5d0d685da9a10b0ea718e090b1ba088e67a941af"}, +] [package.dependencies] flake8 = ">=3" @@ -276,80 +740,237 @@ pydocstyle = ">=2.1" name = "freezegun" version = "1.2.2" description = "Let your Python tests travel through time" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "freezegun-1.2.2-py3-none-any.whl", hash = 
"sha256:ea1b963b993cb9ea195adbd893a48d573fda951b0da64f60883d7e988b606c9f"}, + {file = "freezegun-1.2.2.tar.gz", hash = "sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446"}, +] [package.dependencies] python-dateutil = ">=2.7" +[[package]] +name = "fs" +version = "2.4.16" +description = "Python's filesystem abstraction layer" +optional = false +python-versions = "*" +files = [ + {file = "fs-2.4.16-py2.py3-none-any.whl", hash = "sha256:660064febbccda264ae0b6bace80a8d1be9e089e0a5eb2427b7d517f9a91545c"}, + {file = "fs-2.4.16.tar.gz", hash = "sha256:ae97c7d51213f4b70b6a958292530289090de3a7e15841e108fbe144f069d313"}, +] + +[package.dependencies] +appdirs = ">=1.4.3,<1.5.0" +setuptools = "*" +six = ">=1.10,<2.0" + +[package.extras] +scandir = ["scandir (>=1.5,<2.0)"] + +[[package]] +name = "fs-s3fs" +version = "1.1.1" +description = "Amazon S3 filesystem for PyFilesystem2" +optional = true +python-versions = "*" +files = [ + {file = "fs-s3fs-1.1.1.tar.gz", hash = "sha256:b57f8c7664460ff7b451b4b44ca2ea9623a374d74e1284c2d5e6df499dc7976c"}, + {file = "fs_s3fs-1.1.1-py2.py3-none-any.whl", hash = "sha256:9ba160eaa93390cc5992a857675666cb2fbb3721b872474dfdc659a715c39280"}, +] + +[package.dependencies] +boto3 = ">=1.9,<2.0" +fs = ">=2.4,<3.0" +six = ">=1.10,<2.0" + +[[package]] +name = "furo" +version = "2023.3.27" +description = "A clean customisable Sphinx documentation theme." +optional = true +python-versions = ">=3.7" +files = [ + {file = "furo-2023.3.27-py3-none-any.whl", hash = "sha256:4ab2be254a2d5e52792d0ca793a12c35582dd09897228a6dd47885dabd5c9521"}, + {file = "furo-2023.3.27.tar.gz", hash = "sha256:b99e7867a5cc833b2b34d7230631dd6558c7a29f93071fdbb5709634bb33c5a5"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +pygments = ">=2.7" +sphinx = ">=5.0,<7.0" +sphinx-basic-ng = "*" + [[package]] name = "greenlet" -version = "1.1.2" +version = "2.0.2" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = 
"greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] [package.extras] -docs = ["sphinx"] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] [[package]] name = "idna" -version = "3.3" +version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] [[package]] name = "imagesize" -version = "1.3.0" +version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] [[package]] name = "importlib-metadata" -version = "4.12.0" +version = "4.13.0" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-4.13.0-py3-none-any.whl", hash = "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116"}, + {file = "importlib_metadata-4.13.0.tar.gz", hash = "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"}, +] [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "importlib-resources" +version = "5.12.0" 
+description = "Read resources from Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "inflection" version = "0.5.1" description = "A port of Ruby on Rails inflector to Python" -category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] [[package]] name = "iniconfig" -version = "1.1.1" -description = "iniconfig: brain-dead simple config-ini parsing" -category = "dev" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" optional = false -python-versions = "*" +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] [[package]] name = "jinja2" -version = "3.0.3" +version = "3.1.2" description = "A very fast and expressive template engine." 
-category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] [package.dependencies] MarkupSafe = ">=2.0" @@ -358,32 +979,38 @@ MarkupSafe = ">=2.0" i18n = ["Babel (>=2.7)"] [[package]] -name = "jinja2-time" -version = "0.2.0" -description = "Jinja2 Extension for Dates and Times" -category = "dev" -optional = false -python-versions = "*" - -[package.dependencies] -arrow = "*" -jinja2 = "*" +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = true +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] [[package]] name = "joblib" -version = "1.1.0" +version = "1.3.2" description = "Lightweight pipelining with Python functions" -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, + {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, +] [[package]] name = "jsonpath-ng" version = "1.5.3" description = "A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming." 
-category = "main" optional = false python-versions = "*" +files = [ + {file = "jsonpath-ng-1.5.3.tar.gz", hash = "sha256:a273b182a82c1256daab86a313b937059261b5c5f8c4fa3fc38b882b344dd567"}, + {file = "jsonpath_ng-1.5.3-py2-none-any.whl", hash = "sha256:f75b95dbecb8a0f3b86fd2ead21c2b022c3f5770957492b9b6196ecccfeb10aa"}, + {file = "jsonpath_ng-1.5.3-py3-none-any.whl", hash = "sha256:292a93569d74029ba75ac2dc3d3630fc0e17b2df26119a165fa1d498ca47bf65"}, +] [package.dependencies] decorator = "*" @@ -392,29 +1019,37 @@ six = "*" [[package]] name = "jsonschema" -version = "3.2.0" +version = "4.17.3" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] [package.dependencies] attrs = ">=17.4.0" importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} -pyrsistent = ">=0.14.0" -six = ">=1.11.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -format_nongpl = ["rfc3339-validator", "rfc3986-validator (>0.1.0)", "webcolors", "jsonpointer (>1.13)", "idna"] -format = ["webcolors", "strict-rfc3339", "rfc3987", "jsonpointer (>1.13)", "idna"] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] [[package]] name = "livereload" version = "2.6.3" description = "Python LiveReload is an awesome tool for web developers" -category = "main" optional = true python-versions = "*" +files = [ + {file = "livereload-2.6.3-py2.py3-none-any.whl", hash = "sha256:ad4ac6f53b2d62bb6ce1a5e6e96f1f00976a32348afedcb4b6d68df2a1d346e4"}, + {file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"}, +] [package.dependencies] six = "*" @@ -422,230 +1057,462 @@ tornado = {version = "*", markers = "python_version > \"2.7\""} [[package]] name = "markdown-it-py" -version = "1.1.0" +version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
-category = "main" -optional = true -python-versions = "~=3.6" +optional = false +python-versions = ">=3.7" +files = [ + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, +] [package.dependencies] -attrs = ">=19,<22" -typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} +mdurl = ">=0.1,<1.0" +typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] -testing = ["pytest-regressions", "pytest-cov", "pytest-benchmark (>=3.2,<4.0)", "pytest (>=3.6,<4)", "psutil", "coverage"] -rtd = ["sphinx-book-theme", "sphinx-panels (>=0.4.0,<0.5.0)", "sphinx-copybutton", "sphinx (>=2,<4)", "pyyaml", "myst-nb (==0.13.0a1)"] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] -linkify = ["linkify-it-py (>=1.0,<2.0)"] -compare = ["panflute (>=1.12,<2.0)", "mistune (>=0.8.4,<0.9.0)", "mistletoe-ebp (>=0.10.0,<0.11.0)", "markdown (>=3.2.2,<3.3.0)", "commonmark (>=0.9.1,<0.10.0)"] -code_style = ["pre-commit (==2.6)"] +profiling = ["gprof2dot"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.0.1" +version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = 
"MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] [[package]] name = "mccabe" version = "0.6.1" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] [[package]] name = "mdit-py-plugins" -version = "0.3.0" +version = "0.3.5" description = "Collection of plugins for markdown-it-py" -category = "main" optional = true -python-versions = "~=3.6" +python-versions = ">=3.7" +files = [ + {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"}, + {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"}, +] [package.dependencies] markdown-it-py = ">=1.0.0,<3.0.0" 
[package.extras] -testing = ["pytest-regressions", "pytest-cov", "pytest (>=3.6,<4)", "coverage"] -rtd = ["sphinx-book-theme (>=0.1.0,<0.2.0)", "myst-parser (>=0.14.0,<0.15.0)"] -code_style = ["pre-commit (==2.6)"] +code-style = ["pre-commit"] +rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] [[package]] name = "memoization" version = "0.4.0" description = "A powerful caching library for Python, with TTL support and multiple algorithm options. (https://github.com/lonelyenvoy/python-memoization)" -category = "main" optional = false python-versions = ">=3, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +files = [ + {file = "memoization-0.4.0.tar.gz", hash = "sha256:fde5e7cd060ef45b135e0310cfec17b2029dc472ccb5bbbbb42a503d4538a135"}, +] [[package]] name = "mypy" -version = "0.971" +version = "1.4.1" description = "Optional static typing for Python" -category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, + {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, + {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"}, + {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"}, + {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"}, + {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"}, + {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"}, + {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"}, + {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"}, + {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"}, + {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"}, + {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"}, + {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"}, + {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"}, + {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"}, + {file = "mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"}, + {file = "mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"}, + {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"}, + {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"}, + {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"}, +] [package.dependencies] -mypy-extensions = ">=0.4.3" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." -category = "dev" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." optional = false -python-versions = "*" +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] [[package]] name = "myst-parser" -version = "0.18.0" -description = "An extended commonmark compliant parser, with bridges to docutils & sphinx." 
-category = "main" +version = "1.0.0" +description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," optional = true python-versions = ">=3.7" +files = [ + {file = "myst-parser-1.0.0.tar.gz", hash = "sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae"}, + {file = "myst_parser-1.0.0-py3-none-any.whl", hash = "sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c"}, +] [package.dependencies] -docutils = ">=0.15,<0.19" +docutils = ">=0.15,<0.20" jinja2 = "*" markdown-it-py = ">=1.0.0,<3.0.0" -mdit-py-plugins = ">=0.3.0,<0.4.0" +mdit-py-plugins = ">=0.3.4,<0.4.0" pyyaml = "*" -sphinx = ">=4,<6" -typing-extensions = "*" +sphinx = ">=5,<7" +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -testing = ["sphinx-pytest", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "pytest-cov", "pytest (>=6,<7)", "coverage", "beautifulsoup4"] -rtd = ["sphinxext-opengraph (>=0.6.3,<0.7.0)", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)", "sphinx-design", "sphinx-book-theme", "ipython"] +code-style = ["pre-commit (>=3.0,<4.0)"] linkify = ["linkify-it-py (>=1.0,<2.0)"] -code_style = ["pre-commit (>=2.12,<3.0)"] +rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.7.5,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] +testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=7,<8)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx-pytest"] +testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4,<0.4.0)"] [[package]] name = "numpy" version = "1.21.6" description = "NumPy is the fundamental package for array computing with Python." 
-category = "dev" optional = false python-versions = ">=3.7,<3.11" +files = [ + {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25"}, + {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e"}, + {file = "numpy-1.21.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6"}, + {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb"}, + {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1"}, + {file = "numpy-1.21.6-cp310-cp310-win32.whl", hash = "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c"}, + {file = "numpy-1.21.6-cp310-cp310-win_amd64.whl", hash = "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f"}, + {file = "numpy-1.21.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db"}, + {file = "numpy-1.21.6-cp37-cp37m-win32.whl", hash = "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e"}, + {file = "numpy-1.21.6-cp37-cp37m-win_amd64.whl", hash = "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4"}, + {file = "numpy-1.21.6-cp38-cp38-win32.whl", hash = "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470"}, + {file = "numpy-1.21.6-cp38-cp38-win_amd64.whl", hash = "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf"}, + {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1"}, + {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673"}, + {file = 
"numpy-1.21.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b"}, + {file = "numpy-1.21.6-cp39-cp39-win32.whl", hash = "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786"}, + {file = "numpy-1.21.6-cp39-cp39-win_amd64.whl", hash = "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3"}, + {file = "numpy-1.21.6-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0"}, + {file = "numpy-1.21.6.zip", hash = "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656"}, +] [[package]] -name = "objprint" -version = "0.2.0" -description = "A library that can print Python objects in human readable format" -category = "dev" -optional = false -python-versions = ">=3.6" +name = "numpy" +version = "1.24.3" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c1104d3c036fb81ab923f507536daedc718d0ad5a8707c6061cdfd6d184e570"}, + {file = "numpy-1.24.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:202de8f38fc4a45a3eea4b63e2f376e5f2dc64ef0fa692838e31a808520efaf7"}, + {file = "numpy-1.24.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8535303847b89aa6b0f00aa1dc62867b5a32923e4d1681a35b5eef2d9591a463"}, + {file = "numpy-1.24.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d926b52ba1367f9acb76b0df6ed21f0b16a1ad87c6720a1121674e5cf63e2b6"}, + {file = "numpy-1.24.3-cp310-cp310-win32.whl", hash = "sha256:f21c442fdd2805e91799fbe044a7b999b8571bb0ab0f7850d0cb9641a687092b"}, + {file = "numpy-1.24.3-cp310-cp310-win_amd64.whl", hash = "sha256:ab5f23af8c16022663a652d3b25dcdc272ac3f83c3af4c02eb8b824e6b3ab9d7"}, + {file = "numpy-1.24.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9a7721ec204d3a237225db3e194c25268faf92e19338a35f3a224469cb6039a3"}, + {file = "numpy-1.24.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d6cc757de514c00b24ae8cf5c876af2a7c3df189028d68c0cb4eaa9cd5afc2bf"}, + {file = "numpy-1.24.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76e3f4e85fc5d4fd311f6e9b794d0c00e7002ec122be271f2019d63376f1d385"}, + {file = "numpy-1.24.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1d3c026f57ceaad42f8231305d4653d5f05dc6332a730ae5c0bea3513de0950"}, + {file = "numpy-1.24.3-cp311-cp311-win32.whl", hash = "sha256:c91c4afd8abc3908e00a44b2672718905b8611503f7ff87390cc0ac3423fb096"}, + {file = "numpy-1.24.3-cp311-cp311-win_amd64.whl", hash = "sha256:5342cf6aad47943286afa6f1609cad9b4266a05e7f2ec408e2cf7aea7ff69d80"}, + {file = "numpy-1.24.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7776ea65423ca6a15255ba1872d82d207bd1e09f6d0894ee4a64678dd2204078"}, + {file = "numpy-1.24.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:ae8d0be48d1b6ed82588934aaaa179875e7dc4f3d84da18d7eae6eb3f06c242c"}, + {file = "numpy-1.24.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecde0f8adef7dfdec993fd54b0f78183051b6580f606111a6d789cd14c61ea0c"}, + {file = "numpy-1.24.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4749e053a29364d3452c034827102ee100986903263e89884922ef01a0a6fd2f"}, + {file = "numpy-1.24.3-cp38-cp38-win32.whl", hash = "sha256:d933fabd8f6a319e8530d0de4fcc2e6a61917e0b0c271fded460032db42a0fe4"}, + {file = "numpy-1.24.3-cp38-cp38-win_amd64.whl", hash = "sha256:56e48aec79ae238f6e4395886b5eaed058abb7231fb3361ddd7bfdf4eed54289"}, + {file = "numpy-1.24.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4719d5aefb5189f50887773699eaf94e7d1e02bf36c1a9d353d9f46703758ca4"}, + {file = "numpy-1.24.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ec87a7084caa559c36e0a2309e4ecb1baa03b687201d0a847c8b0ed476a7187"}, + {file = "numpy-1.24.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea8282b9bcfe2b5e7d491d0bf7f3e2da29700cec05b49e64d6246923329f2b02"}, + {file = "numpy-1.24.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210461d87fb02a84ef243cac5e814aad2b7f4be953b32cb53327bb49fd77fbb4"}, + {file = "numpy-1.24.3-cp39-cp39-win32.whl", hash = "sha256:784c6da1a07818491b0ffd63c6bbe5a33deaa0e25a20e1b3ea20cf0e43f8046c"}, + {file = "numpy-1.24.3-cp39-cp39-win_amd64.whl", hash = "sha256:d5036197ecae68d7f491fcdb4df90082b0d4960ca6599ba2659957aafced7c17"}, + {file = "numpy-1.24.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:352ee00c7f8387b44d19f4cada524586f07379c0d49270f87233983bc5087ca0"}, + {file = "numpy-1.24.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d6acc2e7524c9955e5c903160aa4ea083736fde7e91276b0e5d98e6332812"}, + {file = "numpy-1.24.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:35400e6a8d102fd07c71ed7dcadd9eb62ee9a6e84ec159bd48c28235bbb0f8e4"}, + {file = "numpy-1.24.3.tar.gz", hash = "sha256:ab344f1bf21f140adab8e47fdbc7c35a477dc01408791f8ba00d018dd0bc5155"}, +] [[package]] name = "packaging" -version = "21.3" +version = "23.1" description = "Core utilities for Python packages" -category = "main" optional = false -python-versions = ">=3.6" - -[package.dependencies] -pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] [[package]] name = "pathspec" -version = "0.9.0" +version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." 
-category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] [[package]] name = "pendulum" version = "2.1.2" description = "Python datetimes made easy" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "pendulum-2.1.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:b6c352f4bd32dff1ea7066bd31ad0f71f8d8100b9ff709fb343f3b86cee43efe"}, + {file = "pendulum-2.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:318f72f62e8e23cd6660dbafe1e346950281a9aed144b5c596b2ddabc1d19739"}, + {file = "pendulum-2.1.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:0731f0c661a3cb779d398803655494893c9f581f6488048b3fb629c2342b5394"}, + {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3481fad1dc3f6f6738bd575a951d3c15d4b4ce7c82dce37cf8ac1483fde6e8b0"}, + {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9702069c694306297ed362ce7e3c1ef8404ac8ede39f9b28b7c1a7ad8c3959e3"}, + {file = "pendulum-2.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fb53ffa0085002ddd43b6ca61a7b34f2d4d7c3ed66f931fe599e1a531b42af9b"}, + {file = "pendulum-2.1.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:c501749fdd3d6f9e726086bf0cd4437281ed47e7bca132ddb522f86a1645d360"}, + {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c807a578a532eeb226150d5006f156632df2cc8c5693d778324b43ff8c515dd0"}, + {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d1619a721df661e506eff8db8614016f0720ac171fe80dda1333ee44e684087"}, + {file = "pendulum-2.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f888f2d2909a414680a29ae74d0592758f2b9fcdee3549887779cd4055e975db"}, + {file = "pendulum-2.1.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e95d329384717c7bf627bf27e204bc3b15c8238fa8d9d9781d93712776c14002"}, + {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4c9c689747f39d0d02a9f94fcee737b34a5773803a64a5fdb046ee9cac7442c5"}, + {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1245cd0075a3c6d889f581f6325dd8404aca5884dea7223a5566c38aab94642b"}, + {file = "pendulum-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:db0a40d8bcd27b4fb46676e8eb3c732c67a5a5e6bfab8927028224fbced0b40b"}, + {file = "pendulum-2.1.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f5e236e7730cab1644e1b87aca3d2ff3e375a608542e90fe25685dae46310116"}, + {file = "pendulum-2.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:de42ea3e2943171a9e95141f2eecf972480636e8e484ccffaf1e833929e9e052"}, + {file = "pendulum-2.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7c5ec650cb4bec4c63a89a0242cc8c3cebcec92fcfe937c417ba18277d8560be"}, + {file = "pendulum-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:33fb61601083f3eb1d15edeb45274f73c63b3c44a8524703dc143f4212bf3269"}, + {file = "pendulum-2.1.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:29c40a6f2942376185728c9a0347d7c0f07905638c83007e1d262781f1e6953a"}, + {file = "pendulum-2.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:94b1fc947bfe38579b28e1cccb36f7e28a15e841f30384b5ad6c5e31055c85d7"}, + {file = "pendulum-2.1.2.tar.gz", hash = "sha256:b06a0ca1bfe41c990bbf0c029f0b6501a7f2ec4e38bfec730712015e8860f207"}, +] 
[package.dependencies] python-dateutil = ">=2.6,<3.0" pytzdata = ">=2020.1" [[package]] -name = "pipelinewise-singer-python" -version = "1.2.0" -description = "Singer.io utility library - PipelineWise compatible" -category = "main" +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." optional = false -python-versions = "*" - -[package.dependencies] -backoff = "1.8.0" -ciso8601 = "*" -jsonschema = "3.2.0" -python-dateutil = ">=2.6.0" -pytz = "<2021.0" -simplejson = "3.11.1" - -[package.extras] -dev = ["nose", "ipdb", "ipython", "pylint"] +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] [[package]] name = "platformdirs" -version = "2.4.0" -description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" +version = "3.6.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.6.0-py3-none-any.whl", hash = "sha256:ffa199e3fbab8365778c4a10e1fbf1b9cd50707de826eb304b50e57ec0cc8d38"}, + {file = "platformdirs-3.6.0.tar.gz", hash = "sha256:57e28820ca8094678b807ff529196506d7a21e17156cb1cddb3e74cebce54640"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.3", markers = "python_version < \"3.8\""} [package.extras] -docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] -test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] +docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"] [[package]] name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] [package.dependencies] importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} [package.extras] -testing = ["pytest-benchmark", "pytest"] -dev = ["tox", "pre-commit"] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] [[package]] name = "ply" version = "3.11" description = "Python Lex & Yacc" -category = "main" optional = false python-versions = "*" +files = [ + {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, + {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, +] [[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "dev" +name = "prompt-toolkit" +version = "3.0.38" +description = "Library for building powerful interactive command lines in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, 
!=3.3.*, !=3.4.*" +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"}, + {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"}, +] + +[package.dependencies] +wcwidth = "*" [[package]] name = "pyarrow" -version = "9.0.0" +version = "12.0.1" description = "Python library for Apache Arrow" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pyarrow-12.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:6d288029a94a9bb5407ceebdd7110ba398a00412c5b0155ee9813a40d246c5df"}, + {file = "pyarrow-12.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345e1828efdbd9aa4d4de7d5676778aba384a2c3add896d995b23d368e60e5af"}, + {file = "pyarrow-12.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d6009fdf8986332b2169314da482baed47ac053311c8934ac6651e614deacd6"}, + {file = "pyarrow-12.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d3c4cbbf81e6dd23fe921bc91dc4619ea3b79bc58ef10bce0f49bdafb103daf"}, + {file = "pyarrow-12.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:cdacf515ec276709ac8042c7d9bd5be83b4f5f39c6c037a17a60d7ebfd92c890"}, + {file = "pyarrow-12.0.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:749be7fd2ff260683f9cc739cb862fb11be376de965a2a8ccbf2693b098db6c7"}, + {file = "pyarrow-12.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6895b5fb74289d055c43db3af0de6e16b07586c45763cb5e558d38b86a91e3a7"}, + {file = "pyarrow-12.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1887bdae17ec3b4c046fcf19951e71b6a619f39fa674f9881216173566c8f718"}, + {file = "pyarrow-12.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c9cb8eeabbadf5fcfc3d1ddea616c7ce893db2ce4dcef0ac13b099ad7ca082"}, + {file = "pyarrow-12.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:ce4aebdf412bd0eeb800d8e47db854f9f9f7e2f5a0220440acf219ddfddd4f63"}, + {file = "pyarrow-12.0.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:e0d8730c7f6e893f6db5d5b86eda42c0a130842d101992b581e2138e4d5663d3"}, + {file = "pyarrow-12.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43364daec02f69fec89d2315f7fbfbeec956e0d991cbbef471681bd77875c40f"}, + {file = "pyarrow-12.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:051f9f5ccf585f12d7de836e50965b3c235542cc896959320d9776ab93f3b33d"}, + {file = "pyarrow-12.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:be2757e9275875d2a9c6e6052ac7957fbbfc7bc7370e4a036a9b893e96fedaba"}, + {file = "pyarrow-12.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:cf812306d66f40f69e684300f7af5111c11f6e0d89d6b733e05a3de44961529d"}, + {file = "pyarrow-12.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:459a1c0ed2d68671188b2118c63bac91eaef6fc150c77ddd8a583e3c795737bf"}, + {file = "pyarrow-12.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85e705e33eaf666bbe508a16fd5ba27ca061e177916b7a317ba5a51bee43384c"}, + {file = "pyarrow-12.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9120c3eb2b1f6f516a3b7a9714ed860882d9ef98c4b17edcdc91d95b7528db60"}, + {file = "pyarrow-12.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c780f4dc40460015d80fcd6a6140de80b615349ed68ef9adb653fe351778c9b3"}, + {file = "pyarrow-12.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = 
"sha256:a3c63124fc26bf5f95f508f5d04e1ece8cc23a8b0af2a1e6ab2b1ec3fdc91b24"}, + {file = "pyarrow-12.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b13329f79fa4472324f8d32dc1b1216616d09bd1e77cfb13104dec5463632c36"}, + {file = "pyarrow-12.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb656150d3d12ec1396f6dde542db1675a95c0cc8366d507347b0beed96e87ca"}, + {file = "pyarrow-12.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6251e38470da97a5b2e00de5c6a049149f7b2bd62f12fa5dbb9ac674119ba71a"}, + {file = "pyarrow-12.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:3de26da901216149ce086920547dfff5cd22818c9eab67ebc41e863a5883bac7"}, + {file = "pyarrow-12.0.1.tar.gz", hash = "sha256:cce317fc96e5b71107bf1f9f184d5e54e2bd14bbf3f9a3d62819961f0af86fec"}, +] [package.dependencies] numpy = ">=1.16.6" @@ -654,135 +1521,256 @@ numpy = ">=1.16.6" name = "pycodestyle" version = "2.7.0" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"}, + {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"}, +] [[package]] name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] [[package]] name = "pydocstyle" -version = "6.1.1" +version = "6.3.0" description = "Python docstring style checker" -category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"}, + {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"}, +] [package.dependencies] -snowballstemmer = "*" +importlib-metadata = {version = ">=2.0.0,<5.0.0", markers = "python_version < \"3.8\""} +snowballstemmer = ">=2.2.0" [package.extras] -toml = ["toml"] +toml = ["tomli (>=1.2.3)"] [[package]] name = "pyflakes" version = "2.3.1" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"}, + {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, +] + +[[package]] +name = "pygithub" +version = "1.58.2" +description = "Use the full Github API v3" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"}, + {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"}, +] + +[package.dependencies] +deprecated = "*" +pyjwt = {version = ">=2.4.0", extras = ["crypto"]} +pynacl = ">=1.4.0" +requests = ">=2.14.0" [[package]] name = "pygments" -version = "2.13.0" +version = "2.15.1" description = 
"Pygments is a syntax highlighting package written in Python." -category = "main" -optional = true -python-versions = ">=3.6" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, + {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, +] [package.extras] plugins = ["importlib-metadata"] [[package]] name = "pyjwt" -version = "2.4.0" +version = "2.8.0" description = "JSON Web Token implementation in Python" -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, + {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, +] + +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} +typing-extensions = {version = "*", markers = "python_version <= \"3.7\""} [package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] -docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"] -dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"] -crypto = ["cryptography (>=3.3.1)"] [[package]] -name = "pyparsing" -version = "3.0.6" -description = "Python parsing module" -category = "main" +name = "pynacl" +version = "1.5.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" optional = false python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = 
"sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] + +[package.dependencies] +cffi = ">=1.4.1" [package.extras] -diagrams = ["jinja2", "railroad-diagrams"] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyrsistent" -version = "0.18.0" +version = "0.19.3" description = "Persistent/Functional/Immutable data structures" -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = 
"sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, +] [[package]] name = "pytest" -version = "7.1.2" +version = "7.4.1" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.1-py3-none-any.whl", hash = "sha256:460c9a59b14e27c602eb5ece2e47bec99dc5fc5f6513cf924a7d03a578991b1f"}, + {file = "pytest-7.4.1.tar.gz", hash = "sha256:2f2301e797521b23e4d2585a0a3d7b5e50fdddaaf7e7d6773ea26ddb17c213ab"}, +] [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} -attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" -py = ">=1.8.2" -tomli = ">=1.0.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-durations" +version = "1.2.0" +description = "Pytest plugin reporting fixtures and test functions execution time." +optional = true +python-versions = ">=3.6.2" +files = [ + {file = "pytest-durations-1.2.0.tar.gz", hash = "sha256:75793f7c2c393a947de4a92cc205e8dcb3d7fcde492628926cca97eb8e87077d"}, + {file = "pytest_durations-1.2.0-py3-none-any.whl", hash = "sha256:210c649d989fdf8e864b7f614966ca2c8be5b58a5224d60089a43618c146d7fb"}, +] + +[package.dependencies] +pytest = ">=4.6" + +[[package]] +name = "pytest-snapshot" +version = "0.9.0" +description = "A plugin for snapshot testing with pytest." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "pytest-snapshot-0.9.0.tar.gz", hash = "sha256:c7013c3abc3e860f9feff899f8b4debe3708650d8d8242a61bf2625ff64db7f3"}, + {file = "pytest_snapshot-0.9.0-py3-none-any.whl", hash = "sha256:4b9fe1c21c868fe53a545e4e3184d36bc1c88946e3f5c1d9dd676962a9b3d4ab"}, +] + +[package.dependencies] +pytest = ">=3.0.0" [[package]] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] [package.dependencies] six = ">=1.5" [[package]] name = "python-dotenv" -version = "0.20.0" +version = "0.21.1" description = "Read key-value pairs from a .env file and set them as environment variables" -category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" +files = [ + {file = "python-dotenv-0.21.1.tar.gz", hash = "sha256:1c93de8f636cde3ce377292818d0e440b6e45a82f215c3744979151fa8151c49"}, + {file = "python_dotenv-0.21.1-py3-none-any.whl", hash = "sha256:41e12e0318bebc859fcc4d97d4db8d20ad21721a6aa5047dd59f090391cb549a"}, +] [package.extras] cli = ["click (>=5.0)"] [[package]] name = "python-slugify" -version = "5.0.2" -description = "A Python Slugify application that handles Unicode" -category = "dev" +version = "8.0.1" +description = "A Python slugify application that also handles Unicode" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "python-slugify-8.0.1.tar.gz", hash = "sha256:ce0d46ddb668b3be82f4ed5e503dbc33dd815d83e2eb6824211310d3fb172a27"}, + {file = "python_slugify-8.0.1-py2.py3-none-any.whl", hash = "sha256:70ca6ea68fe63ecc8fa4fcf00ae651fc8a5d02d93dcd12ae6d4fc7ca46c4d395"}, +] [package.dependencies] text-unidecode = ">=1.3" @@ -792,106 +1780,344 @@ unidecode = ["Unidecode (>=1.1.1)"] [[package]] name = "pytz" -version = "2020.5" +version = "2023.3.post1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" +files = [ + {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, + {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, +] [[package]] name = "pytzdata" version = "2020.1" description = "The Olson timezone database for Python." 
-category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"}, + {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, +] [[package]] name = "pyyaml" -version = "6.0" +version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = 
"PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "questionary" +version = "1.10.0" +description = "Python library to build pretty command line user prompts ⭐️" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "questionary-1.10.0-py3-none-any.whl", hash = "sha256:fecfcc8cca110fda9d561cb83f1e97ecbb93c613ff857f655818839dac74ce90"}, + {file = "questionary-1.10.0.tar.gz", hash = "sha256:600d3aefecce26d48d97eee936fdb66e4bc27f934c3ab6dd1e292c4f43946d90"}, +] + +[package.dependencies] +prompt_toolkit = ">=2.0,<4.0" + +[package.extras] +docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)"] [[package]] name = "requests" -version = "2.28.1" +version = "2.31.0" description = "Python HTTP for Humans." 
-category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<3" +charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-mock" -version = "1.9.3" +version = "1.11.0" description = "Mock out responses from the requests package" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "requests-mock-1.11.0.tar.gz", hash = "sha256:ef10b572b489a5f28e09b708697208c4a3b2b89ef80a9f01584340ea357ec3c4"}, + {file = "requests_mock-1.11.0-py2.py3-none-any.whl", hash = "sha256:f7fae383f228633f6bececebdab236c478ace2284d6292c6e7e2867b9ab74d15"}, +] [package.dependencies] requests = ">=2.3,<3" six = "*" [package.extras] -test = ["testtools", "testrepository (>=0.0.18)", "sphinx", "pytest", "purl", "mock", "fixtures"] fixture = ["fixtures"] +test = ["fixtures", "mock", "purl", "pytest", "requests-futures", "sphinx", "testtools"] + +[[package]] +name = "rich" +version = "13.5.2" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.5.2-py3-none-any.whl", hash = "sha256:146a90b3b6b47cac4a73c12866a499e9817426423f57c5a66949c086191a8808"}, + {file = "rich-13.5.2.tar.gz", hash = "sha256:fb9d6c0a0f643c99eed3875b5377a184132ba9be4d61516a55273d3554d75a39"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "s3transfer" +version = "0.6.1" +description = "An Amazon S3 Transfer Manager" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "s3transfer-0.6.1-py3-none-any.whl", hash = "sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346"}, + {file = "s3transfer-0.6.1.tar.gz", hash = "sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "setuptools" +version = "68.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", 
"jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "simplejson" -version = "3.11.1" +version = "3.19.1" description = "Simple, fast, extensible JSON encoder/decoder for Python" -category = "main" optional = false -python-versions = "*" +python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "simplejson-3.19.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799"}, + {file = 
"simplejson-3.19.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900"}, + {file = "simplejson-3.19.1-cp310-cp310-win32.whl", hash = "sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a"}, + {file = "simplejson-3.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc"}, + {file = "simplejson-3.19.1-cp311-cp311-win32.whl", hash = "sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb"}, + {file = "simplejson-3.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4"}, + {file = "simplejson-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f"}, + {file = 
"simplejson-3.19.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2"}, + {file = "simplejson-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33"}, + {file = "simplejson-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3"}, + {file = "simplejson-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd"}, + {file = "simplejson-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a"}, + {file = "simplejson-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a"}, + {file = 
"simplejson-3.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a"}, + {file = "simplejson-3.19.1-cp38-cp38-win32.whl", hash = "sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92"}, + {file = "simplejson-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553"}, + {file = "simplejson-3.19.1-cp39-cp39-win32.whl", hash = "sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4"}, + {file = "simplejson-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec"}, + {file = 
"simplejson-3.19.1-py3-none-any.whl", hash = "sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1"}, + {file = "simplejson-3.19.1.tar.gz", hash = "sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c"}, +] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] [[package]] name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "main" optional = false python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "soupsieve" +version = "2.4.1" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = true +python-versions = ">=3.7" +files = [ + {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, + {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, +] [[package]] name = "sphinx" -version = "5.1.1" +version = "5.3.0" description = "Python documentation generator" -category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "Sphinx-5.3.0.tar.gz", hash = "sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5"}, + {file = "sphinx-5.3.0-py3-none-any.whl", hash = "sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d"}, +] [package.dependencies] alabaster = ">=0.7,<0.8" -babel = ">=1.3" -colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} docutils = ">=0.14,<0.20" -imagesize = "*" -importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} -Jinja2 = ">=2.3" -packaging = "*" -Pygments = ">=2.0" +imagesize = ">=1.3" +importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.12" requests = ">=2.5.0" -snowballstemmer = ">=1.1" +snowballstemmer = ">=2.0" sphinxcontrib-applehelp = "*" sphinxcontrib-devhelp = "*" sphinxcontrib-htmlhelp = ">=2.0.0" @@ -901,16 +2127,19 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "flake8-comprehensions", "flake8-bugbear", "isort", "mypy (>=0.971)", "sphinx-lint", "docutils-stubs", "types-typed-ast", "types-requests"] -test = ["pytest (>=4.6)", "html5lib", "cython", "typed-ast"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-bugbear", "flake8-comprehensions", "flake8-simplify", "isort", "mypy (>=0.981)", "sphinx-lint", "types-requests", "types-typed-ast"] +test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast"] [[package]] name = "sphinx-autobuild" version = "2021.3.14" description = "Rebuild Sphinx documentation on changes, with live-reload in the browser." 
-category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, + {file = "sphinx_autobuild-2021.3.14-py3-none-any.whl", hash = "sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac"}, +] [package.dependencies] colorama = "*" @@ -918,1207 +2147,593 @@ livereload = "*" sphinx = "*" [package.extras] -test = ["pytest-cov", "pytest"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "sphinx-basic-ng" +version = "1.0.0b1" +description = "A modern skeleton for Sphinx themes." +optional = true +python-versions = ">=3.7" +files = [ + {file = "sphinx_basic_ng-1.0.0b1-py3-none-any.whl", hash = "sha256:ade597a3029c7865b24ad0eda88318766bcc2f9f4cef60df7e28126fde94db2a"}, + {file = "sphinx_basic_ng-1.0.0b1.tar.gz", hash = "sha256:89374bd3ccd9452a301786781e28c8718e99960f2d4f411845ea75fc7bb5a9b0"}, +] + +[package.dependencies] +sphinx = ">=4.0" + +[package.extras] +docs = ["furo", "ipython", "myst-parser", "sphinx-copybutton", "sphinx-inline-tabs"] [[package]] name = "sphinx-copybutton" -version = "0.5.0" +version = "0.5.2" description = "Add a copy button to each of your code cells." -category = "main" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, + {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, +] [package.dependencies] sphinx = ">=1.8" [package.extras] -rtd = ["sphinx-book-theme", "myst-nb", "ipython", "sphinx"] -code_style = ["pre-commit (==2.12.1)"] +code-style = ["pre-commit (==2.12.1)"] +rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] [[package]] -name = "sphinx-rtd-theme" -version = "1.0.0" -description = "Read the Docs theme for Sphinx" -category = "main" +name = "sphinx-inline-tabs" +version = "2023.4.21" +description = "Add inline tabbed content to your Sphinx documentation." 
optional = true -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +python-versions = ">=3.8" +files = [ + {file = "sphinx_inline_tabs-2023.4.21-py3-none-any.whl", hash = "sha256:06809ac613f7c48ddd6e2fa588413e3fe92cff2397b56e2ccf0b0218f9ef6a78"}, + {file = "sphinx_inline_tabs-2023.4.21.tar.gz", hash = "sha256:5df2f13f602c158f3f5f6c509e008aeada199a8c76d97ba3aa2822206683bebc"}, +] [package.dependencies] -docutils = "<0.18" -sphinx = ">=1.6" +sphinx = ">=3" [package.extras] -dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client"] +doc = ["furo", "myst-parser"] +test = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "sphinx-reredirects" +version = "0.1.2" +description = "Handles redirects for moved pages in Sphinx documentation projects" +optional = true +python-versions = ">=3.5" +files = [ + {file = "sphinx_reredirects-0.1.2-py3-none-any.whl", hash = "sha256:3a22161771aadd448bb608a4fe7277252182a337af53c18372b7104531d71489"}, + {file = "sphinx_reredirects-0.1.2.tar.gz", hash = "sha256:a0e7213304759b01edc22f032f1715a1c61176fc8f167164e7a52b9feec9ac64"}, +] + +[package.dependencies] +sphinx = "*" [[package]] name = "sphinxcontrib-applehelp" version = "1.0.2" description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books" -category = "main" optional = true python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, + {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"}, +] [package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] -lint = ["docutils-stubs", "mypy", "flake8"] [[package]] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-category = "main" optional = true python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, + {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, +] [package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] -lint = ["docutils-stubs", "mypy", "flake8"] [[package]] name = "sphinxcontrib-htmlhelp" version = "2.0.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"}, + {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"}, +] [package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["html5lib", "pytest"] -lint = ["docutils-stubs", "mypy", "flake8"] [[package]] name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "main" optional = true python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] [package.extras] -test = ["mypy", "flake8", "pytest"] +test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -category = "main" optional = true python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, + {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, +] [package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] -lint = ["docutils-stubs", "mypy", "flake8"] [[package]] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
-category = "main" optional = true python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, + {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, +] [package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] -lint = ["docutils-stubs", "mypy", "flake8"] [[package]] name = "sqlalchemy" -version = "1.4.40" +version = "2.0.20" description = "Database Abstraction Library" -category = "main" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759b51346aa388c2e606ee206c0bc6f15a5299f6174d1e10cadbe4530d3c7a98"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1506e988ebeaaf316f183da601f24eedd7452e163010ea63dbe52dc91c7fc70e"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5768c268df78bacbde166b48be788b83dddaa2a5974b8810af422ddfe68a9bc8"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3f0dd6d15b6dc8b28a838a5c48ced7455c3e1fb47b89da9c79cc2090b072a50"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:243d0fb261f80a26774829bc2cee71df3222587ac789b7eaf6555c5b15651eed"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6eb6d77c31e1bf4268b4d61b549c341cbff9842f8e115ba6904249c20cb78a61"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-win32.whl", hash = "sha256:bcb04441f370cbe6e37c2b8d79e4af9e4789f626c595899d94abebe8b38f9a4d"}, + {file = "SQLAlchemy-2.0.20-cp310-cp310-win_amd64.whl", hash = "sha256:d32b5ffef6c5bcb452723a496bad2d4c52b346240c59b3e6dba279f6dcc06c14"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd81466bdbc82b060c3c110b2937ab65ace41dfa7b18681fdfad2f37f27acdd7"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe7d61dc71119e21ddb0094ee994418c12f68c61b3d263ebaae50ea8399c4d4"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4e571af672e1bb710b3cc1a9794b55bce1eae5aed41a608c0401885e3491179"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3364b7066b3c7f4437dd345d47271f1251e0cfb0aba67e785343cdbdb0fff08c"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1be86ccea0c965a1e8cd6ccf6884b924c319fcc85765f16c69f1ae7148eba64b"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1d35d49a972649b5080557c603110620a86aa11db350d7a7cb0f0a3f611948a0"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-win32.whl", hash = "sha256:27d554ef5d12501898d88d255c54eef8414576f34672e02fe96d75908993cf53"}, + {file = "SQLAlchemy-2.0.20-cp311-cp311-win_amd64.whl", hash = "sha256:411e7f140200c02c4b953b3dbd08351c9f9818d2bd591b56d0fa0716bd014f1e"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3c6aceebbc47db04f2d779db03afeaa2c73ea3f8dcd3987eb9efdb987ffa09a3"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d3f175410a6db0ad96b10bfbb0a5530ecd4fcf1e2b5d83d968dd64791f810ed"}, + {file = 
"SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea8186be85da6587456c9ddc7bf480ebad1a0e6dcbad3967c4821233a4d4df57"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c3d99ba99007dab8233f635c32b5cd24fb1df8d64e17bc7df136cedbea427897"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:76fdfc0f6f5341987474ff48e7a66c3cd2b8a71ddda01fa82fedb180b961630a"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-win32.whl", hash = "sha256:d3793dcf5bc4d74ae1e9db15121250c2da476e1af8e45a1d9a52b1513a393459"}, + {file = "SQLAlchemy-2.0.20-cp37-cp37m-win_amd64.whl", hash = "sha256:79fde625a0a55220d3624e64101ed68a059c1c1f126c74f08a42097a72ff66a9"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:599ccd23a7146e126be1c7632d1d47847fa9f333104d03325c4e15440fc7d927"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1a58052b5a93425f656675673ef1f7e005a3b72e3f2c91b8acca1b27ccadf5f4"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79543f945be7a5ada9943d555cf9b1531cfea49241809dd1183701f94a748624"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63e73da7fb030ae0a46a9ffbeef7e892f5def4baf8064786d040d45c1d6d1dc5"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ce5e81b800a8afc870bb8e0a275d81957e16f8c4b62415a7b386f29a0cb9763"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb0d3e94c2a84215532d9bcf10229476ffd3b08f481c53754113b794afb62d14"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-win32.whl", hash = "sha256:8dd77fd6648b677d7742d2c3cc105a66e2681cc5e5fb247b88c7a7b78351cf74"}, + {file = "SQLAlchemy-2.0.20-cp38-cp38-win_amd64.whl", hash = "sha256:6f8a934f9dfdf762c844e5164046a9cea25fabbc9ec865c023fe7f300f11ca4a"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:26a3399eaf65e9ab2690c07bd5cf898b639e76903e0abad096cd609233ce5208"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4cde2e1096cbb3e62002efdb7050113aa5f01718035ba9f29f9d89c3758e7e4e"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b09ba72e4e6d341bb5bdd3564f1cea6095d4c3632e45dc69375a1dbe4e26ec"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b74eeafaa11372627ce94e4dc88a6751b2b4d263015b3523e2b1e57291102f0"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:77d37c1b4e64c926fa3de23e8244b964aab92963d0f74d98cbc0783a9e04f501"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:eefebcc5c555803065128401a1e224a64607259b5eb907021bf9b175f315d2a6"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-win32.whl", hash = "sha256:3423dc2a3b94125094897118b52bdf4d37daf142cbcf26d48af284b763ab90e9"}, + {file = "SQLAlchemy-2.0.20-cp39-cp39-win_amd64.whl", hash = "sha256:5ed61e3463021763b853628aef8bc5d469fe12d95f82c74ef605049d810f3267"}, + {file = "SQLAlchemy-2.0.20-py3-none-any.whl", hash = "sha256:63a368231c53c93e2b67d0c5556a9836fdcd383f7e3026a39602aad775b14acf"}, + {file = "SQLAlchemy-2.0.20.tar.gz", hash = "sha256:ca8a5ff2aa7f3ade6c498aaafce25b1eaeabe4e42b73e25519183e4566a16fc6"}, +] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or 
platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +typing-extensions = ">=4.2.0" [package.extras] -aiomysql = ["greenlet (!=0.4.17)", "aiomysql"] -aiosqlite = ["typing_extensions (!=3.10.0.1)", "greenlet (!=0.4.17)", "aiosqlite"] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["greenlet (!=0.4.17)", "asyncmy (>=0.2.3,!=0.2.4)"] -mariadb_connector = ["mariadb (>=1.0.1,!=1.1.2)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] mssql = ["pyodbc"] -mssql_pymssql = ["pymssql"] -mssql_pyodbc = ["pyodbc"] -mypy = ["sqlalchemy2-stubs", "mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0,<2)", "mysqlclient (>=1.4.0)"] -mysql_connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=7,<8)", "cx_oracle (>=7)"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql_asyncpg = ["greenlet (!=0.4.17)", "asyncpg"] -postgresql_pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] -postgresql_psycopg2binary = ["psycopg2-binary"] -postgresql_psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql (<1)", "pymysql"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] sqlcipher = ["sqlcipher3-binary"] [[package]] -name = "sqlalchemy2-stubs" -version = "0.0.2a25" -description = "Typing Stubs for SQLAlchemy 1.4" -category = "dev" +name = "termcolor" +version = "2.3.0" +description = "ANSI color formatting for output in terminal" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"}, + {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"}, +] -[package.dependencies] -typing-extensions = ">=3.7.4" +[package.extras] +tests = ["pytest", "pytest-cov"] [[package]] name = "text-unidecode" version = "1.3" description = "The most basic Text::Unidecode port" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] [[package]] name = "tomli" -version = "1.2.3" +version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false 
-python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.11.8" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomlkit-0.11.8-py3-none-any.whl", hash = "sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171"}, + {file = "tomlkit-0.11.8.tar.gz", hash = "sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3"}, +] [[package]] name = "tornado" version = "6.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -category = "main" optional = true python-versions = ">= 3.7" +files = [ + {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, + {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"}, + {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"}, + {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"}, + {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"}, +] [[package]] name = "typed-ast" -version = "1.4.3" +version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = 
"typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, +] + +[[package]] +name = "types-jsonschema" +version = "4.17.0.10" +description = "Typing stubs for jsonschema" optional = false python-versions = "*" +files = [ + {file = "types-jsonschema-4.17.0.10.tar.gz", hash = "sha256:8e979db34d69bc9f9b3d6e8b89bdbc60b3a41cfce4e1fb87bf191d205c7f5098"}, + {file = "types_jsonschema-4.17.0.10-py3-none-any.whl", hash = 
"sha256:3aa2a89afbd9eaa6ce0c15618b36f02692a621433889ce73014656f7d8caf971"}, +] [[package]] name = "types-python-dateutil" -version = "2.8.19" +version = "2.8.19.14" description = "Typing stubs for python-dateutil" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-python-dateutil-2.8.19.14.tar.gz", hash = "sha256:1f4f10ac98bb8b16ade9dbee3518d9ace017821d94b057a425b069f834737f4b"}, + {file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"}, +] + +[[package]] +name = "types-pytz" +version = "2023.3.0.1" +description = "Typing stubs for pytz" +optional = false +python-versions = "*" +files = [ + {file = "types-pytz-2023.3.0.1.tar.gz", hash = "sha256:1a7b8d4aac70981cfa24478a41eadfcd96a087c986d6f150d77e3ceb3c2bdfab"}, + {file = "types_pytz-2023.3.0.1-py3-none-any.whl", hash = "sha256:65152e872137926bb67a8fe6cc9cfd794365df86650c5d5fdc7b167b0f38892e"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.11" +description = "Typing stubs for PyYAML" +optional = false +python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, + {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, +] [[package]] name = "types-requests" -version = "2.28.9" +version = "2.31.0.2" description = "Typing stubs for requests" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, + {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, +] [package.dependencies] -types-urllib3 = "<1.27" +types-urllib3 = "*" + +[[package]] +name = "types-simplejson" +version = "3.19.0.2" +description = "Typing stubs for simplejson" +optional = false +python-versions = "*" +files = [ + {file = "types-simplejson-3.19.0.2.tar.gz", hash = "sha256:ebc81f886f89d99d6b80c726518aa2228bc77c26438f18fd81455e4f79f8ee1b"}, + {file = "types_simplejson-3.19.0.2-py3-none-any.whl", hash = "sha256:8ba093dc7884f59b3e62aed217144085e675a269debc32678fd80e0b43b2b86f"}, +] [[package]] name = "types-urllib3" -version = "1.26.7" +version = "1.26.25.13" description = "Typing stubs for urllib3" -category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-urllib3-1.26.25.13.tar.gz", hash = "sha256:3300538c9dc11dad32eae4827ac313f5d986b8b21494801f1bf97a1ac6c03ae5"}, + {file = "types_urllib3-1.26.25.13-py3-none-any.whl", hash = "sha256:5dbd1d2bef14efee43f5318b5d36d805a489f6600252bb53626d4bfafd95e27c"}, +] [[package]] name = "typing-extensions" -version = "4.3.0" +version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] [[package]] name = "urllib3" -version = "1.26.8" +version = "1.26.16" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"}, + {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"}, +] [package.extras] -brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] -name = "viztracer" -version = "0.15.4" -description = "A debugging and profiling tool that can trace and visualize python code execution" -category = "dev" +name = "wcwidth" +version = "0.2.6" +description = "Measures the displayed width of unicode strings in a terminal" optional = false -python-versions = ">=3.6" - -[package.dependencies] -objprint = ">=0.1.3" +python-versions = "*" +files = [ + {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, + {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, +] -[package.extras] -full = ["rich", "orjson"] +[[package]] +name = "wrapt" +version = "1.15.0" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = 
"wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = 
"wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = 
"wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, +] [[package]] name = "xdoctest" -version = "1.0.1" +version = "1.1.1" description = "A rewrite of the builtin doctest module" -category = "dev" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4" +python-versions = ">=3.6" +files = [ + {file = "xdoctest-1.1.1-py3-none-any.whl", hash = "sha256:d59d4ed91cb92e4430ef0ad1b134a2bef02adff7d2fb9c9f057547bee44081a2"}, + {file = "xdoctest-1.1.1.tar.gz", hash = "sha256:2eac8131bdcdf2781b4e5a62d6de87f044b730cc8db8af142a51bb29c245e779"}, +] [package.dependencies] six = "*" [package.extras] -tests = ["pytest-cov", "pytest", "typing", "pytest", "pytest-cov", "pytest", "pytest-cov", "pytest", "pytest", "pytest-cov", "pytest", "scikit-build", "pybind11", "ninja", "codecov", "cmake"] -tests-strict = ["pytest-cov (==3.0.0)", "pytest (==6.2.5)", "typing (==3.7.4)", "pytest (==4.6.0)", "pytest (==4.6.0)", 
"pytest-cov (==2.9.0)", "pytest (==4.6.0)", "pytest-cov (==2.8.1)", "pytest (==4.6.0)", "pytest (==4.6.0)", "pytest-cov (==2.8.1)", "scikit-build (==0.11.1)", "pybind11 (==2.7.1)", "ninja (==1.10.2)", "codecov (==2.0.15)", "cmake (==3.21.2)"] +all = ["IPython", "IPython", "Pygments", "Pygments", "attrs", "codecov", "colorama", "debugpy", "debugpy", "debugpy", "debugpy", "debugpy", "ipykernel", "ipykernel", "ipython-genutils", "jedi", "jinja2", "jupyter-client", "jupyter-client", "jupyter-core", "nbconvert", "pyflakes", "pytest", "pytest", "pytest", "pytest-cov", "six", "tomli", "typing"] +all-strict = ["IPython (==7.10.0)", "IPython (==7.23.1)", "Pygments (==2.0.0)", "Pygments (==2.4.1)", "attrs (==19.2.0)", "codecov (==2.0.15)", "colorama (==0.4.1)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "debugpy (==1.3.0)", "debugpy (==1.6.0)", "ipykernel (==5.2.0)", "ipykernel (==6.0.0)", "ipython-genutils (==0.2.0)", "jedi (==0.16)", "jinja2 (==3.0.0)", "jupyter-client (==6.1.5)", "jupyter-client (==7.0.0)", "jupyter-core (==4.7.0)", "nbconvert (==6.0.0)", "pyflakes (==2.2.0)", "pytest (==4.6.0)", "pytest (==4.6.0)", "pytest (==6.2.5)", "pytest-cov (==3.0.0)", "six (==1.11.0)", "tomli (==0.2.0)", "typing (==3.7.4)"] +colors = ["Pygments", "Pygments", "colorama"] +jupyter = ["IPython", "IPython", "attrs", "debugpy", "debugpy", "debugpy", "debugpy", "debugpy", "ipykernel", "ipykernel", "ipython-genutils", "jedi", "jinja2", "jupyter-client", "jupyter-client", "jupyter-core", "nbconvert"] +optional = ["IPython", "IPython", "Pygments", "Pygments", "attrs", "colorama", "debugpy", "debugpy", "debugpy", "debugpy", "debugpy", "ipykernel", "ipykernel", "ipython-genutils", "jedi", "jinja2", "jupyter-client", "jupyter-client", "jupyter-core", "nbconvert", "pyflakes", "tomli"] +optional-strict = ["IPython (==7.10.0)", "IPython (==7.23.1)", "Pygments (==2.0.0)", "Pygments (==2.4.1)", "attrs (==19.2.0)", "colorama (==0.4.1)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "debugpy (==1.3.0)", "debugpy (==1.6.0)", "ipykernel (==5.2.0)", "ipykernel (==6.0.0)", "ipython-genutils (==0.2.0)", "jedi (==0.16)", "jinja2 (==3.0.0)", "jupyter-client (==6.1.5)", "jupyter-client (==7.0.0)", "jupyter-core (==4.7.0)", "nbconvert (==6.0.0)", "pyflakes (==2.2.0)", "tomli (==0.2.0)"] runtime-strict = ["six (==1.11.0)"] -optional = ["ipykernel", "ipython", "jupyter-client", "nbconvert", "jupyter-core", "jinja2", "jedi", "attrs", "pygments", "ipython-genutils", "debugpy", "debugpy", "debugpy", "ipykernel", "debugpy", "ipython", "jupyter-client", "pygments", "tomli", "debugpy", "colorama"] -optional-strict = ["ipykernel (==6.0.0)", "IPython (==7.23.1)", "jupyter-client (==7.0.0)", "nbconvert (==6.0.0)", "jupyter-core (==4.7.0)", "jinja2 (==3.0.0)", "jedi (==0.16)", "attrs (==19.2.0)", "Pygments (==2.4.1)", "ipython-genutils (==0.2.0)", "debugpy (==1.6.0)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "ipykernel (==5.2.0)", "debugpy (==1.0.0)", "IPython (==7.10.0)", "jupyter-client (==6.1.5)", "Pygments (==2.0.0)", "tomli (==0.2.0)", "debugpy (==1.3.0)", "colorama (==0.4.1)"] -jupyter = ["ipykernel", "ipython", "jupyter-client", "nbconvert", "jupyter-core", "jinja2", "jedi", "attrs", "ipython-genutils", "debugpy", "debugpy", "debugpy", "ipykernel", "debugpy", "ipython", "jupyter-client", "debugpy"] -colors = ["pygments", "pygments", "colorama"] -all = ["ipykernel", "ipython", "jupyter-client", "pytest-cov", "nbconvert", "jupyter-core", "jinja2", "jedi", "attrs", "pygments", "pytest", 
"ipython-genutils", "debugpy", "typing", "debugpy", "debugpy", "pytest", "ipykernel", "debugpy", "ipython", "jupyter-client", "pytest-cov", "pytest", "pytest-cov", "pytest", "pygments", "pytest", "debugpy", "pytest-cov", "pytest", "colorama", "six", "scikit-build", "pybind11", "ninja", "codecov", "cmake"] -all-strict = ["ipykernel (==6.0.0)", "IPython (==7.23.1)", "jupyter-client (==7.0.0)", "pytest-cov (==3.0.0)", "nbconvert (==6.0.0)", "jupyter-core (==4.7.0)", "jinja2 (==3.0.0)", "jedi (==0.16)", "attrs (==19.2.0)", "Pygments (==2.4.1)", "pytest (==6.2.5)", "ipython-genutils (==0.2.0)", "debugpy (==1.6.0)", "typing (==3.7.4)", "debugpy (==1.0.0)", "debugpy (==1.0.0)", "pytest (==4.6.0)", "ipykernel (==5.2.0)", "debugpy (==1.0.0)", "IPython (==7.10.0)", "jupyter-client (==6.1.5)", "pytest (==4.6.0)", "pytest-cov (==2.9.0)", "pytest (==4.6.0)", "pytest-cov (==2.8.1)", "Pygments (==2.0.0)", "pytest (==4.6.0)", "debugpy (==1.3.0)", "pytest (==4.6.0)", "pytest-cov (==2.8.1)", "colorama (==0.4.1)", "six (==1.11.0)", "scikit-build (==0.11.1)", "pybind11 (==2.7.1)", "ninja (==1.10.2)", "codecov (==2.0.15)", "cmake (==3.21.2)"] +tests = ["codecov", "pytest", "pytest", "pytest", "pytest-cov", "typing"] +tests-binary = ["cmake", "cmake", "ninja", "ninja", "pybind11", "pybind11", "scikit-build", "scikit-build"] +tests-binary-strict = ["cmake (==3.21.2)", "cmake (==3.25.0)", "ninja (==1.10.2)", "ninja (==1.11.1)", "pybind11 (==2.10.3)", "pybind11 (==2.7.1)", "scikit-build (==0.11.1)", "scikit-build (==0.16.1)"] +tests-strict = ["codecov (==2.0.15)", "pytest (==4.6.0)", "pytest (==4.6.0)", "pytest (==6.2.5)", "pytest-cov (==3.0.0)", "typing (==3.7.4)"] [[package]] name = "zipp" -version = "3.6.0" +version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [extras] -docs = ["sphinx", "sphinx-rtd-theme", "sphinx-copybutton", "myst-parser", "sphinx-autobuild"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-reredirects"] +s3 = ["fs-s3fs"] +testing = ["pytest", "pytest-durations"] [metadata] -lock-version = "1.1" -python-versions = "<3.11,>=3.7.1" -content-hash = "8e35d291790e0b4c417c78e70bcec44153d3a0e74b2d7f37e0184bbe82211a44" - -[metadata.files] -alabaster = [ - {file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"}, - {file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"}, -] -arrow = [ - 
{file = "arrow-1.2.1-py3-none-any.whl", hash = "sha256:6b2914ef3997d1fd7b37a71ce9dd61a6e329d09e1c7b44f4d3099ca4a5c0933e"}, - {file = "arrow-1.2.1.tar.gz", hash = "sha256:c2dde3c382d9f7e6922ce636bf0b318a7a853df40ecb383b29192e6c5cc82840"}, -] -atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, -] -attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, -] -babel = [ - {file = "Babel-2.9.1-py2.py3-none-any.whl", hash = "sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9"}, - {file = "Babel-2.9.1.tar.gz", hash = "sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"}, -] -backoff = [ - {file = "backoff-1.8.0-py2.py3-none-any.whl", hash = "sha256:d340bb6f36d025c04214b8925112d8456970e5f28dda46e4f1133bf5c622cb0a"}, - {file = "backoff-1.8.0.tar.gz", hash = "sha256:c7187f15339e775aec926dc6e5e42f8a3ad7d3c2b9a6ecae7b535000f70cd838"}, -] -binaryornot = [ - {file = "binaryornot-0.4.4-py2.py3-none-any.whl", hash = "sha256:b8b71173c917bddcd2c16070412e369c3ed7f0528926f70cac18a6c97fd563e4"}, - {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, -] -black = [ - {file = "black-22.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f586c26118bc6e714ec58c09df0157fe2d9ee195c764f630eb0d8e7ccce72e69"}, - {file = "black-22.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b270a168d69edb8b7ed32c193ef10fd27844e5c60852039599f9184460ce0807"}, - {file = "black-22.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6797f58943fceb1c461fb572edbe828d811e719c24e03375fd25170ada53825e"}, - {file = "black-22.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c85928b9d5f83b23cee7d0efcb310172412fbf7cb9d9ce963bd67fd141781def"}, - {file = "black-22.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6fe02afde060bbeef044af7996f335fbe90b039ccf3f5eb8f16df8b20f77666"}, - {file = "black-22.6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cfaf3895a9634e882bf9d2363fed5af8888802d670f58b279b0bece00e9a872d"}, - {file = "black-22.6.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94783f636bca89f11eb5d50437e8e17fbc6a929a628d82304c80fa9cd945f256"}, - {file = "black-22.6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2ea29072e954a4d55a2ff58971b83365eba5d3d357352a07a7a4df0d95f51c78"}, - {file = "black-22.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e439798f819d49ba1c0bd9664427a05aab79bfba777a6db94fd4e56fae0cb849"}, - {file = "black-22.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:187d96c5e713f441a5829e77120c269b6514418f4513a390b0499b0987f2ff1c"}, - {file = "black-22.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:074458dc2f6e0d3dab7928d4417bb6957bb834434516f21514138437accdbe90"}, - {file = "black-22.6.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a218d7e5856f91d20f04e931b6f16d15356db1c846ee55f01bac297a705ca24f"}, - {file = "black-22.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:568ac3c465b1c8b34b61cd7a4e349e93f91abf0f9371eda1cf87194663ab684e"}, - {file = 
"black-22.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c1734ab264b8f7929cef8ae5f900b85d579e6cbfde09d7387da8f04771b51c6"}, - {file = "black-22.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9a3ac16efe9ec7d7381ddebcc022119794872abce99475345c5a61aa18c45ad"}, - {file = "black-22.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b9fd45787ba8aa3f5e0a0a98920c1012c884622c6c920dbe98dbd05bc7c70fbf"}, - {file = "black-22.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7ba9be198ecca5031cd78745780d65a3f75a34b2ff9be5837045dce55db83d1c"}, - {file = "black-22.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a3db5b6409b96d9bd543323b23ef32a1a2b06416d525d27e0f67e74f1446c8f2"}, - {file = "black-22.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:560558527e52ce8afba936fcce93a7411ab40c7d5fe8c2463e279e843c0328ee"}, - {file = "black-22.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b154e6bbde1e79ea3260c4b40c0b7b3109ffcdf7bc4ebf8859169a6af72cd70b"}, - {file = "black-22.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:4af5bc0e1f96be5ae9bd7aaec219c901a94d6caa2484c21983d043371c733fc4"}, - {file = "black-22.6.0-py3-none-any.whl", hash = "sha256:ac609cf8ef5e7115ddd07d85d988d074ed00e10fbc3445aee393e70164a2219c"}, - {file = "black-22.6.0.tar.gz", hash = "sha256:6c6d39e28aed379aec40da1c65434c77d75e65bb59a1e1c283de545fb4e7c6c9"}, -] -certifi = [ - {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, - {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, -] -cffi = [ - {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"}, - {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"}, - {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"}, - {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"}, - {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"}, - {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"}, - {file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"}, - {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"}, - {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"}, - {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"}, - {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"}, - {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"}, - {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"}, - {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"}, - {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"}, - {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"}, - {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"}, -] -chardet = [ - {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, - {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, -] -charset-normalizer = [ - {file = "charset-normalizer-2.0.10.tar.gz", hash = "sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd"}, - {file = "charset_normalizer-2.0.10-py3-none-any.whl", hash = "sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455"}, -] -ciso8601 = [ - {file = "ciso8601-2.2.0.tar.gz", hash = "sha256:14ad817ed31a698372d42afa81b0173d71cd1d0b48b7499a2da2a01dcc8695e6"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, -] -cookiecutter = [ - {file = "cookiecutter-2.1.1-py2.py3-none-any.whl", hash = "sha256:9f3ab027cec4f70916e28f03470bdb41e637a3ad354b4d65c765d93aad160022"}, - {file = 
"cookiecutter-2.1.1.tar.gz", hash = "sha256:f3982be8d9c53dac1261864013fdec7f83afd2e42ede6f6dd069c5e149c540d5"}, -] -coverage = [ - {file = "coverage-6.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7b4da9bafad21ea45a714d3ea6f3e1679099e420c8741c74905b92ee9bfa7cc"}, - {file = "coverage-6.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fde17bc42e0716c94bf19d92e4c9f5a00c5feb401f5bc01101fdf2a8b7cacf60"}, - {file = "coverage-6.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdbb0d89923c80dbd435b9cf8bba0ff55585a3cdb28cbec65f376c041472c60d"}, - {file = "coverage-6.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67f9346aeebea54e845d29b487eb38ec95f2ecf3558a3cffb26ee3f0dcc3e760"}, - {file = "coverage-6.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42c499c14efd858b98c4e03595bf914089b98400d30789511577aa44607a1b74"}, - {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c35cca192ba700979d20ac43024a82b9b32a60da2f983bec6c0f5b84aead635c"}, - {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9cc4f107009bca5a81caef2fca843dbec4215c05e917a59dec0c8db5cff1d2aa"}, - {file = "coverage-6.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f444627b3664b80d078c05fe6a850dd711beeb90d26731f11d492dcbadb6973"}, - {file = "coverage-6.4.4-cp310-cp310-win32.whl", hash = "sha256:66e6df3ac4659a435677d8cd40e8eb1ac7219345d27c41145991ee9bf4b806a0"}, - {file = "coverage-6.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:35ef1f8d8a7a275aa7410d2f2c60fa6443f4a64fae9be671ec0696a68525b875"}, - {file = "coverage-6.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c1328d0c2f194ffda30a45f11058c02410e679456276bfa0bbe0b0ee87225fac"}, - {file = "coverage-6.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61b993f3998ee384935ee423c3d40894e93277f12482f6e777642a0141f55782"}, - {file = "coverage-6.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5dd4b8e9cd0deb60e6fcc7b0647cbc1da6c33b9e786f9c79721fd303994832f"}, - {file = "coverage-6.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7026f5afe0d1a933685d8f2169d7c2d2e624f6255fb584ca99ccca8c0e966fd7"}, - {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9c7b9b498eb0c0d48b4c2abc0e10c2d78912203f972e0e63e3c9dc21f15abdaa"}, - {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ee2b2fb6eb4ace35805f434e0f6409444e1466a47f620d1d5763a22600f0f892"}, - {file = "coverage-6.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ab066f5ab67059d1f1000b5e1aa8bbd75b6ed1fc0014559aea41a9eb66fc2ce0"}, - {file = "coverage-6.4.4-cp311-cp311-win32.whl", hash = "sha256:9d6e1f3185cbfd3d91ac77ea065d85d5215d3dfa45b191d14ddfcd952fa53796"}, - {file = "coverage-6.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e3d3c4cc38b2882f9a15bafd30aec079582b819bec1b8afdbde8f7797008108a"}, - {file = "coverage-6.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a095aa0a996ea08b10580908e88fbaf81ecf798e923bbe64fb98d1807db3d68a"}, - {file = "coverage-6.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef6f44409ab02e202b31a05dd6666797f9de2aa2b4b3534e9d450e42dea5e817"}, - {file = 
"coverage-6.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b7101938584d67e6f45f0015b60e24a95bf8dea19836b1709a80342e01b472f"}, - {file = "coverage-6.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a32ec68d721c3d714d9b105c7acf8e0f8a4f4734c811eda75ff3718570b5e3"}, - {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6a864733b22d3081749450466ac80698fe39c91cb6849b2ef8752fd7482011f3"}, - {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:08002f9251f51afdcc5e3adf5d5d66bb490ae893d9e21359b085f0e03390a820"}, - {file = "coverage-6.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a3b2752de32c455f2521a51bd3ffb53c5b3ae92736afde67ce83477f5c1dd928"}, - {file = "coverage-6.4.4-cp37-cp37m-win32.whl", hash = "sha256:f855b39e4f75abd0dfbcf74a82e84ae3fc260d523fcb3532786bcbbcb158322c"}, - {file = "coverage-6.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ee6ae6bbcac0786807295e9687169fba80cb0617852b2fa118a99667e8e6815d"}, - {file = "coverage-6.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:564cd0f5b5470094df06fab676c6d77547abfdcb09b6c29c8a97c41ad03b103c"}, - {file = "coverage-6.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cbbb0e4cd8ddcd5ef47641cfac97d8473ab6b132dd9a46bacb18872828031685"}, - {file = "coverage-6.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6113e4df2fa73b80f77663445be6d567913fb3b82a86ceb64e44ae0e4b695de1"}, - {file = "coverage-6.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d032bfc562a52318ae05047a6eb801ff31ccee172dc0d2504614e911d8fa83e"}, - {file = "coverage-6.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e431e305a1f3126477abe9a184624a85308da8edf8486a863601d58419d26ffa"}, - {file = "coverage-6.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cf2afe83a53f77aec067033199797832617890e15bed42f4a1a93ea24794ae3e"}, - {file = "coverage-6.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:783bc7c4ee524039ca13b6d9b4186a67f8e63d91342c713e88c1865a38d0892a"}, - {file = "coverage-6.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ff934ced84054b9018665ca3967fc48e1ac99e811f6cc99ea65978e1d384454b"}, - {file = "coverage-6.4.4-cp38-cp38-win32.whl", hash = "sha256:e1fabd473566fce2cf18ea41171d92814e4ef1495e04471786cbc943b89a3781"}, - {file = "coverage-6.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:4179502f210ebed3ccfe2f78bf8e2d59e50b297b598b100d6c6e3341053066a2"}, - {file = "coverage-6.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:98c0b9e9b572893cdb0a00e66cf961a238f8d870d4e1dc8e679eb8bdc2eb1b86"}, - {file = "coverage-6.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc600f6ec19b273da1d85817eda339fb46ce9eef3e89f220055d8696e0a06908"}, - {file = "coverage-6.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a98d6bf6d4ca5c07a600c7b4e0c5350cd483c85c736c522b786be90ea5bac4f"}, - {file = "coverage-6.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01778769097dbd705a24e221f42be885c544bb91251747a8a3efdec6eb4788f2"}, - {file = "coverage-6.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfa0b97eb904255e2ab24166071b27408f1f69c8fbda58e9c0972804851e0558"}, - {file = 
"coverage-6.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:fcbe3d9a53e013f8ab88734d7e517eb2cd06b7e689bedf22c0eb68db5e4a0a19"}, - {file = "coverage-6.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:15e38d853ee224e92ccc9a851457fb1e1f12d7a5df5ae44544ce7863691c7a0d"}, - {file = "coverage-6.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6913dddee2deff8ab2512639c5168c3e80b3ebb0f818fed22048ee46f735351a"}, - {file = "coverage-6.4.4-cp39-cp39-win32.whl", hash = "sha256:354df19fefd03b9a13132fa6643527ef7905712109d9c1c1903f2133d3a4e145"}, - {file = "coverage-6.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:1238b08f3576201ebf41f7c20bf59baa0d05da941b123c6656e42cdb668e9827"}, - {file = "coverage-6.4.4-pp36.pp37.pp38-none-any.whl", hash = "sha256:f67cf9f406cf0d2f08a3515ce2db5b82625a7257f88aad87904674def6ddaec1"}, - {file = "coverage-6.4.4.tar.gz", hash = "sha256:e16c45b726acb780e1e6f88b286d3c10b3914ab03438f32117c4aa52d7f30d58"}, -] -cryptography = [ - {file = "cryptography-37.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:549153378611c0cca1042f20fd9c5030d37a72f634c9326e225c9f666d472884"}, - {file = "cryptography-37.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:a958c52505c8adf0d3822703078580d2c0456dd1d27fabfb6f76fe63d2971cd6"}, - {file = "cryptography-37.0.4-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f721d1885ecae9078c3f6bbe8a88bc0786b6e749bf32ccec1ef2b18929a05046"}, - {file = "cryptography-37.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:3d41b965b3380f10e4611dbae366f6dc3cefc7c9ac4e8842a806b9672ae9add5"}, - {file = "cryptography-37.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80f49023dd13ba35f7c34072fa17f604d2f19bf0989f292cedf7ab5770b87a0b"}, - {file = "cryptography-37.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2dcb0b3b63afb6df7fd94ec6fbddac81b5492513f7b0436210d390c14d46ee8"}, - {file = "cryptography-37.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:b7f8dd0d4c1f21759695c05a5ec8536c12f31611541f8904083f3dc582604280"}, - {file = "cryptography-37.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:30788e070800fec9bbcf9faa71ea6d8068f5136f60029759fd8c3efec3c9dcb3"}, - {file = "cryptography-37.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:190f82f3e87033821828f60787cfa42bff98404483577b591429ed99bed39d59"}, - {file = "cryptography-37.0.4-cp36-abi3-win32.whl", hash = "sha256:b62439d7cd1222f3da897e9a9fe53bbf5c104fff4d60893ad1355d4c14a24157"}, - {file = "cryptography-37.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:f7a6de3e98771e183645181b3627e2563dcde3ce94a9e42a3f427d2255190327"}, - {file = "cryptography-37.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc95ed67b6741b2607298f9ea4932ff157e570ef456ef7ff0ef4884a134cc4b"}, - {file = "cryptography-37.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:f8c0a6e9e1dd3eb0414ba320f85da6b0dcbd543126e30fcc546e7372a7fbf3b9"}, - {file = "cryptography-37.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:e007f052ed10cc316df59bc90fbb7ff7950d7e2919c9757fd42a2b8ecf8a5f67"}, - {file = "cryptography-37.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bc997818309f56c0038a33b8da5c0bfbb3f1f067f315f9abd6fc07ad359398d"}, - {file = "cryptography-37.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = 
"sha256:d204833f3c8a33bbe11eda63a54b1aad7aa7456ed769a982f21ec599ba5fa282"}, - {file = "cryptography-37.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:75976c217f10d48a8b5a8de3d70c454c249e4b91851f6838a4e48b8f41eb71aa"}, - {file = "cryptography-37.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7099a8d55cd49b737ffc99c17de504f2257e3787e02abe6d1a6d136574873441"}, - {file = "cryptography-37.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2be53f9f5505673eeda5f2736bea736c40f051a739bfae2f92d18aed1eb54596"}, - {file = "cryptography-37.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:91ce48d35f4e3d3f1d83e29ef4a9267246e6a3be51864a5b7d2247d5086fa99a"}, - {file = "cryptography-37.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4c590ec31550a724ef893c50f9a97a0c14e9c851c85621c5650d699a7b88f7ab"}, - {file = "cryptography-37.0.4.tar.gz", hash = "sha256:63f9c17c0e2474ccbebc9302ce2f07b55b3b3fcb211ded18a42d5764f5c10a82"}, -] -darglint = [ - {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"}, - {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"}, -] -decorator = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] -docutils = [ - {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, - {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, -] -flake8 = [ - {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, - {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, -] -flake8-annotations = [ - {file = "flake8-annotations-2.9.1.tar.gz", hash = "sha256:11f09efb99ae63c8f9d6b492b75fe147fbc323179fddfe00b2e56eefeca42f57"}, - {file = "flake8_annotations-2.9.1-py3-none-any.whl", hash = "sha256:a4385158a7a9fc8af1d8820a2f4c8d03387997006a83f5f8bfe5bc6085bdf88a"}, -] -flake8-docstrings = [ - {file = "flake8-docstrings-1.6.0.tar.gz", hash = "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"}, - {file = "flake8_docstrings-1.6.0-py2.py3-none-any.whl", hash = "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde"}, -] -freezegun = [ - {file = "freezegun-1.2.2-py3-none-any.whl", hash = "sha256:ea1b963b993cb9ea195adbd893a48d573fda951b0da64f60883d7e988b606c9f"}, - {file = "freezegun-1.2.2.tar.gz", hash = "sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446"}, -] -greenlet = [ - {file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aec52725173bd3a7b56fe91bc56eccb26fbdff1386ef123abb63c84c5b43b63a"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:833e1551925ed51e6b44c800e71e77dacd7e49181fdc9ac9a0bf3714d515785d"}, - {file = "greenlet-1.1.2-cp27-cp27m-win32.whl", hash = "sha256:aa5b467f15e78b82257319aebc78dd2915e4c1436c3c0d1ad6f53e47ba6e2713"}, - {file = "greenlet-1.1.2-cp27-cp27m-win_amd64.whl", hash = 
"sha256:40b951f601af999a8bf2ce8c71e8aaa4e8c6f78ff8afae7b808aae2dc50d4c40"}, - {file = "greenlet-1.1.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:95e69877983ea39b7303570fa6760f81a3eec23d0e3ab2021b7144b94d06202d"}, - {file = "greenlet-1.1.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:356b3576ad078c89a6107caa9c50cc14e98e3a6c4874a37c3e0273e4baf33de8"}, - {file = "greenlet-1.1.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8639cadfda96737427330a094476d4c7a56ac03de7265622fcf4cfe57c8ae18d"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58"}, - {file = "greenlet-1.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b336501a05e13b616ef81ce329c0e09ac5ed8c732d9ba7e3e983fcc1a9e86965"}, - {file = "greenlet-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708"}, - {file = "greenlet-1.1.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:fa877ca7f6b48054f847b61d6fa7bed5cebb663ebc55e018fda12db09dcc664c"}, - {file = "greenlet-1.1.2-cp35-cp35m-win32.whl", hash = "sha256:7cbd7574ce8e138bda9df4efc6bf2ab8572c9aff640d8ecfece1b006b68da963"}, - {file = "greenlet-1.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:903bbd302a2378f984aef528f76d4c9b1748f318fe1294961c072bdc7f2ffa3e"}, - {file = "greenlet-1.1.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:049fe7579230e44daef03a259faa24511d10ebfa44f69411d99e6a184fe68073"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:dd0b1e9e891f69e7675ba5c92e28b90eaa045f6ab134ffe70b52e948aa175b3c"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7418b6bfc7fe3331541b84bb2141c9baf1ec7132a7ecd9f375912eca810e714e"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168"}, - {file = "greenlet-1.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b8c008de9d0daba7b6666aa5bbfdc23dcd78cafc33997c9b7741ff6353bafb7f"}, - {file = "greenlet-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa"}, - {file = "greenlet-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d"}, - {file = "greenlet-1.1.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4"}, - {file = 
"greenlet-1.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fdcec0b8399108577ec290f55551d926d9a1fa6cad45882093a7a07ac5ec147b"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:93f81b134a165cc17123626ab8da2e30c0455441d4ab5576eed73a64c025b25c"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5"}, - {file = "greenlet-1.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c5d5b35f789a030ebb95bff352f1d27a93d81069f2adb3182d99882e095cefe"}, - {file = "greenlet-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc"}, - {file = "greenlet-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06"}, - {file = "greenlet-1.1.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eb6ea6da4c787111adf40f697b4e58732ee0942b5d3bd8f435277643329ba627"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f3acda1924472472ddd60c29e5b9db0cec629fbe3c5c5accb74d6d6d14773478"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b"}, - {file = "greenlet-1.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bde6792f313f4e918caabc46532aa64aa27a0db05d75b20edfc5c6f46479de2"}, - {file = "greenlet-1.1.2-cp38-cp38-win32.whl", hash = "sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd"}, - {file = "greenlet-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3"}, - {file = "greenlet-1.1.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:572e1787d1460da79590bf44304abbc0a2da944ea64ec549188fa84d89bba7ab"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:be5f425ff1f5f4b3c1e33ad64ab994eed12fc284a6ea71c5243fd564502ecbe5"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3"}, - {file = 
"greenlet-1.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0051c6f1f27cb756ffc0ffbac7d2cd48cb0362ac1736871399a739b2885134d3"}, - {file = "greenlet-1.1.2-cp39-cp39-win32.whl", hash = "sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf"}, - {file = "greenlet-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd"}, - {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, -] -idna = [ - {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, - {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, -] -imagesize = [ - {file = "imagesize-1.3.0-py2.py3-none-any.whl", hash = "sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c"}, - {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, -] -importlib-metadata = [ - {file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"}, - {file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"}, -] -inflection = [ - {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, - {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, -] -iniconfig = [ - {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, - {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, -] -jinja2 = [ - {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, - {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, -] -jinja2-time = [ - {file = "jinja2-time-0.2.0.tar.gz", hash = "sha256:d14eaa4d315e7688daa4969f616f226614350c48730bfa1692d2caebd8c90d40"}, - {file = "jinja2_time-0.2.0-py2.py3-none-any.whl", hash = "sha256:d3eab6605e3ec8b7a0863df09cc1d23714908fa61aa6986a845c20ba488b4efa"}, -] -joblib = [ - {file = "joblib-1.1.0-py2.py3-none-any.whl", hash = "sha256:f21f109b3c7ff9d95f8387f752d0d9c34a02aa2f7060c2135f465da0e5160ff6"}, - {file = "joblib-1.1.0.tar.gz", hash = "sha256:4158fcecd13733f8be669be0683b96ebdbbd38d23559f54dca7205aea1bf1e35"}, -] -jsonpath-ng = [ - {file = "jsonpath-ng-1.5.3.tar.gz", hash = "sha256:a273b182a82c1256daab86a313b937059261b5c5f8c4fa3fc38b882b344dd567"}, - {file = "jsonpath_ng-1.5.3-py2-none-any.whl", hash = "sha256:f75b95dbecb8a0f3b86fd2ead21c2b022c3f5770957492b9b6196ecccfeb10aa"}, - {file = "jsonpath_ng-1.5.3-py3-none-any.whl", hash = "sha256:292a93569d74029ba75ac2dc3d3630fc0e17b2df26119a165fa1d498ca47bf65"}, -] -jsonschema = [ - {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, - {file = "jsonschema-3.2.0.tar.gz", hash = "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"}, -] -livereload = [ - {file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"}, -] -markdown-it-py = [ - {file = "markdown-it-py-1.1.0.tar.gz", hash = 
"sha256:36be6bb3ad987bfdb839f5ba78ddf094552ca38ccbd784ae4f74a4e1419fc6e3"}, - {file = "markdown_it_py-1.1.0-py3-none-any.whl", hash = "sha256:98080fc0bc34c4f2bcf0846a096a9429acbd9d5d8e67ed34026c03c61c464389"}, -] -markupsafe = [ - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = 
"sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = 
"sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, - {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, -] -mccabe = [ - {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, - {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, -] -mdit-py-plugins = [ - {file = "mdit-py-plugins-0.3.0.tar.gz", hash = "sha256:ecc24f51eeec6ab7eecc2f9724e8272c2fb191c2e93cf98109120c2cace69750"}, - {file = "mdit_py_plugins-0.3.0-py3-none-any.whl", hash = "sha256:b1279701cee2dbf50e188d3da5f51fee8d78d038cdf99be57c6b9d1aa93b4073"}, -] -memoization = [ - {file = "memoization-0.4.0.tar.gz", hash = "sha256:fde5e7cd060ef45b135e0310cfec17b2029dc472ccb5bbbbb42a503d4538a135"}, -] -mypy = [ - {file = "mypy-0.971-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f2899a3cbd394da157194f913a931edfd4be5f274a88041c9dc2d9cdcb1c315c"}, - {file = "mypy-0.971-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98e02d56ebe93981c41211c05adb630d1d26c14195d04d95e49cd97dbc046dc5"}, - {file = "mypy-0.971-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:19830b7dba7d5356d3e26e2427a2ec91c994cd92d983142cbd025ebe81d69cf3"}, - {file = "mypy-0.971-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02ef476f6dcb86e6f502ae39a16b93285fef97e7f1ff22932b657d1ef1f28655"}, - {file = "mypy-0.971-cp310-cp310-win_amd64.whl", hash = "sha256:25c5750ba5609a0c7550b73a33deb314ecfb559c350bb050b655505e8aed4103"}, - {file = "mypy-0.971-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d3348e7eb2eea2472db611486846742d5d52d1290576de99d59edeb7cd4a42ca"}, - {file = "mypy-0.971-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3fa7a477b9900be9b7dd4bab30a12759e5abe9586574ceb944bc29cddf8f0417"}, - {file = "mypy-0.971-cp36-cp36m-win_amd64.whl", hash = "sha256:2ad53cf9c3adc43cf3bea0a7d01a2f2e86db9fe7596dfecb4496a5dda63cbb09"}, - {file = "mypy-0.971-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:855048b6feb6dfe09d3353466004490b1872887150c5bb5caad7838b57328cc8"}, - {file = "mypy-0.971-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:23488a14a83bca6e54402c2e6435467a4138785df93ec85aeff64c6170077fb0"}, - {file = "mypy-0.971-cp37-cp37m-win_amd64.whl", hash = "sha256:4b21e5b1a70dfb972490035128f305c39bc4bc253f34e96a4adf9127cf943eb2"}, - {file = "mypy-0.971-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9796a2ba7b4b538649caa5cecd398d873f4022ed2333ffde58eaf604c4d2cb27"}, - {file = "mypy-0.971-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a361d92635ad4ada1b1b2d3630fc2f53f2127d51cf2def9db83cba32e47c856"}, - {file = "mypy-0.971-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b793b899f7cf563b1e7044a5c97361196b938e92f0a4343a5d27966a53d2ec71"}, - {file = "mypy-0.971-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d1ea5d12c8e2d266b5fb8c7a5d2e9c0219fedfeb493b7ed60cd350322384ac27"}, - {file = 
"mypy-0.971-cp38-cp38-win_amd64.whl", hash = "sha256:23c7ff43fff4b0df93a186581885c8512bc50fc4d4910e0f838e35d6bb6b5e58"}, - {file = "mypy-0.971-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f7656b69974a6933e987ee8ffb951d836272d6c0f81d727f1d0e2696074d9e6"}, - {file = "mypy-0.971-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2022bfadb7a5c2ef410d6a7c9763188afdb7f3533f22a0a32be10d571ee4bbe"}, - {file = "mypy-0.971-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef943c72a786b0f8d90fd76e9b39ce81fb7171172daf84bf43eaf937e9f220a9"}, - {file = "mypy-0.971-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d744f72eb39f69312bc6c2abf8ff6656973120e2eb3f3ec4f758ed47e414a4bf"}, - {file = "mypy-0.971-cp39-cp39-win_amd64.whl", hash = "sha256:77a514ea15d3007d33a9e2157b0ba9c267496acf12a7f2b9b9f8446337aac5b0"}, - {file = "mypy-0.971-py3-none-any.whl", hash = "sha256:0d054ef16b071149917085f51f89555a576e2618d5d9dd70bd6eea6410af3ac9"}, - {file = "mypy-0.971.tar.gz", hash = "sha256:40b0f21484238269ae6a57200c807d80debc6459d444c0489a102d7c6a75fa56"}, -] -mypy-extensions = [ - {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, - {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, -] -myst-parser = [ - {file = "myst-parser-0.18.0.tar.gz", hash = "sha256:739a4d96773a8e55a2cacd3941ce46a446ee23dcd6b37e06f73f551ad7821d86"}, - {file = "myst_parser-0.18.0-py3-none-any.whl", hash = "sha256:4965e51918837c13bf1c6f6fe2c6bddddf193148360fbdaefe743a4981358f6a"}, -] -numpy = [ - {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25"}, - {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e"}, - {file = "numpy-1.21.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6"}, - {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb"}, - {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1"}, - {file = "numpy-1.21.6-cp310-cp310-win32.whl", hash = "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c"}, - {file = "numpy-1.21.6-cp310-cp310-win_amd64.whl", hash = "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f"}, - {file = "numpy-1.21.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7"}, - {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46"}, - {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2"}, - {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db"}, - {file = "numpy-1.21.6-cp37-cp37m-win32.whl", hash = "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e"}, - {file = "numpy-1.21.6-cp37-cp37m-win_amd64.whl", 
hash = "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a"}, - {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552"}, - {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab"}, - {file = "numpy-1.21.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3"}, - {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6"}, - {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a"}, - {file = "numpy-1.21.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4"}, - {file = "numpy-1.21.6-cp38-cp38-win32.whl", hash = "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470"}, - {file = "numpy-1.21.6-cp38-cp38-win_amd64.whl", hash = "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf"}, - {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1"}, - {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673"}, - {file = "numpy-1.21.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0"}, - {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac"}, - {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b"}, - {file = "numpy-1.21.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b"}, - {file = "numpy-1.21.6-cp39-cp39-win32.whl", hash = "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786"}, - {file = "numpy-1.21.6-cp39-cp39-win_amd64.whl", hash = "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3"}, - {file = "numpy-1.21.6-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0"}, - {file = "numpy-1.21.6.zip", hash = "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656"}, -] -objprint = [ - {file = "objprint-0.2.0-py3-none-any.whl", hash = "sha256:bce2e9787b9f88e9e4b9b2a2a633065f55be890b5e8ca28211d06277ddc521ee"}, - {file = "objprint-0.2.0.tar.gz", hash = "sha256:5a40b03a71ea490f6279e3c14838f9dc5ce1a74e2769369755a04dec1f2c0f08"}, -] -packaging = [ - {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, - {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, -] -pathspec = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, -] -pendulum = [ 
- {file = "pendulum-2.1.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:b6c352f4bd32dff1ea7066bd31ad0f71f8d8100b9ff709fb343f3b86cee43efe"}, - {file = "pendulum-2.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:318f72f62e8e23cd6660dbafe1e346950281a9aed144b5c596b2ddabc1d19739"}, - {file = "pendulum-2.1.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:0731f0c661a3cb779d398803655494893c9f581f6488048b3fb629c2342b5394"}, - {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3481fad1dc3f6f6738bd575a951d3c15d4b4ce7c82dce37cf8ac1483fde6e8b0"}, - {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9702069c694306297ed362ce7e3c1ef8404ac8ede39f9b28b7c1a7ad8c3959e3"}, - {file = "pendulum-2.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fb53ffa0085002ddd43b6ca61a7b34f2d4d7c3ed66f931fe599e1a531b42af9b"}, - {file = "pendulum-2.1.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:c501749fdd3d6f9e726086bf0cd4437281ed47e7bca132ddb522f86a1645d360"}, - {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c807a578a532eeb226150d5006f156632df2cc8c5693d778324b43ff8c515dd0"}, - {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d1619a721df661e506eff8db8614016f0720ac171fe80dda1333ee44e684087"}, - {file = "pendulum-2.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f888f2d2909a414680a29ae74d0592758f2b9fcdee3549887779cd4055e975db"}, - {file = "pendulum-2.1.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e95d329384717c7bf627bf27e204bc3b15c8238fa8d9d9781d93712776c14002"}, - {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4c9c689747f39d0d02a9f94fcee737b34a5773803a64a5fdb046ee9cac7442c5"}, - {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1245cd0075a3c6d889f581f6325dd8404aca5884dea7223a5566c38aab94642b"}, - {file = "pendulum-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:db0a40d8bcd27b4fb46676e8eb3c732c67a5a5e6bfab8927028224fbced0b40b"}, - {file = "pendulum-2.1.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f5e236e7730cab1644e1b87aca3d2ff3e375a608542e90fe25685dae46310116"}, - {file = "pendulum-2.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:de42ea3e2943171a9e95141f2eecf972480636e8e484ccffaf1e833929e9e052"}, - {file = "pendulum-2.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7c5ec650cb4bec4c63a89a0242cc8c3cebcec92fcfe937c417ba18277d8560be"}, - {file = "pendulum-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:33fb61601083f3eb1d15edeb45274f73c63b3c44a8524703dc143f4212bf3269"}, - {file = "pendulum-2.1.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:29c40a6f2942376185728c9a0347d7c0f07905638c83007e1d262781f1e6953a"}, - {file = "pendulum-2.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:94b1fc947bfe38579b28e1cccb36f7e28a15e841f30384b5ad6c5e31055c85d7"}, - {file = "pendulum-2.1.2.tar.gz", hash = "sha256:b06a0ca1bfe41c990bbf0c029f0b6501a7f2ec4e38bfec730712015e8860f207"}, -] -pipelinewise-singer-python = [ - {file = "pipelinewise-singer-python-1.2.0.tar.gz", hash = "sha256:8ba501f9092dbd686cd5792ecf6aa97c2d25c225e9d8b2875dcead0f5738898c"}, - {file = "pipelinewise_singer_python-1.2.0-py3-none-any.whl", hash = "sha256:156f011cba10b1591ae37c5510ed9d21639258c1377cc00c07d9f7e9a3ae27fb"}, -] -platformdirs = [ - {file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"}, - {file = "platformdirs-2.4.0.tar.gz", hash = "sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2"}, -] -pluggy 
= [ - {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, - {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, -] -ply = [ - {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, - {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, -] -py = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] -pyarrow = [ - {file = "pyarrow-9.0.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:767cafb14278165ad539a2918c14c1b73cf20689747c21375c38e3fe62884902"}, - {file = "pyarrow-9.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0238998dc692efcb4e41ae74738d7c1234723271ccf520bd8312dca07d49ef8d"}, - {file = "pyarrow-9.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:55328348b9139c2b47450d512d716c2248fd58e2f04e2fc23a65e18726666d42"}, - {file = "pyarrow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc856628acd8d281652c15b6268ec7f27ebcb015abbe99d9baad17f02adc51f1"}, - {file = "pyarrow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29eb3e086e2b26202f3a4678316b93cfb15d0e2ba20f3ec12db8fd9cc07cde63"}, - {file = "pyarrow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e753f8fcf07d8e3a0efa0c8bd51fef5c90281ffd4c5637c08ce42cd0ac297de"}, - {file = "pyarrow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:3eef8a981f45d89de403e81fb83b8119c20824caddf1404274e41a5d66c73806"}, - {file = "pyarrow-9.0.0-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:7fa56cbd415cef912677270b8e41baad70cde04c6d8a8336eeb2aba85aa93706"}, - {file = "pyarrow-9.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f8c46bde1030d704e2796182286d1c56846552c50a39ad5bf5a20c0d8159fc35"}, - {file = "pyarrow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ad430cee28ebc4d6661fc7315747c7a18ae2a74e67498dcb039e1c762a2fb67"}, - {file = "pyarrow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a60bb291a964f63b2717fb1b28f6615ffab7e8585322bfb8a6738e6b321282"}, - {file = "pyarrow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9cef618159567d5f62040f2b79b1c7b38e3885f4ffad0ec97cd2d86f88b67cef"}, - {file = "pyarrow-9.0.0-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:5526a3bfb404ff6d31d62ea582cf2466c7378a474a99ee04d1a9b05de5264541"}, - {file = "pyarrow-9.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:da3e0f319509a5881867effd7024099fb06950a0768dad0d6873668bb88cfaba"}, - {file = "pyarrow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c715eca2092273dcccf6f08437371e04d112f9354245ba2fbe6c801879450b7"}, - {file = "pyarrow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f11a645a41ee531c3a5edda45dea07c42267f52571f818d388971d33fc7e2d4a"}, - {file = "pyarrow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5b390bdcfb8c5b900ef543f911cdfec63e88524fafbcc15f83767202a4a2491"}, - {file = "pyarrow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d9eb04db626fa24fdfb83c00f76679ca0d98728cdbaa0481b6402bf793a290c0"}, - {file = "pyarrow-9.0.0-cp39-cp39-macosx_10_13_universal2.whl", 
hash = "sha256:4eebdab05afa23d5d5274b24c1cbeb1ba017d67c280f7d39fd8a8f18cbad2ec9"}, - {file = "pyarrow-9.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:02b820ecd1da02012092c180447de449fc688d0c3f9ff8526ca301cdd60dacd0"}, - {file = "pyarrow-9.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:92f3977e901db1ef5cba30d6cc1d7942b8d94b910c60f89013e8f7bb86a86eef"}, - {file = "pyarrow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f241bd488c2705df930eedfe304ada71191dcf67d6b98ceda0cc934fd2a8388e"}, - {file = "pyarrow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c5a073a930c632058461547e0bc572da1e724b17b6b9eb31a97da13f50cb6e0"}, - {file = "pyarrow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59bcd5217a3ae1e17870792f82b2ff92df9f3862996e2c78e156c13e56ff62e"}, - {file = "pyarrow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fe2ce795fa1d95e4e940fe5661c3c58aee7181c730f65ac5dd8794a77228de59"}, - {file = "pyarrow-9.0.0.tar.gz", hash = "sha256:7fb02bebc13ab55573d1ae9bb5002a6d20ba767bf8569b52fce5301d42495ab7"}, -] -pycodestyle = [ - {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"}, - {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"}, -] -pycparser = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] -pydocstyle = [ - {file = "pydocstyle-6.1.1-py3-none-any.whl", hash = "sha256:6987826d6775056839940041beef5c08cc7e3d71d63149b48e36727f70144dc4"}, - {file = "pydocstyle-6.1.1.tar.gz", hash = "sha256:1d41b7c459ba0ee6c345f2eb9ae827cab14a7533a88c5c6f7e94923f72df92dc"}, -] -pyflakes = [ - {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"}, - {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, -] -pygments = [ - {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, - {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, -] -pyjwt = [ - {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, - {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, -] -pyparsing = [ - {file = "pyparsing-3.0.6-py3-none-any.whl", hash = "sha256:04ff808a5b90911829c55c4e26f75fa5ca8a2f5f36aa3a51f68e27033341d3e4"}, - {file = "pyparsing-3.0.6.tar.gz", hash = "sha256:d9bdec0013ef1eb5a84ab39a3b3868911598afa494f5faa038647101504e2b81"}, -] -pyrsistent = [ - {file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"}, - {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:da6e5e818d18459fa46fac0a4a4e543507fe1110e808101277c5a2b5bab0cd2d"}, - {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5e4395bbf841693eaebaa5bb5c8f5cdbb1d139e07c975c682ec4e4f8126e03d2"}, - {file = "pyrsistent-0.18.0-cp36-cp36m-win32.whl", hash = 
"sha256:527be2bfa8dc80f6f8ddd65242ba476a6c4fb4e3aedbf281dfbac1b1ed4165b1"}, - {file = "pyrsistent-0.18.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2aaf19dc8ce517a8653746d98e962ef480ff34b6bc563fc067be6401ffb457c7"}, - {file = "pyrsistent-0.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58a70d93fb79dc585b21f9d72487b929a6fe58da0754fa4cb9f279bb92369396"}, - {file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4916c10896721e472ee12c95cdc2891ce5890898d2f9907b1b4ae0f53588b710"}, - {file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:73ff61b1411e3fb0ba144b8f08d6749749775fe89688093e1efef9839d2dcc35"}, - {file = "pyrsistent-0.18.0-cp37-cp37m-win32.whl", hash = "sha256:b29b869cf58412ca5738d23691e96d8aff535e17390128a1a52717c9a109da4f"}, - {file = "pyrsistent-0.18.0-cp37-cp37m-win_amd64.whl", hash = "sha256:097b96f129dd36a8c9e33594e7ebb151b1515eb52cceb08474c10a5479e799f2"}, - {file = "pyrsistent-0.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:772e94c2c6864f2cd2ffbe58bb3bdefbe2a32afa0acb1a77e472aac831f83427"}, - {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c1a9ff320fa699337e05edcaae79ef8c2880b52720bc031b219e5b5008ebbdef"}, - {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd3caef37a415fd0dae6148a1b6957a8c5f275a62cca02e18474608cb263640c"}, - {file = "pyrsistent-0.18.0-cp38-cp38-win32.whl", hash = "sha256:e79d94ca58fcafef6395f6352383fa1a76922268fa02caa2272fff501c2fdc78"}, - {file = "pyrsistent-0.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:a0c772d791c38bbc77be659af29bb14c38ced151433592e326361610250c605b"}, - {file = "pyrsistent-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d5ec194c9c573aafaceebf05fc400656722793dac57f254cd4741f3c27ae57b4"}, - {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:6b5eed00e597b5b5773b4ca30bd48a5774ef1e96f2a45d105db5b4ebb4bca680"}, - {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:48578680353f41dca1ca3dc48629fb77dfc745128b56fc01096b2530c13fd426"}, - {file = "pyrsistent-0.18.0-cp39-cp39-win32.whl", hash = "sha256:f3ef98d7b76da5eb19c37fda834d50262ff9167c65658d1d8f974d2e4d90676b"}, - {file = "pyrsistent-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:404e1f1d254d314d55adb8d87f4f465c8693d6f902f67eb6ef5b4526dc58e6ea"}, - {file = "pyrsistent-0.18.0.tar.gz", hash = "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b"}, -] -pytest = [ - {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"}, - {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"}, -] -python-dateutil = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] -python-dotenv = [ - {file = "python-dotenv-0.20.0.tar.gz", hash = "sha256:b7e3b04a59693c42c36f9ab1cc2acc46fa5df8c78e178fc33a8d4cd05c8d498f"}, - {file = "python_dotenv-0.20.0-py3-none-any.whl", hash = "sha256:d92a187be61fe482e4fd675b6d52200e7be63a12b724abbf931a40ce4fa92938"}, -] -python-slugify = [ - {file = "python-slugify-5.0.2.tar.gz", hash = "sha256:f13383a0b9fcbe649a1892b9c8eb4f8eab1d6d84b84bb7a624317afa98159cab"}, - {file = "python_slugify-5.0.2-py2.py3-none-any.whl", hash = 
"sha256:6d8c5df75cd4a7c3a2d21e257633de53f52ab0265cd2d1dc62a730e8194a7380"}, -] -pytz = [ - {file = "pytz-2020.5-py2.py3-none-any.whl", hash = "sha256:16962c5fb8db4a8f63a26646d8886e9d769b6c511543557bc84e9569fb9a9cb4"}, - {file = "pytz-2020.5.tar.gz", hash = "sha256:180befebb1927b16f6b57101720075a984c019ac16b1b7575673bea42c6c3da5"}, -] -pytzdata = [ - {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"}, - {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, -] -pyyaml = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] -requests = [ - {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, - {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, -] -requests-mock = [ - {file = "requests-mock-1.9.3.tar.gz", hash = "sha256:8d72abe54546c1fc9696fa1516672f1031d72a55a1d66c85184f972a24ba0eba"}, - {file = "requests_mock-1.9.3-py2.py3-none-any.whl", hash = "sha256:0a2d38a117c08bb78939ec163522976ad59a6b7fdd82b709e23bb98004a44970"}, -] -simplejson = [ - {file = "simplejson-3.11.1-cp27-cp27m-win32.whl", hash = "sha256:38c2b563cd03363e7cb2bbba6c20ae4eaafd853a83954c8c8dd345ee391787bf"}, - {file = "simplejson-3.11.1-cp27-cp27m-win_amd64.whl", hash = "sha256:8d73b96a6ee7c81fd49dac7225e3846fd60b54a0b5b93a0aaea04c5a5d2e7bf2"}, - {file = "simplejson-3.11.1-cp33-cp33m-win32.whl", hash = "sha256:7f53ab6a675594f237ce7372c1edf742a6acb158149ed3259c5fffc5b613dc94"}, - {file = "simplejson-3.11.1-cp33-cp33m-win_amd64.whl", hash = "sha256:86aa9fd492230c4b8b6814fcf089b36ffba2cec4d0635c8c642135b9067ebbd7"}, - {file = "simplejson-3.11.1-cp34-cp34m-win32.whl", hash = "sha256:7df76ae6cac4a62ad5295f9a9131857077d84cb15fad2011acb2ce7410476009"}, - {file = "simplejson-3.11.1-cp34-cp34m-win_amd64.whl", hash = 
"sha256:a6939199c30b78ae31e62e6913f0e12cb71a4a5ad67c259e0a98688df027a5de"}, - {file = "simplejson-3.11.1-cp35-cp35m-win32.whl", hash = "sha256:11d91b88cc1e9645c79f0f6fd2961684249af963e2bbff5a00061ed4bbf55379"}, - {file = "simplejson-3.11.1-cp35-cp35m-win_amd64.whl", hash = "sha256:36b0de42e3a8a51086c339cc803f6ac7a9d1d5254066d680956a195ca12cf0d8"}, - {file = "simplejson-3.11.1.tar.gz", hash = "sha256:01a22d49ddd9a168b136f26cac87d9a335660ce07aa5c630b8e3607d6f4325e7"}, - {file = "simplejson-3.11.1.win-amd64-py2.7.exe", hash = "sha256:1975e6b621fe1c2b9321c56476e8ebe1b851006517c1d67041b378950374694c"}, - {file = "simplejson-3.11.1.win-amd64-py3.3.exe", hash = "sha256:f60f01b16215568a08611eb6a4d61d76c4173c3d69aac9cad593777056c284d5"}, - {file = "simplejson-3.11.1.win-amd64-py3.4.exe", hash = "sha256:6be48181337ac5f5d9f48c9c504f317e245519318992122a05c40e482a721d59"}, - {file = "simplejson-3.11.1.win-amd64-py3.5.exe", hash = "sha256:8ae8cdcbe49e29ddfdae0ab81c1f6c070706d18fcee86371352d0d54b47ad8ec"}, - {file = "simplejson-3.11.1.win32-py2.7.exe", hash = "sha256:ebbd52b59948350ad66205e66b299fcca0e0821ed275c21262c522f4a6cea9d2"}, - {file = "simplejson-3.11.1.win32-py3.3.exe", hash = "sha256:2dc7fb8c0c0ff9483ce31b93b700b1fa60aca9d099e6aca9813f28ff131ccf59"}, - {file = "simplejson-3.11.1.win32-py3.4.exe", hash = "sha256:97cc43ef4cb18a2725f6e26d22b96f8ca50872a195bde32707dcb284f89c1d4d"}, - {file = "simplejson-3.11.1.win32-py3.5.exe", hash = "sha256:c76d55d78dc8b06c96fd08c6cc5e2b0b650799627d3f9ca4ad23f40db72d5f6d"}, -] -six = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] -snowballstemmer = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, -] -sphinx = [ - {file = "Sphinx-5.1.1-py3-none-any.whl", hash = "sha256:309a8da80cb6da9f4713438e5b55861877d5d7976b69d87e336733637ea12693"}, - {file = "Sphinx-5.1.1.tar.gz", hash = "sha256:ba3224a4e206e1fbdecf98a4fae4992ef9b24b85ebf7b584bb340156eaf08d89"}, -] -sphinx-autobuild = [ - {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, - {file = "sphinx_autobuild-2021.3.14-py3-none-any.whl", hash = "sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac"}, -] -sphinx-copybutton = [ - {file = "sphinx-copybutton-0.5.0.tar.gz", hash = "sha256:a0c059daadd03c27ba750da534a92a63e7a36a7736dcf684f26ee346199787f6"}, - {file = "sphinx_copybutton-0.5.0-py3-none-any.whl", hash = "sha256:9684dec7434bd73f0eea58dda93f9bb879d24bff2d8b187b1f2ec08dfe7b5f48"}, -] -sphinx-rtd-theme = [ - {file = "sphinx_rtd_theme-1.0.0-py2.py3-none-any.whl", hash = "sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8"}, - {file = "sphinx_rtd_theme-1.0.0.tar.gz", hash = "sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c"}, -] -sphinxcontrib-applehelp = [ - {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, - {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"}, -] -sphinxcontrib-devhelp = [ - {file = 
"sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, -] -sphinxcontrib-htmlhelp = [ - {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"}, - {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"}, -] -sphinxcontrib-jsmath = [ - {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, - {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, -] -sphinxcontrib-qthelp = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, -] -sphinxcontrib-serializinghtml = [ - {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, - {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, -] -sqlalchemy = [ - {file = "SQLAlchemy-1.4.40-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:b07fc38e6392a65935dc8b486229679142b2ea33c94059366b4d8b56f1e35a97"}, - {file = "SQLAlchemy-1.4.40-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fb4edb6c354eac0fcc07cb91797e142f702532dbb16c1d62839d6eec35f814cf"}, - {file = "SQLAlchemy-1.4.40-cp27-cp27m-win32.whl", hash = "sha256:2026632051a93997cf8f6fda14360f99230be1725b7ab2ef15be205a4b8a5430"}, - {file = "SQLAlchemy-1.4.40-cp27-cp27m-win_amd64.whl", hash = "sha256:f2aa85aebc0ef6b342d5d3542f969caa8c6a63c8d36cf5098769158a9fa2123c"}, - {file = "SQLAlchemy-1.4.40-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0b9e3d81f86ba04007f0349e373a5b8c81ec2047aadb8d669caf8c54a092461"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:1ab08141d93de83559f6a7d9a962830f918623a885b3759ec2b9d1a531ff28fe"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00dd998b43b282c71de46b061627b5edb9332510eb1edfc5017b9e4356ed44ea"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb342c0e25cc8f78a0e7c692da3b984f072666b316fbbec2a0e371cb4dfef5f0"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23b693876ac7963b6bc7b1a5f3a2642f38d2624af834faad5933913928089d1b"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-win32.whl", hash = "sha256:2cf50611ef4221ad587fb7a1708e61ff72966f84330c6317642e08d6db4138fd"}, - {file = "SQLAlchemy-1.4.40-cp310-cp310-win_amd64.whl", hash = "sha256:26ee4dbac5dd7abf18bf3cd8f04e51f72c339caf702f68172d308888cd26c6c9"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:b41b87b929118838bafc4bb18cf3c5cd1b3be4b61cd9042e75174df79e8ac7a2"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:885e11638946472b4a0a7db8e6df604b2cf64d23dc40eedc3806d869fcb18fae"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b7ff0a8bf0aec1908b92b8dfa1246128bf4f94adbdd3da6730e9c542e112542d"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfa8ab4ba0c97ab6bcae1f0948497d14c11b6c6ecd1b32b8a79546a0823d8211"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-win32.whl", hash = "sha256:d259fa08e4b3ed952c01711268bcf6cd2442b0c54866d64aece122f83da77c6d"}, - {file = "SQLAlchemy-1.4.40-cp36-cp36m-win_amd64.whl", hash = "sha256:c8d974c991eef0cd29418a5957ae544559dc326685a6f26b3a914c87759bf2f4"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:28b1791a30d62fc104070965f1a2866699c45bbf5adc0be0cf5f22935edcac58"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7ccdca6cd167611f4a62a8c2c0c4285c2535640d77108f782ce3f3cccb70f3a"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:69deec3a94de10062080d91e1ba69595efeafeafe68b996426dec9720031fb25"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ad778f4e80913fb171247e4fa82123d0068615ae1d51a9791fc4284cb81748"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-win32.whl", hash = "sha256:9ced2450c9fd016f9232d976661623e54c450679eeefc7aa48a3d29924a63189"}, - {file = "SQLAlchemy-1.4.40-cp37-cp37m-win_amd64.whl", hash = "sha256:cdee4d475e35684d210dc6b430ff8ca2ed0636378ac19b457e2f6f350d1f5acc"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:08b47c971327e733ffd6bae2d4f50a7b761793efe69d41067fcba86282819eea"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cf03d37819dc17a388d313919daf32058d19ba1e592efdf14ce8cbd997e6023"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a62c0ecbb9976550f26f7bf75569f425e661e7249349487f1483115e5fc893a6"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ec440990ab00650d0c7ea2c75bc225087afdd7ddcb248e3d934def4dff62762"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-win32.whl", hash = "sha256:2b64955850a14b9d481c17becf0d3f62fb1bb31ac2c45c2caf5ad06d9e811187"}, - {file = "SQLAlchemy-1.4.40-cp38-cp38-win_amd64.whl", hash = "sha256:959bf4390766a8696aa01285016c766b4eb676f712878aac5fce956dd49695d9"}, - {file = "SQLAlchemy-1.4.40-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:0992f3cc640ec0f88f721e426da884c34ff0a60eb73d3d64172e23dfadfc8a0b"}, - {file = "SQLAlchemy-1.4.40-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa9e0d7832b7511b3b3fd0e67fac85ff11fd752834c143ca2364c9b778c0485a"}, - {file = "SQLAlchemy-1.4.40-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c9d0f1a9538cc5e75f2ea0cb6c3d70155a1b7f18092c052e0d84105622a41b63"}, - {file = "SQLAlchemy-1.4.40-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c956a5d1adb49a35d78ef0fae26717afc48a36262359bb5b0cbd7a3a247c26f"}, - 
{file = "SQLAlchemy-1.4.40-cp39-cp39-win32.whl", hash = "sha256:6b70d02bbe1adbbf715d2249cacf9ac17c6f8d22dfcb3f1a4fbc5bf64364da8a"}, - {file = "SQLAlchemy-1.4.40-cp39-cp39-win_amd64.whl", hash = "sha256:bf073c619b5a7f7cd731507d0fdc7329bee14b247a63b0419929e4acd24afea8"}, - {file = "SQLAlchemy-1.4.40.tar.gz", hash = "sha256:44a660506080cc975e1dfa5776fe5f6315ddc626a77b50bf0eee18b0389ea265"}, -] -sqlalchemy2-stubs = [ - {file = "sqlalchemy2-stubs-0.0.2a25.tar.gz", hash = "sha256:2fbfddfee7fc6b45206dc52e9fe9d91a787efb6af13191debe84dda9b4798abd"}, - {file = "sqlalchemy2_stubs-0.0.2a25-py3-none-any.whl", hash = "sha256:9104894cee3159906079c4a31c2a66fbedfc72a381c51dfd1d8ebae8ffd7f2ca"}, -] -text-unidecode = [ - {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, - {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, -] -tomli = [ - {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, - {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, -] -tornado = [ - {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, - {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"}, - {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"}, - {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"}, - {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"}, -] -typed-ast = [ - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win32.whl", hash = "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win_amd64.whl", hash = 
"sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3"}, - {file = "typed_ast-1.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win32.whl", hash = "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7"}, - {file = "typed_ast-1.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win32.whl", hash = "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805"}, - {file = "typed_ast-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39"}, - {file = "typed_ast-1.4.3-cp38-cp38-win32.whl", hash = "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927"}, - {file = "typed_ast-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40"}, - {file = "typed_ast-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3"}, - {file = "typed_ast-1.4.3-cp39-cp39-win32.whl", hash = "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808"}, - {file = "typed_ast-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c"}, - {file = "typed_ast-1.4.3.tar.gz", hash = 
"sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, -] -types-python-dateutil = [ - {file = "types-python-dateutil-2.8.19.tar.gz", hash = "sha256:bfd3eb39c7253aea4ba23b10f69b017d30b013662bb4be4ab48b20bbd763f309"}, - {file = "types_python_dateutil-2.8.19-py3-none-any.whl", hash = "sha256:6284df1e4783d8fc6e587f0317a81333856b872a6669a282f8a325342bce7fa8"}, -] -types-requests = [ - {file = "types-requests-2.28.9.tar.gz", hash = "sha256:feaf581bd580497a47fe845d506fa3b91b484cf706ff27774e87659837de9962"}, - {file = "types_requests-2.28.9-py3-none-any.whl", hash = "sha256:86cb66d3de2f53eac5c09adc42cf6547eefbd0c7e1210beca1ee751c35d96083"}, -] -types-urllib3 = [ - {file = "types-urllib3-1.26.7.tar.gz", hash = "sha256:cfd1fbbe4ba9a605ed148294008aac8a7b8b7472651d1cc357d507ae5962e3d2"}, - {file = "types_urllib3-1.26.7-py3-none-any.whl", hash = "sha256:3adcf2cb5981809091dbff456e6999fe55f201652d8c360f99997de5ac2f556e"}, -] -typing-extensions = [ - {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"}, - {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"}, -] -urllib3 = [ - {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, - {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, -] -viztracer = [ - {file = "viztracer-0.15.4-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:94b916d893b15c065f504292b1e0016af32b4c9588f29522e15a6fdabd12dbbf"}, - {file = "viztracer-0.15.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e2cff8443f05b983265f7391118070fee27b586295b6df83575d7c811b51057"}, - {file = "viztracer-0.15.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.manylinux_2_24_i686.whl", hash = "sha256:7fab9a97256c0fda692ceef4ccfc678c2acbd07cae2a946e0b760ae5b52fd501"}, - {file = "viztracer-0.15.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b25ee86e22649e38e37ba0857387986ebfed0f613a607a5cd1f54f7fd317132"}, - {file = "viztracer-0.15.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:880375b2192227daef5a71d60eb4d8158d27d9e7548fbd76baf9870e65082749"}, - {file = "viztracer-0.15.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0bd97d1621093297d8c94c227dd1fc3d5b2de748ea14751603552a17ca6ca025"}, - {file = "viztracer-0.15.4-cp310-cp310-win32.whl", hash = "sha256:d1ba7762b974fb82359d618fbcc425da0db896952657129e374c61fd1e86310e"}, - {file = "viztracer-0.15.4-cp310-cp310-win_amd64.whl", hash = "sha256:ef4ef2fa4ad5a434633c4a0cae45dfbc6c187bef0d31b482385a101bedc80054"}, - {file = "viztracer-0.15.4-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:3c5bd1c944ddfc6ce059f294dd25f7d0a850967f5b118566a7147ac24f7a3c4c"}, - {file = "viztracer-0.15.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cc8c06ea7e8cfb0dc390c03b9d95c772e85eb73d7e14a3a7e4e95d1961fdced"}, - {file = "viztracer-0.15.4-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.manylinux_2_24_i686.whl", hash = "sha256:dea4664ff25bb09454d3723f17e57eaa1e384a1917648b73db55e801f8ca59db"}, - {file = "viztracer-0.15.4-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4fbaf8aa710aa0655db35e14ec97a90390fb6559c68d3d72408ecaa8b9f1eef"}, - {file = 
"viztracer-0.15.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44fad4f8029585bad6d4ef6c90e3aec91c678ec34865d32136501d71ea0351cc"}, - {file = "viztracer-0.15.4-cp36-cp36m-win32.whl", hash = "sha256:5ab1b10ebc30faf6ecd4bccc4cb020919a3b0aedd8e371e8cb99d438f21a822e"}, - {file = "viztracer-0.15.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4bc92e1b2f289e472999d68366314c5c87ed38dbe5f947d2f2ddea81df0bb32f"}, - {file = "viztracer-0.15.4-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:a0c467fc21e69cc1ec30b3c830d2c6a35586db39e95b46f2f51b1c215113e742"}, - {file = "viztracer-0.15.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db8a5607d91f6f7ca54440d2e6faeac3d15da4c28ff4a547e11fccb6deb695a"}, - {file = "viztracer-0.15.4-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.manylinux_2_24_i686.whl", hash = "sha256:2d78c189464fb9338fdef67223c0217f6370d3bcfcfb66f2f3dc947aa5615c88"}, - {file = "viztracer-0.15.4-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3889a18b4ad4eafceed6e78269a41764e3755d1a338391baebb93ab3450e62e"}, - {file = "viztracer-0.15.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40cf6e6315e7894f2ec17113f626590d0bf4685f46a5e86fd85eba38fe6e9d29"}, - {file = "viztracer-0.15.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2182f2aa599e57914ce496da5e609b5f411aba5e3cdbc992993c3af5cafad5d8"}, - {file = "viztracer-0.15.4-cp37-cp37m-win32.whl", hash = "sha256:09063442836a1b171558637b50c76fa45e31c102724b5de448986c3899cf3225"}, - {file = "viztracer-0.15.4-cp37-cp37m-win_amd64.whl", hash = "sha256:32eb9160d7fdac84934e581c54cfb25a2f0a3f5a58bc76f1e8b05d3dc3e1ec26"}, - {file = "viztracer-0.15.4-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:731b3245a8a8f6fe0cf2da7ab78648f0fe9c8c46058dabc8c8f0d59ef5a21112"}, - {file = "viztracer-0.15.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d43aca3e67cb54861731efe35a50e1a75999e6b173d7464e0ff7456be6ba59a"}, - {file = "viztracer-0.15.4-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.manylinux_2_24_i686.whl", hash = "sha256:00c2338f04d1bf6b2c2b003f10a154eafb902584a0204572f47d9efc1d0202f1"}, - {file = "viztracer-0.15.4-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524c7957213352232ee1aa7081d2b390c1f3c2d38cd5b70fa70fadf76a57cf0f"}, - {file = "viztracer-0.15.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2667097770cde8ee1af0bf77d470cf3e6b27a01c84565ec1f516a8a309910e5c"}, - {file = "viztracer-0.15.4-cp38-cp38-win32.whl", hash = "sha256:757e473622a2b95f72763b8267455e9554d2290d8fac8d6311239ce639aa9034"}, - {file = "viztracer-0.15.4-cp38-cp38-win_amd64.whl", hash = "sha256:7a0e53ae02dc1a0e10a24849de498353e6221f326973ff33ff4734f5999b173c"}, - {file = "viztracer-0.15.4-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d6ab0d24e93ddc3f623219ba13d9ddff3bac2f81890e6de13055c41965112083"}, - {file = "viztracer-0.15.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a93afcb7d2fc3adac3706e6cc409f5df3992381b98b899947be6032e9b825d38"}, - {file = "viztracer-0.15.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.manylinux_2_24_i686.whl", hash = "sha256:35d948a34c6d14cac388f59a66f7f57c9c78973d035ff350ce6c0382430dfdf7"}, - {file = "viztracer-0.15.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9110feb65a47e95dfdfa66a0f3cd87f06c3d44d18f1291b920dd0eda234ec176"}, - {file = 
"viztracer-0.15.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be7dee1190a5ef7642d162e1772d72ba395a820d828ac006a863f45cfdd7db32"}, - {file = "viztracer-0.15.4-cp39-cp39-win32.whl", hash = "sha256:a97887c2c34985990e67e287da42b176eaa8c411482b09a4ba252f2cbd9b11f1"}, - {file = "viztracer-0.15.4-cp39-cp39-win_amd64.whl", hash = "sha256:6a5ba827ad201fecee5f1647cc32c2f59476ec2881395d82e4bea56fa7599dc8"}, - {file = "viztracer-0.15.4.tar.gz", hash = "sha256:df18692a0f765e29e70ad1c5c74429be54968f16fca8a6db8dd0dc52cd87fb8b"}, -] -xdoctest = [ - {file = "xdoctest-1.0.1-py3-none-any.whl", hash = "sha256:334e422dfd7a889acf17b56db4f42a45f530f7324e3418a5ce9b0df4603babba"}, - {file = "xdoctest-1.0.1.tar.gz", hash = "sha256:3b90a4b7894dda58c9c8dc673fb61fdceb90fdc838baf1cac475618c8e35975c"}, -] -zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, -] +lock-version = "2.0" +python-versions = "<3.12,>=3.7.1" +content-hash = "ce7485cc6dffcd5b2fbb8f8f4c3cf0bd7e21d91aad64f6c019683c127f3be952" diff --git a/pyproject.toml b/pyproject.toml index b4852b6ba..ea5884d67 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,16 +1,18 @@ [tool.poetry] name = "singer-sdk" -version = "0.8.0" +version = "0.31.1" description = "A framework for building Singer taps" -authors = ["Meltano Team and Contributors"] -maintainers = ["Meltano Team and Contributors"] +authors = ["Meltano Team and Contributors <hello@meltano.com>"] +maintainers = ["Meltano Team and Contributors <hello@meltano.com>"] readme = "README.md" homepage = "https://sdk.meltano.com/en/latest/" repository = "https://github.com/meltano/sdk" documentation = "https://sdk.meltano.com/en/latest/" keywords = [ "Meltano", + "Singer", "Meltano SDK", + "Singer SDK", "ELT", ] classifiers = [ @@ -21,11 +23,12 @@ classifiers = [ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Application Frameworks", "Typing :: Typed", ] -license = "Apache 2.0" +license = "Apache-2.0" [tool.poetry.urls] "Issue Tracker" = "https://github.com/meltano/sdk/issues" @@ -35,141 +38,120 @@ license = "Apache 2.0" "Youtube" = "https://www.youtube.com/meltano" [tool.poetry.dependencies] -python = "<3.11,>=3.7.1" -pipelinewise-singer-python = "1.2.0" -backoff = ">=1.8.0,<2.0" +python = "<3.12,>=3.7.1" +backoff = ">=2.0.0,<3.0" pendulum = "^2.1.0" click = "~=8.0" +fs = "^2.4.16" PyJWT = "~=2.4" requests = "^2.25.1" -cryptography = ">=3.4.6,<38.0.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +cryptography = ">=3.4.6,<42.0.0" +importlib-metadata = {version = "<5.0.0", markers = "python_version < \"3.8\""} +importlib-resources = {version = "5.12.0", markers = "python_version < \"3.9\""} memoization = ">=0.3.2,<0.5.0" jsonpath-ng = "^1.5.3" joblib = "^1.0.1" inflection = "^0.5.1" -sqlalchemy = "^1.4" -python-dotenv = "^0.20.0" +sqlalchemy = ">=1.4,<3.0" +python-dotenv = ">=0.20,<0.22" typing-extensions = "^4.2.0" +simplejson = "^3.17.6" +jsonschema = "^4.16.0" +packaging = ">=23.1" +pytz = ">=2022.2.1,<2024.0.0" +PyYAML = "^6.0" +# urllib3 2.0 is not compatible with botocore +urllib3 = ">=1.26,<2" # Sphinx dependencies installed as 
optional 'docs' extras # https://github.com/readthedocs/readthedocs.org/issues/4912#issuecomment-664002569 sphinx = {version = ">=4.5,<6.0", optional = true} -sphinx-rtd-theme = {version = ">=0.5.2,<1.1.0", optional = true} +furo = {version = ">=2022.12.7,<2024.0.0", optional = true} sphinx-copybutton = {version = ">=0.3.1,<0.6.0", optional = true} -myst-parser = {version = ">=0.17.2,<0.19.0", optional = true} +myst-parser = {version = ">=0.17.2,<1.1.0", optional = true} sphinx-autobuild = {version = "^2021.3.14", optional = true} +sphinx-reredirects = {version = "^0.1.1", optional = true} +sphinx-inline-tabs = {version = ">=2023.4.21", optional = true, markers = "python_version >= \"3.8\""} + +# File storage dependencies installed as optional 'filesystem' extras +fs-s3fs = {version = "^1.1.1", optional = true} + +# Testing dependencies installed as optional 'testing' extras +pytest = {version="^7.2.1", optional = true} +pytest-durations = {version = "^1.2.0", optional = true} [tool.poetry.extras] docs = [ "sphinx", - "sphinx-rtd-theme", + "furo", "sphinx-copybutton", "myst-parser", "sphinx-autobuild", + "sphinx-reredirects", + "sphinx-inline-tabs", +] +s3 = ["fs-s3fs"] +testing = [ + "pytest", + "pytest-durations" ] -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] # snowflake-connector-python = "2.0.4" # Removed: Too many version conflicts! -pytest = "^7.1.2" -xdoctest = "^1.0.1" -mypy = "^0.971" -cookiecutter = "^2.1.1" +commitizen-version-bump = { git = "https://github.com/meltano/commitizen-version-bump.git", branch = "main" } +xdoctest = "^1.1.1" +mypy = "^1.0" +cookiecutter = ">=2.1.1,<2.3.1" PyYAML = "^6.0" -pyarrow = "^9.0.0" freezegun = "^1.2.2" -viztracer = "^0.15.4" -requests-mock = "^1.9.3" -sqlalchemy2-stubs = {version = "^0.0.2a25", allow-prereleases = true} +numpy = [ + { version = "<1.22", python = "<3.8" }, + { version = ">=1.22", python = ">=3.8" }, +] +requests-mock = "^1.10.0" +types-jsonschema = "^4.17.0.6" types-python-dateutil = "^2.8.19" -types-requests = "^2.28.9" -coverage = {extras = ["toml"], version = "^6.4"} +types-pytz = ">=2022.7.1.2,<2024.0.0.0" +types-requests = "^2.28.11" +types-simplejson = "^3.18.0" +types-PyYAML = "^6.0.12" +coverage = {extras = ["toml"], version = "^7.2"} +pyarrow = ">=11,<13" +pytest-snapshot = "^0.9.0" # Cookiecutter tests -black = "^22.6" +black = "^23.1" darglint = "^1.8.0" flake8 = "^3.9.0" flake8-annotations = "^2.9.1" -flake8-docstrings = "^1.6.0" +flake8-docstrings = "^1.7.0" [tool.black] exclude = ".*simpleeval.*" -[tool.isort] -profile = "black" -multi_line_output = 3 # Vertical Hanging Indent -src_paths = "singer_sdk" -known_first_party = ["tests", "samples"] - [tool.pytest.ini_options] addopts = '-vvv --ignore=singer_sdk/helpers/_simpleeval.py -m "not external"' markers = [ "external: Tests relying on external resources", + "windows: Tests that only run on Windows", + "snapshot: Tests that use pytest-snapshot", ] +testpaths = ["tests"] +norecursedirs = "cookiecutter" [tool.commitizen] -name = "cz_customize" -version = "0.8.0" +name = "cz_version_bump" +version = "0.31.1" tag_format = "v$major.$minor.$patch$prerelease" version_files = [ - "docs/conf.py", + "docs/conf.py:^release =", "pyproject.toml:^version =", "cookiecutter/tap-template/{{cookiecutter.tap_id}}/pyproject.toml:singer-sdk", "cookiecutter/target-template/{{cookiecutter.target_id}}/pyproject.toml:singer-sdk", + "cookiecutter/mapper-template/{{cookiecutter.mapper_id}}/pyproject.toml:singer-sdk", ".github/ISSUE_TEMPLATE/bug.yml:^ 
placeholder:", ] -[tool.commitizen.customize] -message_template = "{{change_type}}: {{message}}" -commit_parser = '^(?P<change_type>feat|fix|refactor|perf|break|docs)(?:\((?P<scope>[^()\r\n]*)\)|\()?(?P<breaking>!)?:\s(?P<message>.*)?' -schema_pattern = '(feat|fix|refactor|perf|break|docs|ci|chore|style|revert|test|build)(?:\((?P<scope>[^()\r\n]*)\)|\()?(?P<breaking>!)?:(\s.*)' -schema = """ -<type>(<scope>): <subject> -<BLANK LINE> -<body> -<BLANK LINE> -(BREAKING CHANGE: )<footer>""" -change_type_order = [ - "BREAKING CHANGES", - "✨ New", - "🐛 Fixes", - "⚙️ Under the Hood", - "⚡ Performance Improvements", - "📚 Documentation Improvements", -] - -[tool.commitizen.customize.change_type_map] -break = "BREAKING CHANGES" -feat = "✨ New" -fix = "🐛 Fixes" -refactor = "⚙️ Under the Hood" -docs = "📚 Documentation Improvements" -perf = "⚡ Performance Improvements" - -[[tool.commitizen.customize.questions]] -type = "list" -name = "change_type" -choices = [ - { value = "feat", name = "feat: A new feature." }, - { value = "fix", name = "fix: A bug fix." }, - { value = "refactor", name = "refactor: A code change that neither fixes a bug nor adds a feature." }, - { value = "perf", name = "perf: A code change that improves performance." }, - { value = "docs", name = "docs: A documentation change." }, - { value = "break", name = "break: A breaking change." }, - { value = "chore", name = "chore: A change that doesn't affect the meaning of the codebase." }, - { value = "style", name = "style: A code style change." }, - { value = "revert", name = "revert: Revert to a commit." }, - { value = "test", name = "test: A test change." }, - { value = "build", name = "build: A build system change." }, - { value = "ci", name = "ci: A change to CI/CD." }, -] -message = "Select the type of change you are committing" - -[[tool.commitizen.customize.questions]] -type = "input" -name = "message" -message = "Subject" - [tool.coverage.paths] source = [ "singer_sdk/", @@ -197,13 +179,142 @@ exclude_lines = [ "raise NotImplementedError", "if __name__ == .__main__.:", '''class .*\bProtocol\):''', + '''@(abc\.)?abstractmethod''', + '''if (t\.)?TYPE_CHECKING:''', ] fail_under = 82 +[tool.mypy] +exclude = "tests" +files = "singer_sdk" +python_version = "3.8" +warn_unused_configs = true +warn_unused_ignores = true +warn_return_any = true + +[[tool.mypy.overrides]] +ignore_missing_imports = true +module = [ + "bcrypt.*", + "joblib.*", + "pyarrow.*", + "pandas.*", + "jsonpath_ng.*", + "samples.*", + "sqlalchemy.*", +] + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] -# Sample CLI declaration: -# sdk-tap-countries-sample = 'singer_sdk.samples.sample_tap_countries.countries_tap:SampleTapCountries.cli' +pytest11 = { reference = "singer_sdk:testing.pytest_plugin", extras = ["testing"], type = "console" } + +[tool.ruff] +exclude = [ + "cookiecutter/*", + "singer_sdk/helpers/_simpleeval.py", + "tests/core/test_simpleeval.py", +] +ignore = [ + "ANN101", # Missing type annotation for `self` in method + "ANN102", # Missing type annotation for `cls` in class method + "N818", # Exception name should be named with an Error suffix +] +line-length = 88 +select = [ + "F", # Pyflakes + "E", # pycodestyle (error) + "W", # pycodestyle (warning) + "C90", # mccabe + "I", # isort + "N", # pep8-naming + "D", # pydocstyle/flake8-docstrings + "UP", # pyupgrade + "YTT", # flake8-2020 + "ANN", # flake8-annotations + "S", # flake8-bandit + "BLE", # flake8-blind-except + "FBT", # 
flake8-boolean-trap + "B", # flake8-bugbear + "A", # flake8-builtins + "COM", # flake8-commas + "C4", # flake8-comprehensions + "DTZ", # flake8-datetimezs + "T10", # flake8-debugger + "EM", # flake8-errmsg + "FA", # flake8-future-annotations + "ISC", # flake8-implicit-str-concat + "ICN", # flake8-import-conventions + "G", # flake8-logging-format + "INP", # flake8-no-pep420 + "PIE", # flake8-pie + "T20", # flake8-print + "PT", # flake8-pytest-style + "Q", # flake8-quotes + "RSE", # flake8-raise + "RET", # flake8-return + # "SLF", # flake8-self + "SIM", # flake8-simplify + "TID", # flake8-tidy-imports + "TCH", # flake8-type-checking + "ARG", # flake8-unused-arguments + "PTH", # flake8-use-pathlib + "ERA", # eradicate + "PGH", # pygrep-hooks + "PLC", # pylint (convention) + "PLE", # pylint (error) + "PLR", # pylint (refactor) + "PLW", # pylint (warning) + "PERF", # perflint + "RUF", # ruff +] +src = ["samples", "singer_sdk", "tests"] +target-version = "py37" + +[tool.ruff.per-file-ignores] +"docs/conf.py" = [ + "D", # pydocstyle/flake8-docstrings + "I002", # isort: missing-required-import + "INP001", # flake8-no-pep420: implicit-namespace-package +] +"noxfile.py" = ["ANN"] +"tests/*" = ["ANN", "D1", "D2", "FBT001", "FBT003", "PLR2004", "S101"] +# Disabled some checks in samples code +"samples/*" = ["ANN", "D"] +# Don't require docstrings conventions or type annotations in private modules +"singer_sdk/helpers/_*.py" = ["ANN", "D105"] +# Templates support a generic resource of type Any. +"singer_sdk/testing/*.py" = ["S101"] +"singer_sdk/testing/templates.py" = ["ANN401"] + +[tool.ruff.flake8-annotations] +allow-star-arg-any = true +mypy-init-return = true +suppress-dummy-args = true + +[tool.ruff.flake8-import-conventions] +banned-from = ["typing"] + +[tool.ruff.flake8-import-conventions.extend-aliases] +typing = "t" + +[tool.ruff.flake8-pytest-style] +fixture-parentheses = false +parametrize-names-type = "csv" + +[tool.ruff.isort] +known-first-party = ["singer_sdk", "samples", "tests"] +required-imports = ["from __future__ import annotations"] + +[tool.ruff.pep8-naming] +classmethod-decorators = [ + "singer_sdk.cli.plugin_cli", +] + +[tool.ruff.pydocstyle] +convention = "google" + +[tool.ruff.pylint] +max-args = 9 diff --git a/samples/aapl/__init__.py b/samples/aapl/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/aapl/__main__.py b/samples/aapl/__main__.py index ce072c106..08f54e55f 100644 --- a/samples/aapl/__main__.py +++ b/samples/aapl/__main__.py @@ -5,6 +5,8 @@ $ poetry run python samples/aapl """ +from __future__ import annotations + from aapl import Fundamentals Fundamentals.cli() diff --git a/samples/aapl/aapl.py b/samples/aapl/aapl.py index db1c5b4e8..ecbf032de 100644 --- a/samples/aapl/aapl.py +++ b/samples/aapl/aapl.py @@ -1,5 +1,7 @@ """A simple tap with one big record and schema.""" +from __future__ import annotations + import json from pathlib import Path @@ -16,7 +18,7 @@ class AAPL(Stream): def get_records(self, _): """Generate a single record.""" - with open(PROJECT_DIR / "AAPL.json") as f: + with PROJECT_DIR.joinpath("AAPL.json").open() as f: record = json.load(f) yield record diff --git a/samples/sample_mapper/__init__.py b/samples/sample_mapper/__init__.py index 9b7c0360b..868105448 100644 --- a/samples/sample_mapper/__init__.py +++ b/samples/sample_mapper/__init__.py @@ -1 +1,3 @@ """Stream maps transformer.""" + +from __future__ import annotations diff --git a/samples/sample_mapper/mapper.py b/samples/sample_mapper/mapper.py index 
e9fbf850e..e079bac08 100644 --- a/samples/sample_mapper/mapper.py +++ b/samples/sample_mapper/mapper.py @@ -1,15 +1,18 @@ """A sample inline mapper app.""" -from pathlib import PurePath -from typing import Generator, List, Optional, Union +from __future__ import annotations -import singer +import typing as t +import singer_sdk._singerlib as singer import singer_sdk.typing as th from singer_sdk.helpers._util import utc_now from singer_sdk.mapper import PluginMapper from singer_sdk.mapper_base import InlineMapper +if t.TYPE_CHECKING: + from pathlib import PurePath + class StreamTransform(InlineMapper): """A map transformer which implements the Stream Maps capability.""" @@ -33,17 +36,18 @@ class StreamTransform(InlineMapper): }, }, "additionalProperties": {"type": ["string", "null"]}, - } - ) + }, + ), ), required=True, description="Stream maps", - ) + ), ).to_dict() def __init__( self, - config: Optional[Union[dict, PurePath, str, List[Union[PurePath, str]]]] = None, + *, + config: dict | PurePath | str | list[PurePath | str] | None = None, parse_env_config: bool = False, validate_config: bool = True, ) -> None: @@ -68,7 +72,7 @@ def __init__( def map_schema_message( self, message_dict: dict, - ) -> Generator[singer.Message, None, None]: + ) -> t.Generator[singer.Message, None, None]: """Map a schema message according to config. Args: @@ -97,7 +101,7 @@ def map_schema_message( def map_record_message( self, message_dict: dict, - ) -> Generator[singer.Message, None, None]: + ) -> t.Generator[singer.Message, None, None]: """Map a record message according to config. Args: @@ -121,7 +125,7 @@ def map_record_message( self.logger.info(stream_map.stream_alias) yield record_message - def map_state_message(self, message_dict: dict) -> List[singer.Message]: + def map_state_message(self, message_dict: dict) -> list[singer.Message]: """Do nothing to the message. Args: @@ -135,7 +139,7 @@ def map_state_message(self, message_dict: dict) -> List[singer.Message]: def map_activate_version_message( self, message_dict: dict, - ) -> Generator[singer.Message, None, None]: + ) -> t.Generator[singer.Message, None, None]: """Duplicate the message or alias the stream name as defined in configuration. 
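For reference, the pattern the sample mapper adopts above (postponed annotations via `from __future__ import annotations`, `import typing as t`, and imports guarded by `t.TYPE_CHECKING`) looks like this in isolation. This is a minimal sketch, not SDK code; `load_config` and its signature are illustrative only.

from __future__ import annotations  # PEP 563: annotations stay unevaluated at runtime

import typing as t

if t.TYPE_CHECKING:
    # Imported only for type checkers; avoids a runtime import (and import cycles).
    from pathlib import PurePath


def load_config(config: dict | PurePath | str | None = None) -> dict:
    # PEP 604 unions ("X | Y") are legal in annotations on Python 3.7+ here,
    # because the __future__ import keeps them as strings until a type checker reads them.
    return {"config": None if config is None else str(config)}
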
Args: diff --git a/samples/sample_tap_bigquery/__init__.py b/samples/sample_tap_bigquery/__init__.py index 13e4cdb49..bf0681faf 100644 --- a/samples/sample_tap_bigquery/__init__.py +++ b/samples/sample_tap_bigquery/__init__.py @@ -1,6 +1,6 @@ """A sample implementation for BigQuery.""" -from typing import List, Tuple, Type +from __future__ import annotations from singer_sdk import SQLConnector, SQLStream, SQLTap from singer_sdk import typing as th # JSON schema typing helpers @@ -9,13 +9,16 @@ class BigQueryConnector(SQLConnector): """Connects to the BigQuery SQL source.""" - def get_sqlalchemy_url(cls, config: dict) -> str: + def get_sqlalchemy_url(self, config: dict) -> str: """Concatenate a SQLAlchemy URL for use in connecting to the source.""" return f"bigquery://{config['project_id']}" def get_object_names( - self, engine, inspected, schema_name: str - ) -> List[Tuple[str, bool]]: + self, + engine, + inspected, + schema_name: str, + ) -> list[tuple[str, bool]]: """Return discoverable object names.""" # Bigquery inspections returns table names in the form # `schema_name.table_name` which later results in the project name @@ -26,7 +29,9 @@ def get_object_names( return [ (table_name.split(".")[-1], is_view) for (table_name, is_view) in super().get_object_names( - engine, inspected, schema_name + engine, + inspected, + schema_name, ) ] @@ -44,11 +49,14 @@ class TapBigQuery(SQLTap): config_jsonschema = th.PropertiesList( th.Property( - "project_id", th.StringType, required=True, description="GCP Project" + "project_id", + th.StringType, + required=True, + description="GCP Project", ), ).to_dict() - default_stream_class: Type[SQLStream] = BigQueryStream + default_stream_class: type[SQLStream] = BigQueryStream __all__ = ["TapBigQuery", "BigQueryConnector", "BigQueryStream"] diff --git a/samples/sample_tap_countries/__init__.py b/samples/sample_tap_countries/__init__.py index ca223d5a1..f2f413f51 100644 --- a/samples/sample_tap_countries/__init__.py +++ b/samples/sample_tap_countries/__init__.py @@ -1 +1,3 @@ """Countries API Sample.""" + +from __future__ import annotations diff --git a/samples/sample_tap_countries/__main__.py b/samples/sample_tap_countries/__main__.py new file mode 100644 index 000000000..3c749ecb3 --- /dev/null +++ b/samples/sample_tap_countries/__main__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from samples.sample_tap_countries.countries_tap import SampleTapCountries + +SampleTapCountries.cli() diff --git a/samples/sample_tap_countries/countries_streams.py b/samples/sample_tap_countries/countries_streams.py index 08186a7bd..708e1678a 100644 --- a/samples/sample_tap_countries/countries_streams.py +++ b/samples/sample_tap_countries/countries_streams.py @@ -6,6 +6,8 @@ - https://countries.trevorblades.com/ """ +from __future__ import annotations + import abc from pathlib import Path @@ -28,7 +30,7 @@ class CountriesStream(CountriesAPIStream): """Countries API stream.""" name = "countries" - primary_keys = ["code"] + primary_keys = ("code",) query = """ countries { code @@ -69,7 +71,7 @@ class CountriesStream(CountriesAPIStream): th.ObjectType( th.Property("code", th.StringType), th.Property("name", th.StringType), - ) + ), ), ), ).to_dict() @@ -79,7 +81,7 @@ class ContinentsStream(CountriesAPIStream): """Continents stream from the Countries API.""" name = "continents" - primary_keys = ["code"] + primary_keys = ("code",) schema_filepath = SCHEMAS_DIR / "continents.json" query = """ continents { diff --git a/samples/sample_tap_countries/countries_tap.py 
b/samples/sample_tap_countries/countries_tap.py index 842686117..0c3906166 100644 --- a/samples/sample_tap_countries/countries_tap.py +++ b/samples/sample_tap_countries/countries_tap.py @@ -6,7 +6,7 @@ - https://countries.trevorblades.com/ """ -from typing import List +from __future__ import annotations from samples.sample_tap_countries.countries_streams import ( ContinentsStream, @@ -22,9 +22,13 @@ class SampleTapCountries(Tap): name: str = "sample-tap-countries" config_jsonschema = PropertiesList().to_dict() - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> list[Stream]: """Return a list of discovered streams.""" return [ CountriesStream(tap=self), ContinentsStream(tap=self), ] + + +if __name__ == "__main__": + SampleTapCountries.cli() diff --git a/samples/sample_tap_gitlab/__init__.py b/samples/sample_tap_gitlab/__init__.py index 0c5781bfd..879dd5a64 100644 --- a/samples/sample_tap_gitlab/__init__.py +++ b/samples/sample_tap_gitlab/__init__.py @@ -1 +1,3 @@ """Gitlab API Sample.""" + +from __future__ import annotations diff --git a/samples/sample_tap_gitlab/gitlab_graphql_streams.py b/samples/sample_tap_gitlab/gitlab_graphql_streams.py index 53bce85a1..b29fbc13e 100644 --- a/samples/sample_tap_gitlab/gitlab_graphql_streams.py +++ b/samples/sample_tap_gitlab/gitlab_graphql_streams.py @@ -4,6 +4,8 @@ # - https://gitlab.com/-/graphql-explorer """ +from __future__ import annotations + from pathlib import Path from singer_sdk.streams import GraphQLStream @@ -32,7 +34,7 @@ class GraphQLCurrentUserStream(GitlabGraphQLStream): """Gitlab Current User stream.""" name = "currentuser" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = None schema_filepath = SCHEMAS_DIR / "currentuser.json" query = """ @@ -46,11 +48,11 @@ class GraphQLProjectsStream(GitlabGraphQLStream): """Gitlab Projects stream.""" name = "projects" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = None schema_filepath = SCHEMAS_DIR / "projects-graphql.json" @property def query(self) -> str: """Return dynamic GraphQL query.""" - return f"project(fullPath: {self.config['project_id']}" " { name }" + return f"project(fullPath: {self.config['project_id']} {{ name }}" diff --git a/samples/sample_tap_gitlab/gitlab_rest_streams.py b/samples/sample_tap_gitlab/gitlab_rest_streams.py index 425348e6e..1480a017d 100644 --- a/samples/sample_tap_gitlab/gitlab_rest_streams.py +++ b/samples/sample_tap_gitlab/gitlab_rest_streams.py @@ -1,11 +1,12 @@ """Sample tap stream test for tap-gitlab.""" -from pathlib import Path -from typing import Any, Dict, List, Optional, cast +from __future__ import annotations -import requests +import typing as t +from pathlib import Path from singer_sdk.authenticators import SimpleAuthenticator +from singer_sdk.pagination import SimpleHeaderPaginator from singer_sdk.streams.rest import RESTStream from singer_sdk.typing import ( ArrayType, @@ -21,7 +22,7 @@ DEFAULT_URL_BASE = "https://gitlab.com/api/v4" -class GitlabStream(RESTStream): +class GitlabStream(RESTStream[str]): """Sample tap test for gitlab.""" _LOG_REQUEST_METRIC_URLS = True @@ -35,12 +36,15 @@ def url_base(self) -> str: def authenticator(self) -> SimpleAuthenticator: """Return an authenticator for REST API requests.""" return SimpleAuthenticator( - stream=self, auth_headers={"Private-Token": self.config.get("auth_token")} + stream=self, + auth_headers={"Private-Token": self.config.get("auth_token")}, ) def get_url_params( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> 
Dict[str, Any]: + self, + context: dict | None, # noqa: ARG002 + next_page_token: str | None, + ) -> dict[str, t.Any]: """Return a dictionary of values to be used in URL parameterization.""" params: dict = {} if next_page_token: @@ -50,38 +54,42 @@ def get_url_params( params["order_by"] = self.replication_key return params - def get_next_page_token( - self, response: requests.Response, previous_token: Optional[Any] - ) -> Optional[Any]: - """Return token for identifying next page or None if not applicable.""" - next_page_token = response.headers.get("X-Next-Page", None) - if next_page_token: - self.logger.debug(f"Next page token retrieved: {next_page_token}") - return next_page_token + def get_new_paginator(self) -> SimpleHeaderPaginator: + """Return a new paginator for GitLab API endpoints. + + Returns: + A new paginator. + """ + return SimpleHeaderPaginator("X-Next-Page") class ProjectBasedStream(GitlabStream): """Base class for streams that are keys based on project ID.""" @property - def partitions(self) -> List[dict]: + def partitions(self) -> list[dict]: """Return a list of partition key dicts (if applicable), otherwise None.""" if "{project_id}" in self.path: return [ - {"project_id": id} for id in cast(list, self.config.get("project_ids")) + {"project_id": pid} + for pid in t.cast(list, self.config.get("project_ids")) ] if "{group_id}" in self.path: if "group_ids" not in self.config: - raise ValueError( - f"Missing `group_ids` setting which is required for the " + msg = ( + "Missing `group_ids` setting which is required for the " f"'{self.name}' stream." ) - return [{"group_id": id} for id in cast(list, self.config.get("group_ids"))] - raise ValueError( - "Could not detect partition type for Gitlab stream " - f"'{self.name}' ({self.path}). " - "Expected a URL path containing '{project_id}' or '{group_id}'. " + raise ValueError(msg) + return [ + {"group_id": gid} for gid in t.cast(list, self.config.get("group_ids")) + ] + msg = ( + f"Could not detect partition type for Gitlab stream '{self.name}' " + f"({self.path}). Expected a URL path containing '{{project_id}}' or " + "'{{group_id}}'." 
) + raise ValueError(msg) class ProjectsStream(ProjectBasedStream): @@ -89,7 +97,7 @@ class ProjectsStream(ProjectBasedStream): name = "projects" path = "/projects/{project_id}?statistics=1" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = "last_activity_at" is_sorted = True schema_filepath = SCHEMAS_DIR / "projects.json" @@ -100,7 +108,7 @@ class ReleasesStream(ProjectBasedStream): name = "releases" path = "/projects/{project_id}/releases" - primary_keys = ["project_id", "commit_id", "tag_name"] + primary_keys = ("project_id", "tag_name") replication_key = None schema_filepath = SCHEMAS_DIR / "releases.json" @@ -110,9 +118,9 @@ class IssuesStream(ProjectBasedStream): name = "issues" path = "/projects/{project_id}/issues?scope=all&updated_after={start_date}" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = "updated_at" - is_sorted = True + is_sorted = False schema_filepath = SCHEMAS_DIR / "issues.json" @@ -123,7 +131,7 @@ class CommitsStream(ProjectBasedStream): path = ( "/projects/{project_id}/repository/commits?since={start_date}&with_stats=true" ) - primary_keys = ["id"] + primary_keys = ("id",) replication_key = "created_at" is_sorted = False schema_filepath = SCHEMAS_DIR / "commits.json" @@ -138,7 +146,7 @@ class EpicsStream(ProjectBasedStream): name = "epics" path = "/groups/{group_id}/epics?updated_after={start_date}" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = "updated_at" is_sorted = True schema = PropertiesList( @@ -160,9 +168,11 @@ class EpicsStream(ProjectBasedStream): Property("downvotes", IntegerType), ).to_dict() - # schema_filepath = SCHEMAS_DIR / "epics.json" - - def get_child_context(self, record: dict, context: Optional[dict]) -> dict: + def get_child_context( + self, + record: dict, + context: dict | None, # noqa: ARG002 + ) -> dict: """Perform post processing, including queuing up any child stream types.""" # Ensure child state record(s) are created return { @@ -177,16 +187,19 @@ class EpicIssuesStream(GitlabStream): name = "epic_issues" path = "/groups/{group_id}/epics/{epic_iid}/issues" - primary_keys = ["id"] + primary_keys = ("id",) replication_key = None schema_filepath = SCHEMAS_DIR / "epic_issues.json" parent_stream_type = EpicsStream # Stream should wait for parents to complete. def get_url_params( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> Dict[str, Any]: + self, + context: dict | None, + next_page_token: str | None, + ) -> dict[str, t.Any]: """Return a dictionary of values to be used in parameterization.""" result = super().get_url_params(context, next_page_token) if not context or "epic_id" not in context: - raise ValueError("Cannot sync epic issues without already known epic IDs.") + msg = "Cannot sync epic issues without already known epic IDs." 
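The `get_new_paginator` hook used above returns `SimpleHeaderPaginator("X-Next-Page")` in place of the removed `get_next_page_token` override. A hand-rolled equivalent, as a minimal sketch assuming `BaseAPIPaginator` exposes `has_more`/`get_next` hooks and a `start_value` constructor argument as in recent singer-sdk releases (the class name below is illustrative), might look like:

from __future__ import annotations

import requests

from singer_sdk.pagination import BaseAPIPaginator


class NextPageHeaderPaginator(BaseAPIPaginator):
    """Advance using a response header; stop when the header is absent or empty."""

    def __init__(self, header: str = "X-Next-Page") -> None:
        # start_value=None: no token is sent on the first request.
        super().__init__(start_value=None)
        self._header = header

    def has_more(self, response: requests.Response) -> bool:
        # Keep paginating only while the server returns a non-empty header value.
        return bool(response.headers.get(self._header))

    def get_next(self, response: requests.Response) -> str | None:
        return response.headers.get(self._header) or None

A stream would return such a paginator from `get_new_paginator()`, and the current token then arrives as `next_page_token` in `get_url_params`, which is exactly how `GitlabStream.get_url_params` consumes it above.
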
+ raise ValueError(msg) return result diff --git a/samples/sample_tap_gitlab/gitlab_tap.py b/samples/sample_tap_gitlab/gitlab_tap.py index 6bdb04fb0..952e406ae 100644 --- a/samples/sample_tap_gitlab/gitlab_tap.py +++ b/samples/sample_tap_gitlab/gitlab_tap.py @@ -1,6 +1,6 @@ """Sample tap test for tap-gitlab.""" -from typing import List +from __future__ import annotations from samples.sample_tap_gitlab.gitlab_rest_streams import ( CommitsStream, @@ -34,12 +34,12 @@ class SampleTapGitlab(Tap): name: str = "sample-tap-gitlab" config_jsonschema = PropertiesList( - Property("auth_token", StringType, required=True), + Property("auth_token", StringType, required=True, secret=True), Property("project_ids", ArrayType(StringType), required=True), Property("group_ids", ArrayType(StringType), required=True), Property("start_date", DateTimeType, required=True), ).to_dict() - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> list[Stream]: """Return a list of discovered streams.""" return [stream_class(tap=self) for stream_class in STREAM_TYPES] diff --git a/samples/sample_tap_gitlab/schemas/commits.json b/samples/sample_tap_gitlab/schemas/commits.json index 33978e57f..532e978c5 100644 --- a/samples/sample_tap_gitlab/schemas/commits.json +++ b/samples/sample_tap_gitlab/schemas/commits.json @@ -1,95 +1,71 @@ { - "type": "object", - "properties": { - "id": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "project_id": { + "type": "string" + }, + "short_id": { + "type": "string" + }, + "title": { + "type": ["null", "string"] + }, + "author_name": { + "type": ["null", "string"] + }, + "author_email": { + "type": ["null", "string"] + }, + "authored_date": { + "type": "string", + "format": "date-time" + }, + "committer_name": { + "type": ["null", "string"] + }, + "committer_email": { + "type": ["null", "string"] + }, + "committed_date": { + "type": "string", + "format": "date-time" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "message": { + "type": ["null", "string"] + }, + "parent_ids": { + "anyOf": [ + { + "type": "array", + "items": { "type": "string" + } }, - "project_id": { - "type": "integer" - }, - "short_id": { - "type": "string" - }, - "title": { - "type": [ - "null", - "string" - ] - }, - "author_name": { - "type": [ - "null", - "string" - ] - }, - "author_email": { - "type": [ - "null", - "string" - ] - }, - "authored_date": { - "type": "string", - "format": "date-time" - }, - "committer_name": { - "type": [ - "null", - "string" - ] - }, - "committer_email": { - "type": [ - "null", - "string" - ] - }, - "committed_date": { - "type": "string", - "format": "date-time" - }, - "created_at": { - "type": "string", - "format": "date-time" - }, - "message": { - "type": [ - "null", - "string" - ] - }, - "allow_failure": { - "type": [ - "null", - "boolean" - ] + { + "type": "null" + } + ] + }, + "stats": { + "type": "object", + "properties": { + "additions": { + "type": "integer" }, - "parent_ids": { - "anyOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "null" - } - ] + "deletions": { + "type": "integer" }, - "stats": { - "type": "object", - "properties": { - "additions": { - "type": "integer" - }, - "deletions": { - "type": "integer" - }, - "total": { - "type": "integer" - } - } + "total": { + "type": "integer" } + } } -} \ No newline at end of file + } +} diff --git a/samples/sample_tap_gitlab/schemas/epic_issues.json b/samples/sample_tap_gitlab/schemas/epic_issues.json index 
372f9682b..2330bb427 100644 --- a/samples/sample_tap_gitlab/schemas/epic_issues.json +++ b/samples/sample_tap_gitlab/schemas/epic_issues.json @@ -1,26 +1,269 @@ { - "type": "object", - "properties": { - "group_id": { - "type": "integer" + "properties": { + "_links": { + "properties": { + "award_emoji": { + "type": "string" }, - "epic_iid": { - "type": "integer" + "notes": { + "type": "string" }, - "epic_issue_id": { - "type": "integer" + "project": { + "type": "string" }, - "issue_id": { - "type": "integer" + "self": { + "type": "string" + } + }, + "required": ["award_emoji", "notes", "project", "self"], + "type": "object" + }, + "assignee": { + "properties": { + "avatar_url": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" }, - "issue_iid": { + "state": { + "type": "string" + }, + "username": { + "type": "string" + }, + "web_url": { + "type": "string" + } + }, + "required": ["avatar_url", "id", "name", "state", "username", "web_url"], + "type": ["object", "null"] + }, + "assignees": { + "items": { + "properties": { + "avatar_url": { + "type": "string" + }, + "id": { "type": "integer" + }, + "name": { + "type": "string" + }, + "state": { + "type": "string" + }, + "username": { + "type": "string" + }, + "web_url": { + "type": "string" + } + }, + "required": [ + "avatar_url", + "id", + "name", + "state", + "username", + "web_url" + ], + "type": "object" + }, + "type": "array" + }, + "author": { + "properties": { + "avatar_url": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "state": { + "type": "string" + }, + "username": { + "type": "string" + }, + "web_url": { + "type": "string" + } + }, + "required": ["avatar_url", "id", "name", "state", "username", "web_url"], + "type": "object" + }, + "closed_at": { + "type": ["string", "null"], + "format": "date-time" + }, + "confidential": { + "type": "boolean" + }, + "created_at": { + "format": "date-time", + "type": "string" + }, + "description": { + "type": "string" + }, + "discussion_locked": { + "type": "null" + }, + "downvotes": { + "type": "integer" + }, + "due_date": { + "type": ["string", "null"], + "format": "date-time" + }, + "epic_issue_id": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "labels": { + "items": { + "type": "string" + }, + "type": "array" + }, + "milestone": { + "properties": { + "created_at": { + "format": "date-time", + "type": "string" + }, + "description": { + "type": "string" + }, + "due_date": { + "type": ["string", "null"], + "format": "date-time" + }, + "id": { + "type": "integer" + }, + "iid": { + "type": "integer" }, "project_id": { - "type": ["null", "integer"] + "type": "integer" + }, + "start_date": { + "type": ["string", "null"], + "format": "date-time" + }, + "state": { + "type": "string" + }, + "title": { + "type": "string" + }, + "updated_at": { + "format": "date-time", + "type": "string" + } + }, + "required": [ + "created_at", + "description", + "due_date", + "id", + "iid", + "start_date", + "state", + "title", + "updated_at" + ], + "type": ["object", "null"] + }, + "project_id": { + "type": "integer" + }, + "state": { + "type": "string" + }, + "time_stats": { + "properties": { + "human_time_estimate": { + "type": ["string", "null"] + }, + "human_total_time_spent": { + "type": ["string", "null"] + }, + "time_estimate": { + "type": "integer" }, - "relative_position": { - "type": ["null", "integer"] + "total_time_spent": { + "type": "integer" } + }, + 
"required": [ + "human_time_estimate", + "human_total_time_spent", + "time_estimate", + "total_time_spent" + ], + "type": "object" + }, + "title": { + "type": "string" + }, + "updated_at": { + "format": "date-time", + "type": "string" + }, + "upvotes": { + "type": "integer" + }, + "user_notes_count": { + "type": "integer" + }, + "web_url": { + "type": "string" + }, + "weight": { + "type": ["integer", "null"] } + }, + "required": [ + "_links", + "assignee", + "assignees", + "author", + "closed_at", + "confidential", + "created_at", + "description", + "discussion_locked", + "downvotes", + "due_date", + "epic_issue_id", + "id", + "iid", + "labels", + "milestone", + "project_id", + "state", + "time_stats", + "title", + "updated_at", + "upvotes", + "user_notes_count", + "web_url", + "weight" + ], + "type": "object" } diff --git a/samples/sample_tap_gitlab/schemas/issues.json b/samples/sample_tap_gitlab/schemas/issues.json index c5a988a25..98119c7f8 100644 --- a/samples/sample_tap_gitlab/schemas/issues.json +++ b/samples/sample_tap_gitlab/schemas/issues.json @@ -31,7 +31,33 @@ "assignees": { "type": "array", "items": { - "type": "integer" + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "username": { + "type": "string" + }, + "state": { + "type": "string" + }, + "avatar_url": { + "type": [ + "null", + "string" + ] + }, + "web_url": { + "type": [ + "null", + "string" + ] + } + } } }, "closed_by_id": { diff --git a/samples/sample_tap_gitlab/schemas/projects.json b/samples/sample_tap_gitlab/schemas/projects.json index c506de6dd..d02858e5f 100644 --- a/samples/sample_tap_gitlab/schemas/projects.json +++ b/samples/sample_tap_gitlab/schemas/projects.json @@ -1,373 +1,208 @@ { - "type": "object", - "properties": { - "approvals_before_merge": { - "type": [ - "null", - "integer" - ] - }, - "archived": { - "type": [ - "null", - "boolean" - ] - }, - "avatar_url": { - "type": [ - "null", - "string" - ] - }, - "builds_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "container_registry_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "created_at": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "creator_id": { - "type": [ - "null", - "integer" - ] - }, - "default_branch": { - "type": [ - "null", - "string" - ] - }, - "description": { - "type": [ - "null", - "string" - ] - }, - "forks_count": { - "type": [ - "null", - "integer" - ] - }, - "http_url_to_repo": { - "type": [ - "null", - "string" - ] - }, + "type": "object", + "properties": { + "approvals_before_merge": { + "type": ["null", "integer"] + }, + "archived": { + "type": ["null", "boolean"] + }, + "avatar_url": { + "type": ["null", "string"] + }, + "builds_enabled": { + "type": ["null", "boolean"] + }, + "container_registry_enabled": { + "type": ["null", "boolean"] + }, + "created_at": { + "type": ["string", "null"], + "format": "date-time" + }, + "creator_id": { + "type": ["null", "integer"] + }, + "default_branch": { + "type": ["null", "string"] + }, + "description": { + "type": ["null", "string"] + }, + "forks_count": { + "type": ["null", "integer"] + }, + "http_url_to_repo": { + "type": ["null", "string"] + }, + "id": { + "type": ["null", "integer"] + }, + "issues_enabled": { + "type": ["null", "boolean"] + }, + "last_activity_at": { + "type": ["string", "null"], + "format": "date-time" + }, + "lfs_enabled": { + "type": ["null", "boolean"] + }, + "merge_requests_enabled": { + "type": ["null", "boolean"] + }, + "merge_method": { + "type": ["null", 
"string"] + }, + "name": { + "type": ["null", "string"] + }, + "name_with_namespace": { + "type": ["null", "string"] + }, + "namespace": { + "type": "object", + "properties": { "id": { - "type": [ - "null", - "integer" - ] + "type": ["null", "integer"] }, - "issues_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "last_activity_at": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "lfs_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "merge_requests_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "merge_method": { - "type": [ - "null", - "string" - ] + "kind": { + "type": ["null", "string"] }, "name": { - "type": [ - "null", - "string" - ] - }, - "name_with_namespace": { - "type": [ - "null", - "string" - ] - }, - "namespace": { - "type": "object", - "properties": { - "id": { - "type": [ - "null", - "integer" - ] - }, - "kind": { - "type": [ - "null", - "string" - ] - }, - "name": { - "type": [ - "null", - "string" - ] - }, - "path": { - "type": [ - "null", - "string" - ] - }, - "full_path": { - "type": [ - "null", - "string" - ] - }, - "parent_id": { - "type": [ - "null", - "integer" - ] - } - } - }, - "only_allow_merge_if_all_discussions_are_resolved": { - "type": [ - "null", - "boolean" - ] - }, - "only_allow_merge_if_build_succeeds": { - "type": [ - "null", - "boolean" - ] - }, - "open_issues_count": { - "type": [ - "null", - "integer" - ] - }, - "owner_id": { - "type": [ - "null", - "integer" - ] + "type": ["null", "string"] }, "path": { - "type": [ - "null", - "string" - ] - }, - "path_with_namespace": { - "type": [ - "null", - "string" - ] - }, - "permissions": { - "type": "object", - "properties": { - "group_access": { - "type": [ - "object", - "null" - ], - "properties": { - "access_level": { - "type": [ - "null", - "integer" - ] - }, - "notification_level": { - "type": [ - "null", - "integer" - ] - } - } - }, - "project_access": { - "type": [ - "object", - "null" - ], - "properties": { - "access_level": { - "type": [ - "null", - "integer" - ] - }, - "notification_level": { - "type": [ - "null", - "integer" - ] - } - } - } - } - }, - "public": { - "type": [ - "null", - "boolean" - ] - }, - "public_builds": { - "type": [ - "null", - "boolean" - ] - }, - "request_access_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "shared_runners_enabled": { - "type": [ - "null", - "boolean" - ] - }, - "shared_with_groups": { - "type": [ - "array", - "null" - ], - "items": { - "type": "object", - "properties": { - "group_id": { - "type": "integer" - }, - "group_name": { - "type": "string" - }, - "group_access_level": { - "type": "integer" - } - } - } + "type": ["null", "string"] }, - "snippets_enabled": { - "type": [ - "null", - "boolean" - ] + "full_path": { + "type": ["null", "string"] }, - "ssh_url_to_repo": { - "type": [ - "null", - "string" - ] - }, - "star_count": { - "type": [ - "null", - "integer" - ] - }, - "statistics": { - "type": "object", - "properties": { - "commit_count": { - "type": [ - "null", - "integer" - ] - }, - "storage_size": { - "type": [ - "null", - "integer" - ] - }, - "repository_size": { - "type": [ - "null", - "integer" - ] - }, - "lfs_objects_size": { - "type": [ - "null", - "integer" - ] - }, - "job_artifacts_size": { - "type": [ - "null", - "integer" - ] - } + "parent_id": { + "type": ["null", "integer"] + } + } + }, + "only_allow_merge_if_all_discussions_are_resolved": { + "type": ["null", "boolean"] + }, + "only_allow_merge_if_build_succeeds": { + "type": ["null", "boolean"] + }, + "open_issues_count": { + 
"type": ["null", "integer"] + }, + "owner_id": { + "type": ["null", "integer"] + }, + "path": { + "type": ["null", "string"] + }, + "path_with_namespace": { + "type": ["null", "string"] + }, + "permissions": { + "type": "object", + "properties": { + "group_access": { + "type": ["object", "null"], + "properties": { + "access_level": { + "type": ["null", "integer"] + }, + "notification_level": { + "type": ["null", "integer"] } - }, - "tag_list": { - "type": [ - "array", - "null" - ], - "items": { - "type": "string" + } + }, + "project_access": { + "type": ["object", "null"], + "properties": { + "access_level": { + "type": ["null", "integer"] + }, + "notification_level": { + "type": ["null", "integer"] } - }, - "visibility_level": { - "type": [ - "null", - "integer" - ] - }, - "visibility": { - "type": [ - "null", - "string" - ] - }, - "web_url": { - "type": [ - "null", - "string" - ] - }, - "wiki_enabled": { - "type": [ - "null", - "boolean" - ] + } + } + } + }, + "public": { + "type": ["null", "boolean"] + }, + "public_builds": { + "type": ["null", "boolean"] + }, + "request_access_enabled": { + "type": ["null", "boolean"] + }, + "shared_runners_enabled": { + "type": ["null", "boolean"] + }, + "shared_with_groups": { + "type": ["array", "null"], + "items": { + "type": "object", + "properties": { + "group_id": { + "type": "integer" + }, + "group_name": { + "type": "string" + }, + "group_access_level": { + "type": "integer" + } + } + } + }, + "snippets_enabled": { + "type": ["null", "boolean"] + }, + "ssh_url_to_repo": { + "type": ["null", "string"] + }, + "star_count": { + "type": ["null", "integer"] + }, + "statistics": { + "type": ["object", "null"], + "properties": { + "commit_count": { + "type": ["null", "integer"] + }, + "storage_size": { + "type": ["null", "integer"] + }, + "repository_size": { + "type": ["null", "integer"] + }, + "lfs_objects_size": { + "type": ["null", "integer"] + }, + "job_artifacts_size": { + "type": ["null", "integer"] } + } + }, + "tag_list": { + "type": ["array", "null"], + "items": { + "type": "string" + } + }, + "visibility_level": { + "type": ["null", "integer"] + }, + "visibility": { + "type": ["null", "string"] + }, + "web_url": { + "type": ["null", "string"] + }, + "wiki_enabled": { + "type": ["null", "boolean"] } -} \ No newline at end of file + } +} diff --git a/samples/sample_tap_gitlab/schemas/releases.json b/samples/sample_tap_gitlab/schemas/releases.json index 3d2a8729a..9165c802d 100644 --- a/samples/sample_tap_gitlab/schemas/releases.json +++ b/samples/sample_tap_gitlab/schemas/releases.json @@ -1,54 +1,45 @@ { - "type": "object", - "properties": { - "tag_name": { - "type": "string" + "type": "object", + "properties": { + "tag_name": { + "type": "string" + }, + "author_id": { + "type": ["null", "integer"] + }, + "commit_id": { + "type": ["string", "null"] + }, + "project_id": { + "type": "string" + }, + "description": { + "type": ["null", "string"] + }, + "name": { + "type": ["null", "string"] + }, + "created_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" }, - "author_id": { - "type": [ - "null", - "integer" - ] - }, - "commit_id": { - "type": "string" - }, - "project_id": { - "type": "integer" - }, - "description": { - "type": [ - "null", - "string" - ] - }, - "name": { - "type": [ - "null", - "string" - ] - }, - "created_at": { - "anyOf": [ - { - "type": "string", - "format": "date-time" - }, - { - "type": "null" - } - ] + { + "type": "null" + } + ] + }, + "released_at": { + "anyOf": [ + { + "type": "string", + "format": 
"date-time" }, - "released_at": { - "anyOf": [ - { - "type": "string", - "format": "date-time" - }, - { - "type": "null" - } - ] + { + "type": "null" } + ] } -} \ No newline at end of file + } +} diff --git a/samples/sample_tap_google_analytics/__init__.py b/samples/sample_tap_google_analytics/__init__.py index 20469dd2a..dbe8aec91 100644 --- a/samples/sample_tap_google_analytics/__init__.py +++ b/samples/sample_tap_google_analytics/__init__.py @@ -1 +1,3 @@ """Google Analytics sample.""" + +from __future__ import annotations diff --git a/samples/sample_tap_google_analytics/ga_tap.py b/samples/sample_tap_google_analytics/ga_tap.py index 8044df0b2..0bade4370 100644 --- a/samples/sample_tap_google_analytics/ga_tap.py +++ b/samples/sample_tap_google_analytics/ga_tap.py @@ -1,8 +1,9 @@ """Sample tap test for tap-google-analytics.""" +from __future__ import annotations + import json from pathlib import Path -from typing import List from samples.sample_tap_google_analytics.ga_tap_stream import ( GASimpleSampleStream, @@ -23,10 +24,15 @@ class SampleTapGoogleAnalytics(Tap): name: str = "sample-tap-google-analytics" config_jsonschema = PropertiesList( Property("view_id", StringType(), required=True), - Property("client_email", StringType(), required=True), - Property("private_key", StringType(), required=True), + Property( + "client_email", + StringType(), + required=True, + examples=["me@example.com"], + ), + Property("private_key", StringType(), required=True, secret=True), ).to_dict() - def discover_streams(self) -> List[SampleGoogleAnalyticsStream]: + def discover_streams(self) -> list[SampleGoogleAnalyticsStream]: """Return a list of all streams.""" return [GASimpleSampleStream(tap=self)] diff --git a/samples/sample_tap_google_analytics/ga_tap_stream.py b/samples/sample_tap_google_analytics/ga_tap_stream.py index 5ff4fbfd9..8d1f09592 100644 --- a/samples/sample_tap_google_analytics/ga_tap_stream.py +++ b/samples/sample_tap_google_analytics/ga_tap_stream.py @@ -1,7 +1,9 @@ """Sample tap stream test for tap-google-analytics.""" +from __future__ import annotations + +import typing as t from pathlib import Path -from typing import Any, Iterable, List, Optional, cast import pendulum @@ -19,7 +21,7 @@ class GoogleJWTAuthenticator(OAuthJWTAuthenticator): @property def client_id(self) -> str: """Override since Google auth uses email, not numeric client ID.""" - return cast(str, self.config["client_email"]) + return t.cast(str, self.config["client_email"]) class SampleGoogleAnalyticsStream(RESTStream): @@ -30,8 +32,8 @@ class SampleGoogleAnalyticsStream(RESTStream): rest_method = "POST" # Child class overrides: - dimensions: List[str] = [] - metrics: List[str] = [] + dimensions: tuple[str] = () + metrics: tuple[str] = () @property def authenticator(self) -> GoogleJWTAuthenticator: @@ -43,37 +45,36 @@ def authenticator(self) -> GoogleJWTAuthenticator: ) def prepare_request_payload( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> Optional[dict]: + self, + context: dict | None, # noqa: ARG002 + next_page_token: t.Any | None, # noqa: ARG002 + ) -> dict | None: """Prepare the data payload for the REST API request.""" - # params = self.get_url_params(context, next_page_token) request_def = { "viewId": self.config["view_id"], "metrics": [{"expression": m} for m in self.metrics], "dimensions": [{"name": d} for d in self.dimensions], - # "orderBys": [ - # {"fieldName": "ga:sessions", "sortOrder": "DESCENDING"}, - # {"fieldName": "ga:pageviews", "sortOrder": "DESCENDING"}, - # ], } if 
self.config.get("start_date"): request_def["dateRanges"] = [ { "startDate": self.config.get("start_date"), "endDate": pendulum.now(tz="UTC"), - } + }, ] return {"reportRequests": [request_def]} - def parse_response(self, response) -> Iterable[dict]: + def parse_response(self, response) -> t.Iterable[dict]: """Parse Google Analytics API response into individual records.""" self.logger.info( - f"Received raw Google Analytics query response: {response.json()}" + "Received raw Google Analytics query response: %s", + response.json(), ) report_data = response.json().get("reports", [{}])[0].get("data") if not report_data: self.logger.info( - f"Received empty Google Analytics query response: {response.json()}" + "Received empty Google Analytics query response: %s", + response.json(), ) for total in report_data["totals"]: yield {"totals": total["values"]} @@ -85,5 +86,5 @@ class GASimpleSampleStream(SampleGoogleAnalyticsStream): name = "simple_sample" schema_filepath = SCHEMAS_DIR / "simple-sample.json" - dimensions = ["ga:date"] - metrics = ["ga:users", "ga:sessions"] + dimensions = ("ga:date",) + metrics = ("ga:users", "ga:sessions") diff --git a/samples/sample_tap_google_analytics/schemas/simple-sample.json b/samples/sample_tap_google_analytics/schemas/simple-sample.json index 33978e57f..794869260 100644 --- a/samples/sample_tap_google_analytics/schemas/simple-sample.json +++ b/samples/sample_tap_google_analytics/schemas/simple-sample.json @@ -1,95 +1,102 @@ { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "project_id": { - "type": "integer" - }, - "short_id": { - "type": "string" - }, - "title": { - "type": [ - "null", - "string" - ] - }, - "author_name": { - "type": [ - "null", - "string" - ] - }, - "author_email": { - "type": [ - "null", - "string" - ] - }, - "authored_date": { - "type": "string", - "format": "date-time" - }, - "committer_name": { - "type": [ - "null", - "string" - ] + "type": "object", + "properties": { + "id": { + "type": ["string", "null"] + }, + "project_id": { + "type": ["integer", "null"] + }, + "short_id": { + "type": ["string", "null"] + }, + "title": { + "type": ["null", "string"] + }, + "author_name": { + "type": ["null", "string"] + }, + "author_email": { + "type": ["null", "string"] + }, + "authored_date": { + "anyOf": [ + { + "type": "string", + "format": "date-time" }, - "committer_email": { - "type": [ - "null", - "string" - ] - }, - "committed_date": { - "type": "string", - "format": "date-time" - }, - "created_at": { - "type": "string", - "format": "date-time" - }, - "message": { - "type": [ - "null", - "string" - ] + { + "type": "null" + } + ] + }, + "committer_name": { + "type": ["null", "string"] + }, + "committer_email": { + "type": ["null", "string"] + }, + "committed_date": { + "anyOf": [ + { + "type": "string", + "format": "date-time" }, - "allow_failure": { - "type": [ - "null", - "boolean" - ] + { + "type": "null" + } + ] + }, + "created_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" }, - "parent_ids": { - "anyOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "null" - } - ] + { + "type": "null" + } + ] + }, + "message": { + "type": ["null", "string"] + }, + "allow_failure": { + "type": ["null", "boolean"] + }, + "parent_ids": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } }, - "stats": { - "type": "object", - "properties": { - "additions": { - "type": "integer" - }, - "deletions": { - "type": "integer" - }, - "total": { - "type": "integer" - 
} + { + "type": "null" + } + ] + }, + "stats": { + "anyOf": [ + { + "type": "object", + "properties": { + "additions": { + "type": "integer" + }, + "deletions": { + "type": "integer" + }, + "total": { + "type": "integer" } + } + }, + { + "type": "null" } + ] } -} \ No newline at end of file + } +} diff --git a/samples/sample_tap_hostile/__init__.py b/samples/sample_tap_hostile/__init__.py new file mode 100644 index 000000000..77ede8c7d --- /dev/null +++ b/samples/sample_tap_hostile/__init__.py @@ -0,0 +1,9 @@ +"""A sample tap for testing SQL target property name transformations.""" + +from __future__ import annotations + +from .hostile_tap import SampleTapHostile + +__all__ = [ + "SampleTapHostile", +] diff --git a/samples/sample_tap_hostile/hostile_streams.py b/samples/sample_tap_hostile/hostile_streams.py new file mode 100644 index 000000000..e711b769f --- /dev/null +++ b/samples/sample_tap_hostile/hostile_streams.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import random +import string +import typing as t + +from singer_sdk import typing as th +from singer_sdk.streams import Stream + + +class HostilePropertyNamesStream(Stream): + """ + A stream with property names that are not compatible as unescaped identifiers + in common DBMS systems. + """ + + name = "hostile_property_names_stream" + schema = th.PropertiesList( + th.Property("name with spaces", th.StringType), + th.Property("NameIsCamelCase", th.StringType), + th.Property("name-with-dashes", th.StringType), + th.Property("Name-with-Dashes-and-Mixed-cases", th.StringType), + th.Property("5name_starts_with_number", th.StringType), + th.Property("6name_starts_with_number", th.StringType), + th.Property("7name_starts_with_number", th.StringType), + th.Property("name_with_emoji_😈", th.StringType), + ).to_dict() + + @staticmethod + def get_random_lowercase_string(): + return "".join( + random.choice(string.ascii_lowercase) for _ in range(10) # noqa: S311 + ) + + def get_records( + self, + context: dict | None, # noqa: ARG002 + ) -> t.Iterable[dict | tuple[dict, dict]]: + return ( + { + key: self.get_random_lowercase_string() + for key in self.schema["properties"] + } + for _ in range(10) + ) diff --git a/samples/sample_tap_hostile/hostile_tap.py b/samples/sample_tap_hostile/hostile_tap.py new file mode 100644 index 000000000..957ab47a0 --- /dev/null +++ b/samples/sample_tap_hostile/hostile_tap.py @@ -0,0 +1,24 @@ +"""A sample tap for testing SQL target property name transformations.""" + +from __future__ import annotations + +from samples.sample_tap_hostile.hostile_streams import HostilePropertyNamesStream +from singer_sdk import Stream, Tap +from singer_sdk.typing import PropertiesList + + +class SampleTapHostile(Tap): + """Sample tap for for testing SQL target property name transformations.""" + + name: str = "sample-tap-hostile" + config_jsonschema = PropertiesList().to_dict() + + def discover_streams(self) -> list[Stream]: + """Return a list of discovered streams.""" + return [ + HostilePropertyNamesStream(tap=self), + ] + + +if __name__ == "__main__": + SampleTapHostile.cli() diff --git a/samples/sample_tap_sqlite/__init__.py b/samples/sample_tap_sqlite/__init__.py index 5f92b85d0..908c4f57a 100644 --- a/samples/sample_tap_sqlite/__init__.py +++ b/samples/sample_tap_sqlite/__init__.py @@ -1,8 +1,8 @@ """A sample implementation for SQLite.""" -from typing import Any, Dict +from __future__ import annotations -import sqlalchemy +import typing as t from singer_sdk import SQLConnector, SQLStream, SQLTap from singer_sdk 
import typing as th @@ -16,25 +16,10 @@ class SQLiteConnector(SQLConnector): This class handles all DDL and type conversions. """ - def get_sqlalchemy_url(self, config: Dict[str, Any]) -> str: + def get_sqlalchemy_url(self, config: dict[str, t.Any]) -> str: """Generates a SQLAlchemy URL for SQLite.""" return f"sqlite:///{config[DB_PATH_CONFIG]}" - def create_sqlalchemy_connection(self) -> sqlalchemy.engine.Connection: - """Return a new SQLAlchemy connection using the provided config. - - This override simply provides a more helpful error message on failure. - - Returns: - A newly created SQLAlchemy engine object. - """ - try: - return super().create_sqlalchemy_connection() - except Exception as ex: - raise RuntimeError( - f"Error connecting to DB at '{self.config[DB_PATH_CONFIG]}': {ex}" - ) from ex - class SQLiteStream(SQLStream): """The Stream class for SQLite. @@ -60,7 +45,8 @@ class SQLiteTap(SQLTap): DB_PATH_CONFIG, th.StringType, description="The path to your SQLite database file(s).", - ) + examples=["./path/to/my.db", "/absolute/path/to/my.db"], + ), ).to_dict() diff --git a/samples/sample_target_csv/__init__.py b/samples/sample_target_csv/__init__.py index e89cf32ae..979a6c2ba 100644 --- a/samples/sample_target_csv/__init__.py +++ b/samples/sample_target_csv/__init__.py @@ -1 +1,3 @@ """Module test for target-csv functionality.""" + +from __future__ import annotations diff --git a/samples/sample_target_csv/__main__.py b/samples/sample_target_csv/__main__.py new file mode 100644 index 000000000..f7c28343e --- /dev/null +++ b/samples/sample_target_csv/__main__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from samples.sample_target_csv.csv_target import SampleTargetCSV + +SampleTargetCSV.cli() diff --git a/samples/sample_target_csv/csv_target.py b/samples/sample_target_csv/csv_target.py index 2b9c4d3e9..d52b76f48 100644 --- a/samples/sample_target_csv/csv_target.py +++ b/samples/sample_target_csv/csv_target.py @@ -1,5 +1,7 @@ """Sample target test for target-csv.""" +from __future__ import annotations + from samples.sample_target_csv.csv_target_sink import SampleCSVTargetSink from singer_sdk import typing as th from singer_sdk.target_base import Target diff --git a/samples/sample_target_csv/csv_target_sink.py b/samples/sample_target_csv/csv_target_sink.py index f086321d9..4f02bf0bb 100644 --- a/samples/sample_target_csv/csv_target_sink.py +++ b/samples/sample_target_csv/csv_target_sink.py @@ -1,5 +1,7 @@ """Sample Parquet target stream class, which handles writing streams.""" +from __future__ import annotations + import csv from pathlib import Path @@ -30,16 +32,20 @@ def process_batch(self, context: dict) -> None: openmode = "a" outpath = self.target_filepath.absolute() if not outpath.is_file(): - self.logger.info(f"Writing to new file: {outpath}") + self.logger.info("Writing to new file: %s", outpath) newfile = True openmode = "w" - with open(outpath, openmode, newline="\n", encoding="utf-8") as csvfile: + with outpath.open(openmode, newline="\n", encoding="utf-8") as csvfile: writer = csv.writer( - csvfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_NONNUMERIC + csvfile, + delimiter="\t", + quotechar='"', + quoting=csv.QUOTE_NONNUMERIC, + escapechar="\\", ) for record in records_to_drain: if newfile and not records_written: - # Write header row if new file + # Write header line if new file writer.writerow(record.keys()) writer.writerow(record.values()) records_written += 1 diff --git a/samples/sample_target_parquet/__init__.py 
b/samples/sample_target_parquet/__init__.py index 1a4bad056..748f22665 100644 --- a/samples/sample_target_parquet/__init__.py +++ b/samples/sample_target_parquet/__init__.py @@ -1,6 +1,8 @@ """Module test for target-parquet functionality.""" +from __future__ import annotations + # Reuse the tap connection rather than create a new target connection: from samples.sample_target_parquet.parquet_target import SampleTargetParquet from samples.sample_target_parquet.parquet_target_sink import SampleParquetTargetSink diff --git a/samples/sample_target_parquet/parquet_target.py b/samples/sample_target_parquet/parquet_target.py index 2bbbe32d5..d8c8f7a74 100644 --- a/samples/sample_target_parquet/parquet_target.py +++ b/samples/sample_target_parquet/parquet_target.py @@ -1,5 +1,7 @@ """Sample target test for target-parquet.""" +from __future__ import annotations + from samples.sample_target_parquet.parquet_target_sink import SampleParquetTargetSink from singer_sdk import typing as th from singer_sdk.target_base import Target diff --git a/samples/sample_target_parquet/parquet_target_sink.py b/samples/sample_target_parquet/parquet_target_sink.py index e90de2872..e98dca2b1 100644 --- a/samples/sample_target_parquet/parquet_target_sink.py +++ b/samples/sample_target_parquet/parquet_target_sink.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, Dict, Union +import typing as t import pyarrow as pa import pyarrow.parquet as pq @@ -10,7 +10,7 @@ from singer_sdk.sinks import BatchSink -def json_schema_to_arrow(schema: dict[str, Any]) -> pa.Schema: +def json_schema_to_arrow(schema: dict[str, t.Any]) -> pa.Schema: """Convert a JSON Schema to an Arrow schema. Args: @@ -23,7 +23,7 @@ def json_schema_to_arrow(schema: dict[str, Any]) -> pa.Schema: return pa.schema(fields) -def _json_schema_to_arrow_fields(schema: dict[str, Any]) -> pa.StructType: +def _json_schema_to_arrow_fields(schema: dict[str, t.Any]) -> pa.StructType: """Convert a JSON Schema to an Arrow struct. Args: @@ -36,10 +36,13 @@ def _json_schema_to_arrow_fields(schema: dict[str, Any]) -> pa.StructType: for name, property_schema in schema.get("properties", {}).items(): field = pa.field(name, _json_type_to_arrow_field(property_schema)) fields.append(field) - return fields + return fields if fields else [pa.field("dummy", pa.string())] -def _json_type_to_arrow_field(schema_type: dict[str, Any]) -> pa.DataType: + +def _json_type_to_arrow_field( # noqa: PLR0911 + schema_type: dict[str, t.Any], +) -> pa.DataType: """Convert a JSON Schema to an Arrow struct. 
Args: @@ -62,22 +65,22 @@ def _json_type_to_arrow_field(schema_type: dict[str, Any]) -> pa.DataType: items = schema_type.get("items", {}) return pa.list_(_json_type_to_arrow_field(items)) - elif main_type == "object": + if main_type == "object": return pa.struct(_json_schema_to_arrow_fields(schema_type)) - elif main_type == "string": + if main_type == "string": return pa.string() - elif main_type == "integer": + if main_type == "integer": return pa.int64() - elif main_type == "number": + if main_type == "number": return pa.float64() - elif main_type == "boolean": + if main_type == "boolean": return pa.bool_() - elif main_type == "null": + if main_type == "null": return pa.null() return pa.null() @@ -97,22 +100,3 @@ def process_batch(self, context: dict) -> None: table = pa.Table.from_pylist(records_to_drain, schema=schema) writer.write_table(table) writer.close() - - @staticmethod - def translate_data_type(singer_type: str | dict) -> Any: - """Translate from singer_type to a native type.""" - if singer_type in ["decimal", "float", "double"]: - return pa.decimal128 - if singer_type in ["date-time"]: - return pa.datetime - if singer_type in ["date"]: - return pa.date64 - return pa.string - - def _get_parquet_schema(self) -> List[Tuple[str, Any]]: - col_list: List[Tuple[str, Any]] = [] - for property in self.schema["properties"]: - col_list.append( - (property["name"], self.translate_data_type(property["type"])) - ) - return col_list diff --git a/samples/sample_target_sqlite/__init__.py b/samples/sample_target_sqlite/__init__.py index d4e4372bd..bd759e464 100644 --- a/samples/sample_target_sqlite/__init__.py +++ b/samples/sample_target_sqlite/__init__.py @@ -1,8 +1,8 @@ """A sample implementation for SQLite.""" -from typing import Any, Dict +from __future__ import annotations -import sqlalchemy +import typing as t from singer_sdk import SQLConnector, SQLSink, SQLTarget from singer_sdk import typing as th @@ -20,25 +20,10 @@ class SQLiteConnector(SQLConnector): allow_column_alter = False allow_merge_upsert = True - def get_sqlalchemy_url(self, config: Dict[str, Any]) -> str: + def get_sqlalchemy_url(self, config: dict[str, t.Any]) -> str: """Generates a SQLAlchemy URL for SQLite.""" return f"sqlite:///{config[DB_PATH_CONFIG]}" - def create_sqlalchemy_connection(self) -> sqlalchemy.engine.Connection: - """Return a new SQLAlchemy connection using the provided config. - - This override simply provides a more helpful error message on failure. - - Returns: - A newly created SQLAlchemy engine object. - """ - try: - return super().create_sqlalchemy_connection() - except Exception as ex: - raise RuntimeError( - f"Error connecting to DB at '{self.config[DB_PATH_CONFIG]}'" - ) from ex - class SQLiteSink(SQLSink): """The Sink class for SQLite. 
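With the custom create_sqlalchemy_connection override removed above, the SQLite sample connector now relies entirely on get_sqlalchemy_url, and connection failures surface directly from SQLAlchemy rather than from the old RuntimeError wrapper. A minimal sketch of the resulting URL, assuming DB_PATH_CONFIG resolves to the "path_to_db" setting (hypothetical config value):

    # Mirrors SQLiteConnector.get_sqlalchemy_url from the hunk above (illustrative only).
    config = {"path_to_db": "./my.db"}
    url = f"sqlite:///{config['path_to_db']}"  # -> "sqlite:///./my.db"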
@@ -66,7 +51,7 @@ class SQLiteTarget(SQLTarget): DB_PATH_CONFIG, th.StringType, description="The path to your SQLite database file(s).", - ) + ), ).to_dict() diff --git a/samples/sample_target_sqlite/__main__.py b/samples/sample_target_sqlite/__main__.py new file mode 100644 index 000000000..6eaf3a653 --- /dev/null +++ b/samples/sample_target_sqlite/__main__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from samples.sample_target_sqlite import SQLiteTarget + +SQLiteTarget.cli() diff --git a/singer_sdk/__init__.py b/singer_sdk/__init__.py index 71a3d017d..723928f2f 100644 --- a/singer_sdk/__init__.py +++ b/singer_sdk/__init__.py @@ -1,16 +1,13 @@ -"""SDK for building singer-compliant Singer taps.""" +"""SDK for building Singer taps.""" + +from __future__ import annotations from singer_sdk import streams +from singer_sdk.connectors import SQLConnector from singer_sdk.mapper_base import InlineMapper from singer_sdk.plugin_base import PluginBase from singer_sdk.sinks import BatchSink, RecordSink, Sink, SQLSink -from singer_sdk.streams import ( - GraphQLStream, - RESTStream, - SQLConnector, - SQLStream, - Stream, -) +from singer_sdk.streams import GraphQLStream, RESTStream, SQLStream, Stream from singer_sdk.tap_base import SQLTap, Tap from singer_sdk.target_base import SQLTarget, Target diff --git a/singer_sdk/_singerlib/__init__.py b/singer_sdk/_singerlib/__init__.py new file mode 100644 index 000000000..150d0d2cd --- /dev/null +++ b/singer_sdk/_singerlib/__init__.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from singer_sdk._singerlib.catalog import ( + Catalog, + CatalogEntry, + Metadata, + MetadataMapping, + SelectionMask, + StreamMetadata, +) +from singer_sdk._singerlib.messages import ( + ActivateVersionMessage, + Message, + RecordMessage, + SchemaMessage, + SingerMessageType, + StateMessage, + exclude_null_dict, + write_message, +) +from singer_sdk._singerlib.schema import Schema, resolve_schema_references +from singer_sdk._singerlib.utils import strftime, strptime_to_utc + +__all__ = [ + "Catalog", + "CatalogEntry", + "Metadata", + "MetadataMapping", + "SelectionMask", + "StreamMetadata", + "ActivateVersionMessage", + "Message", + "RecordMessage", + "SchemaMessage", + "SingerMessageType", + "StateMessage", + "exclude_null_dict", + "write_message", + "Schema", + "resolve_schema_references", + "strftime", + "strptime_to_utc", +] diff --git a/singer_sdk/_singerlib/catalog.py b/singer_sdk/_singerlib/catalog.py new file mode 100644 index 000000000..77fe884d8 --- /dev/null +++ b/singer_sdk/_singerlib/catalog.py @@ -0,0 +1,417 @@ +from __future__ import annotations + +import enum +import logging +import typing as t +from dataclasses import dataclass, fields + +from singer_sdk._singerlib.schema import Schema + +if t.TYPE_CHECKING: + from typing_extensions import TypeAlias + + +Breadcrumb = t.Tuple[str, ...] + +logger = logging.getLogger(__name__) + + +class SelectionMask(t.Dict[Breadcrumb, bool]): + """Boolean mask for property selection in schemas and records.""" + + def __missing__(self, breadcrumb: Breadcrumb) -> bool: + """Handle missing breadcrumbs. + + - Properties default to parent value if available. + - Root (stream) defaults to True. + + Args: + breadcrumb: Breadcrumb to check. + + Returns: + True if the breadcrumb is selected, False otherwise. 
+ """ + if len(breadcrumb) >= 2: # noqa: PLR2004 + parent = breadcrumb[:-2] + return self[parent] + + return True + + +@dataclass +class Metadata: + """Base stream or property metadata.""" + + class InclusionType(str, enum.Enum): + """Catalog inclusion types.""" + + AVAILABLE = "available" + AUTOMATIC = "automatic" + UNSUPPORTED = "unsupported" + + inclusion: InclusionType | None = None + selected: bool | None = None + selected_by_default: bool | None = None + + @classmethod + def from_dict(cls: type[Metadata], value: dict[str, t.Any]) -> Metadata: + """Parse metadata dictionary. + + Args: + value: Metadata dictionary. + + Returns: + Metadata object. + """ + return cls( + **{ + object_field.name: value.get(object_field.name.replace("_", "-")) + for object_field in fields(cls) + }, + ) + + def to_dict(self) -> dict[str, t.Any]: + """Convert metadata to a JSON-encodeable dictionary. + + Returns: + Metadata object. + """ + result = {} + + for object_field in fields(self): + value = getattr(self, object_field.name) + if value is not None: + result[object_field.name.replace("_", "-")] = value + + return result + + +@dataclass +class StreamMetadata(Metadata): + """Stream metadata.""" + + table_key_properties: list[str] | None = None + forced_replication_method: str | None = None + valid_replication_keys: list[str] | None = None + schema_name: str | None = None + + +AnyMetadata: TypeAlias = t.Union[Metadata, StreamMetadata] + + +class MetadataMapping(t.Dict[Breadcrumb, AnyMetadata]): + """Stream metadata mapping.""" + + @classmethod + def from_iterable( + cls: type[MetadataMapping], + iterable: t.Iterable[dict[str, t.Any]], + ) -> MetadataMapping: + """Create a metadata mapping from an iterable of metadata dictionaries. + + Args: + iterable: t.Iterable of metadata dictionaries. + + Returns: + Metadata mapping. + """ + mapping = cls() + for d in iterable: + breadcrumb = tuple(d["breadcrumb"]) + metadata = d["metadata"] + if breadcrumb: + mapping[breadcrumb] = Metadata.from_dict(metadata) + else: + mapping[breadcrumb] = StreamMetadata.from_dict(metadata) + + return mapping + + def to_list(self) -> list[dict[str, t.Any]]: + """Convert mapping to a JSON-encodable list. + + Returns: + List of metadata dictionaries. + """ + return [ + {"breadcrumb": list(k), "metadata": v.to_dict()} for k, v in self.items() + ] + + def __missing__(self, breadcrumb: Breadcrumb) -> AnyMetadata: + """Handle missing metadata entries. + + Args: + breadcrumb: Breadcrumb to check. + + Returns: + Metadata object. + """ + self[breadcrumb] = Metadata() if breadcrumb else StreamMetadata() + return self[breadcrumb] + + @property + def root(self) -> StreamMetadata: + """Get stream (root) metadata from this mapping. + + Returns: + Stream metadata. + """ + return self[()] # type: ignore[return-value] + + @classmethod + def get_standard_metadata( + cls: type[MetadataMapping], + *, + schema: dict[str, t.Any] | None = None, + schema_name: str | None = None, + key_properties: list[str] | None = None, + valid_replication_keys: list[str] | None = None, + replication_method: str | None = None, + selected_by_default: bool | None = None, + ) -> MetadataMapping: + """Get default metadata for a stream. + + Args: + schema: Stream schema. + schema_name: Stream schema name. + key_properties: Stream key properties. + valid_replication_keys: Stream valid replication keys. + replication_method: Stream replication method. + selected_by_default: Whether the stream is selected by default. + + Returns: + Metadata mapping. 
+ """ + mapping = cls() + root = StreamMetadata( + table_key_properties=key_properties, + forced_replication_method=replication_method, + valid_replication_keys=valid_replication_keys, + selected_by_default=selected_by_default, + ) + + if schema: + root.inclusion = Metadata.InclusionType.AVAILABLE + + if schema_name: + root.schema_name = schema_name + + for field_name in schema.get("properties", {}): + if ( + key_properties + and field_name in key_properties + or (valid_replication_keys and field_name in valid_replication_keys) + ): + entry = Metadata(inclusion=Metadata.InclusionType.AUTOMATIC) + else: + entry = Metadata(inclusion=Metadata.InclusionType.AVAILABLE) + + mapping[("properties", field_name)] = entry + + mapping[()] = root + + return mapping + + def resolve_selection(self) -> SelectionMask: + """Resolve selection for metadata breadcrumbs and store them in a mapping. + + Returns: + Selection mask. + """ + return SelectionMask( + (breadcrumb, self._breadcrumb_is_selected(breadcrumb)) + for breadcrumb in self + ) + + def _breadcrumb_is_selected(self, breadcrumb: Breadcrumb) -> bool: # noqa: PLR0911 + """Determine if a property breadcrumb is selected based on existing metadata. + + An empty breadcrumb (empty tuple) indicates the stream itself. Otherwise, the + breadcrumb is the path to a property within the stream. + + Args: + breadcrumb: Breadcrumb to check. + + Returns: + True if the breadcrumb is selected, False otherwise. + """ + if not self: + # Default to true if no metadata to say otherwise + return True + + md_entry = self.get(breadcrumb, Metadata()) + parent_value = None + + if len(breadcrumb) > 0: + parent_breadcrumb = breadcrumb[:-2] + parent_value = self._breadcrumb_is_selected(parent_breadcrumb) + + if parent_value is False: + return parent_value + + if md_entry.inclusion == Metadata.InclusionType.UNSUPPORTED: + if md_entry.selected is True: + logger.debug( + "Property '%s' was selected but is not supported. " + "Ignoring selected==True input.", + ":".join(breadcrumb), + ) + return False + + if md_entry.inclusion == Metadata.InclusionType.AUTOMATIC: + if md_entry.selected is False: + logger.debug( + "Property '%s' was deselected while also set " + "for automatic inclusion. Ignoring selected==False input.", + ":".join(breadcrumb), + ) + return True + + if md_entry.selected is not None: + return md_entry.selected + + if md_entry.selected_by_default is not None: + return md_entry.selected_by_default + + logger.debug( + "Selection metadata omitted for '%s'. " + "Using parent value of selected=%s.", + breadcrumb, + parent_value, + ) + return parent_value or False + + +@dataclass +class CatalogEntry: + """Singer catalog entry.""" + + tap_stream_id: str + metadata: MetadataMapping + schema: Schema + stream: str | None = None + key_properties: list[str] | None = None + replication_key: str | None = None + is_view: bool | None = None + database: str | None = None + table: str | None = None + row_count: int | None = None + stream_alias: str | None = None + replication_method: str | None = None + + @classmethod + def from_dict(cls: type[CatalogEntry], stream: dict[str, t.Any]) -> CatalogEntry: + """Create a catalog entry from a dictionary. + + Args: + stream: A dictionary with the defined catalog stream. + + Returns: + A catalog entry. 
+ """ + return cls( + tap_stream_id=stream["tap_stream_id"], + stream=stream.get("stream"), + replication_key=stream.get("replication_key"), + key_properties=stream.get("key_properties"), + database=stream.get("database_name"), + table=stream.get("table_name"), + schema=Schema.from_dict(stream.get("schema", {})), + is_view=stream.get("is_view"), + row_count=stream.get("row_count"), + stream_alias=stream.get("stream_alias"), + metadata=MetadataMapping.from_iterable(stream.get("metadata", [])), + replication_method=stream.get("replication_method"), + ) + + def to_dict(self) -> dict[str, t.Any]: # noqa: C901 + """Convert entry to a dictionary. + + Returns: + A dictionary representation of the catalog entry. + """ + result: dict[str, t.Any] = {} + if self.tap_stream_id: + result["tap_stream_id"] = self.tap_stream_id + if self.database: + result["database_name"] = self.database + if self.table: + result["table_name"] = self.table + if self.replication_key is not None: + result["replication_key"] = self.replication_key + if self.replication_method is not None: + result["replication_method"] = self.replication_method + if self.key_properties is not None: + result["key_properties"] = self.key_properties + if self.schema is not None: + schema = self.schema.to_dict() # pylint: disable=no-member + result["schema"] = schema + if self.is_view is not None: + result["is_view"] = self.is_view + if self.stream is not None: + result["stream"] = self.stream + if self.row_count is not None: + result["row_count"] = self.row_count + if self.stream_alias is not None: + result["stream_alias"] = self.stream_alias + if self.metadata is not None: + result["metadata"] = self.metadata.to_list() + return result + + +class Catalog(t.Dict[str, CatalogEntry]): + """Singer catalog mapping of stream entries.""" + + @classmethod + def from_dict( + cls: type[Catalog], + data: dict[str, list[dict[str, t.Any]]], + ) -> Catalog: + """Create a catalog from a dictionary. + + Args: + data: A dictionary with the defined catalog streams. + + Returns: + A catalog. + """ + instance = cls() + for stream in data.get("streams", []): + entry = CatalogEntry.from_dict(stream) + instance[entry.tap_stream_id] = entry + return instance + + def to_dict(self) -> dict[str, t.Any]: + """Return a dictionary representation of the catalog. + + Returns: + A dictionary with the defined catalog streams. + """ + return {"streams": [stream.to_dict() for stream in self.streams]} + + @property + def streams(self) -> list[CatalogEntry]: + """Get catalog entries. + + Returns: + A list of catalog entries. + """ + return list(self.values()) + + def add_stream(self, entry: CatalogEntry) -> None: + """Add a stream entry to the catalog. + + Args: + entry: The stream entry to add. + """ + self[entry.tap_stream_id] = entry + + def get_stream(self, stream_id: str) -> CatalogEntry | None: + """Retrieve a stream entry from the catalog. + + Args: + stream_id: The tap stream id of the stream to retrieve. + + Returns: + The stream entry if found, otherwise None. 
+ """ + return self.get(stream_id) diff --git a/singer_sdk/_singerlib/messages.py b/singer_sdk/_singerlib/messages.py new file mode 100644 index 000000000..7fc17e57d --- /dev/null +++ b/singer_sdk/_singerlib/messages.py @@ -0,0 +1,202 @@ +"""Singer message types and utilities.""" + +from __future__ import annotations + +import enum +import sys +import typing as t +from dataclasses import asdict, dataclass, field + +import pytz +import simplejson as json + +if t.TYPE_CHECKING: + from datetime import datetime + + +class SingerMessageType(str, enum.Enum): + """Singer specification message types.""" + + RECORD = "RECORD" + SCHEMA = "SCHEMA" + STATE = "STATE" + ACTIVATE_VERSION = "ACTIVATE_VERSION" + BATCH = "BATCH" + + +def exclude_null_dict(pairs: list[tuple[str, t.Any]]) -> dict[str, t.Any]: + """Exclude null values from a dictionary. + + Args: + pairs: The dictionary key-value pairs. + + Returns: + The filtered key-value pairs. + """ + return {key: value for key, value in pairs if value is not None} + + +@dataclass +class Message: + """Singer base message.""" + + type: SingerMessageType = field(init=False) # noqa: A003 + """The message type.""" + + def to_dict(self) -> dict[str, t.Any]: + """Return a dictionary representation of the message. + + Returns: + A dictionary with the defined message fields. + """ + return asdict(self, dict_factory=exclude_null_dict) + + @classmethod + def from_dict( + cls: t.Type[Message], # noqa: UP006 + data: dict[str, t.Any], + ) -> Message: + """Create an encoding from a dictionary. + + Args: + data: The dictionary to create the message from. + + Returns: + The created message. + """ + data.pop("type") + return cls(**data) + + +@dataclass +class RecordMessage(Message): + """Singer record message.""" + + stream: str + """The stream name.""" + + record: dict[str, t.Any] + """The record data.""" + + version: int | None = None + """The record version.""" + + time_extracted: datetime | None = None + """The time the record was extracted.""" + + def to_dict(self) -> dict[str, t.Any]: + """Return a dictionary representation of the message. + + This overrides the default conversion logic, since it uses unnecessary + deep copying and is very slow. + + Returns: + A dictionary with the defined message fields. + """ + result: dict[str, t.Any] = { + "type": "RECORD", + "stream": self.stream, + "record": self.record, + } + if self.version is not None: + result["version"] = self.version + if self.time_extracted is not None: + result["time_extracted"] = self.time_extracted + return result + + def __post_init__(self) -> None: + """Post-init processing. + + Raises: + ValueError: If the time_extracted is not timezone-aware. + """ + self.type = SingerMessageType.RECORD + if self.time_extracted and not self.time_extracted.tzinfo: + msg = ( + "'time_extracted' must be either None or an aware datetime (with a " + "time zone)" + ) + raise ValueError(msg) + + if self.time_extracted: + self.time_extracted = self.time_extracted.astimezone(pytz.utc) + + +@dataclass +class SchemaMessage(Message): + """Singer schema message.""" + + stream: str + """The stream name.""" + + schema: dict[str, t.Any] + """The schema definition.""" + + key_properties: list[str] | None = None + """The key properties.""" + + bookmark_properties: list[str] | None = None + """The bookmark properties.""" + + def __post_init__(self) -> None: + """Post-init processing. + + Raises: + ValueError: If bookmark_properties is not a string or list of strings. 
+ """ + self.type = SingerMessageType.SCHEMA + + if isinstance(self.bookmark_properties, (str, bytes)): + self.bookmark_properties = [self.bookmark_properties] + if self.bookmark_properties and not isinstance(self.bookmark_properties, list): + msg = "bookmark_properties must be a string or list of strings" + raise ValueError(msg) + + +@dataclass +class StateMessage(Message): + """Singer state message.""" + + value: dict[str, t.Any] + """The state value.""" + + def __post_init__(self) -> None: + """Post-init processing.""" + self.type = SingerMessageType.STATE + + +@dataclass +class ActivateVersionMessage(Message): + """Singer activate version message.""" + + stream: str + """The stream name.""" + + version: int + """The version to activate.""" + + def __post_init__(self) -> None: + """Post-init processing.""" + self.type = SingerMessageType.ACTIVATE_VERSION + + +def format_message(message: Message) -> str: + """Format a message as a JSON string. + + Args: + message: The message to format. + + Returns: + The formatted message. + """ + return json.dumps(message.to_dict(), use_decimal=True, default=str) + + +def write_message(message: Message) -> None: + """Write a message to stdout. + + Args: + message: The message to write. + """ + sys.stdout.write(format_message(message) + "\n") + sys.stdout.flush() diff --git a/singer_sdk/_singerlib/schema.py b/singer_sdk/_singerlib/schema.py new file mode 100644 index 000000000..9ef615e0d --- /dev/null +++ b/singer_sdk/_singerlib/schema.py @@ -0,0 +1,183 @@ +"""Provides an object model for JSON Schema.""" + +from __future__ import annotations + +import typing as t +from dataclasses import dataclass + +from jsonschema import RefResolver + +# These are keys defined in the JSON Schema spec that do not themselves contain +# schemas (or lists of schemas) +STANDARD_KEYS = [ + "title", + "description", + "minimum", + "maximum", + "exclusiveMinimum", + "exclusiveMaximum", + "multipleOf", + "maxLength", + "minLength", + "format", + "type", + "required", + "enum", + "pattern", + "contentMediaType", + "contentEncoding", + # These are NOT simple keys (they can contain schemas themselves). We could + # consider adding extra handling to them. + "additionalProperties", + "anyOf", + "patternProperties", +] + + +@dataclass +class Schema: + """Object model for JSON Schema. + + Tap and Target authors may find this to be more convenient than + working directly with JSON Schema data structures. + + This is based on, and overwrites + https://github.com/transferwise/pipelinewise-singer-python/blob/master/singer/schema.py. + This is because we wanted to expand it with extra STANDARD_KEYS. 
+ """ + + type: str | list[str] | None = None # noqa: A003 + properties: dict | None = None + items: t.Any | None = None + description: str | None = None + minimum: float | None = None + maximum: float | None = None + exclusiveMinimum: float | None = None # noqa: N815 + exclusiveMaximum: float | None = None # noqa: N815 + multipleOf: float | None = None # noqa: N815 + maxLength: int | None = None # noqa: N815 + minLength: int | None = None # noqa: N815 + anyOf: t.Any | None = None # noqa: N815 + format: str | None = None # noqa: A003 + additionalProperties: t.Any | None = None # noqa: N815 + patternProperties: t.Any | None = None # noqa: N815 + required: list[str] | None = None + enum: list[t.Any] | None = None + title: str | None = None + pattern: str | None = None + contentMediaType: str | None = None # noqa: N815 + contentEncoding: str | None = None # noqa: N815 + + def to_dict(self) -> dict[str, t.Any]: + """Return the raw JSON Schema as a (possibly nested) dict. + + Returns: + The raw JSON Schema as a (possibly nested) dict. + """ + result = {} + + if self.properties is not None: + result["properties"] = {k: v.to_dict() for k, v in self.properties.items()} + + if self.items is not None: + result["items"] = self.items.to_dict() + + for key in STANDARD_KEYS: + if self.__dict__.get(key) is not None: + result[key] = self.__dict__[key] + + return result + + @classmethod + def from_dict( + cls: t.Type[Schema], # noqa: UP006 + data: dict, + **schema_defaults: t.Any, + ) -> Schema: + """Initialize a Schema object based on the JSON Schema structure. + + Args: + data: The JSON Schema structure. + schema_defaults: Default values for the schema. + + Returns: + The initialized Schema object. + """ + kwargs = schema_defaults.copy() + properties = data.get("properties") + items = data.get("items") + + if properties is not None: + kwargs["properties"] = { + k: cls.from_dict(v, **schema_defaults) for k, v in properties.items() + } + if items is not None: + kwargs["items"] = cls.from_dict(items, **schema_defaults) + for key in STANDARD_KEYS: + if key in data: + kwargs[key] = data[key] + return cls(**kwargs) + + +class _SchemaKey: + ref = "$ref" + items = "items" + properties = "properties" + pattern_properties = "patternProperties" + any_of = "anyOf" + + +def resolve_schema_references( + schema: dict[str, t.Any], + refs: dict[str, str] | None = None, +) -> dict: + """Resolves and replaces json-schema $refs with the appropriate dict. + + Recursively walks the given schema dict, converting every instance of $ref in a + 'properties' structure with a resolved dict. + + This modifies the input schema and also returns it. + + Args: + schema: The schema dict + refs: A dict of <string, dict> which forms a store of referenced schemata. + + Returns: + A schema dict with all $refs replaced with the appropriate dict. 
+ """ + refs = refs or {} + return _resolve_schema_references(schema, RefResolver("", schema, store=refs)) + + +def _resolve_schema_references( + schema: dict[str, t.Any], + resolver: RefResolver, +) -> dict[str, t.Any]: + if _SchemaKey.ref in schema: + reference_path = schema.pop(_SchemaKey.ref, None) + resolved = resolver.resolve(reference_path)[1] + schema.update(resolved) + return _resolve_schema_references(schema, resolver) + + if _SchemaKey.properties in schema: + for k, val in schema[_SchemaKey.properties].items(): + schema[_SchemaKey.properties][k] = _resolve_schema_references(val, resolver) + + if _SchemaKey.pattern_properties in schema: + for k, val in schema[_SchemaKey.pattern_properties].items(): + schema[_SchemaKey.pattern_properties][k] = _resolve_schema_references( + val, + resolver, + ) + + if _SchemaKey.items in schema: + schema[_SchemaKey.items] = _resolve_schema_references( + schema[_SchemaKey.items], + resolver, + ) + + if _SchemaKey.any_of in schema: + for i, element in enumerate(schema[_SchemaKey.any_of]): + schema[_SchemaKey.any_of][i] = _resolve_schema_references(element, resolver) + + return schema diff --git a/singer_sdk/_singerlib/utils.py b/singer_sdk/_singerlib/utils.py new file mode 100644 index 000000000..778b23a49 --- /dev/null +++ b/singer_sdk/_singerlib/utils.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from datetime import datetime, timedelta + +import dateutil.parser +import pytz + +DATETIME_FMT = "%04Y-%m-%dT%H:%M:%S.%fZ" +DATETIME_FMT_SAFE = "%Y-%m-%dT%H:%M:%S.%fZ" + + +class NonUTCDatetimeError(Exception): + """Raised when a non-UTC datetime is passed to a function expecting UTC.""" + + def __init__(self) -> None: + """Initialize the exception.""" + super().__init__("datetime must be pegged at UTC tzoneinfo") + + +def strptime_to_utc(dtimestr: str) -> datetime: + """Parses a provide datetime string into a UTC datetime object. + + Args: + dtimestr: a string representation of a datetime + + Returns: + A UTC datetime.datetime object + """ + d_object: datetime = dateutil.parser.parse(dtimestr) + if d_object.tzinfo is None: + return d_object.replace(tzinfo=pytz.UTC) + + return d_object.astimezone(tz=pytz.UTC) + + +def strftime(dtime: datetime, format_str: str = DATETIME_FMT) -> str: + """Formats a provided datetime object as a string. 
+ + Args: + dtime: a datetime + format_str: output format specification + + Returns: + A string in the specified format + + Raises: + NonUTCDatetimeError: if the datetime is not UTC (if it has a nonzero time zone + offset) + """ + if dtime.utcoffset() != timedelta(0): + raise NonUTCDatetimeError + + dt_str = None + try: + dt_str = dtime.strftime(format_str) + if dt_str.startswith("4Y"): + dt_str = dtime.strftime(DATETIME_FMT_SAFE) + except ValueError: + dt_str = dtime.strftime(DATETIME_FMT_SAFE) + return dt_str diff --git a/singer_sdk/about.py b/singer_sdk/about.py new file mode 100644 index 000000000..78478c417 --- /dev/null +++ b/singer_sdk/about.py @@ -0,0 +1,206 @@ +"""About information for a plugin.""" + +from __future__ import annotations + +import abc +import dataclasses +import json +import typing as t +from collections import OrderedDict +from textwrap import dedent + +if t.TYPE_CHECKING: + from singer_sdk.helpers.capabilities import CapabilitiesEnum + +__all__ = [ + "AboutInfo", + "AboutFormatter", + "JSONFormatter", + "MarkdownFormatter", +] + + +@dataclasses.dataclass +class AboutInfo: + """About information for a plugin.""" + + name: str + description: str | None + version: str + sdk_version: str + supported_python_versions: list[str] | None + + capabilities: list[CapabilitiesEnum] + settings: dict + + +class AboutFormatter(abc.ABC): + """Abstract base class for about formatters.""" + + formats: t.ClassVar[dict[str, type[AboutFormatter]]] = {} + format_name: str + + def __init_subclass__(cls, format_name: str) -> None: + """Initialize subclass. + + Args: + format_name: Name of the format. + """ + cls.formats[format_name] = cls + super().__init_subclass__() + + @classmethod + def get_formatter(cls, name: str) -> AboutFormatter: + """Get a formatter by name. + + Args: + name: Name of the formatter. + + Returns: + A formatter. + """ + return cls.formats[name]() + + @abc.abstractmethod + def format_about(self, about_info: AboutInfo) -> str: + """Render about information. + + Args: + about_info: About information. + """ + ... + + +class TextFormatter(AboutFormatter, format_name="text"): + """About formatter for text output.""" + + def format_about(self, about_info: AboutInfo) -> str: + """Render about information. + + Args: + about_info: About information. + + Returns: + A formatted string. + """ + return dedent( + f"""\ + Name: {about_info.name} + Description: {about_info.description} + Version: {about_info.version} + SDK Version: {about_info.sdk_version} + Supported Python Versions: {about_info.supported_python_versions} + Capabilities: {about_info.capabilities} + Settings: {about_info.settings}""", + ) + + +class JSONFormatter(AboutFormatter, format_name="json"): + """About formatter for JSON output.""" + + def __init__(self) -> None: + """Initialize a JSONAboutFormatter.""" + self.indent = 2 + self.default = str + + def format_about(self, about_info: AboutInfo) -> str: + """Render about information. + + Args: + about_info: About information. + + Returns: + A formatted string. 
+ """ + data = OrderedDict( + [ + ("name", about_info.name), + ("description", about_info.description), + ("version", about_info.version), + ("sdk_version", about_info.sdk_version), + ("supported_python_versions", about_info.supported_python_versions), + ("capabilities", [c.value for c in about_info.capabilities]), + ("settings", about_info.settings), + ], + ) + return json.dumps(data, indent=self.indent, default=self.default) + + +class MarkdownFormatter(AboutFormatter, format_name="markdown"): + """About formatter for Markdown output.""" + + def format_about(self, about_info: AboutInfo) -> str: + """Render about information. + + Args: + about_info: About information. + + Returns: + A formatted string. + """ + max_setting_len = t.cast( + int, + max(len(k) for k in about_info.settings["properties"]), + ) + + # Set table base for markdown + table_base = ( + f"| {'Setting':{max_setting_len}}| Required | Default | Description |\n" + f"|:{'-' * max_setting_len}|:--------:|:-------:|:------------|\n" + ) + + # Empty list for string parts + md_list = [] + # Get required settings for table + required_settings = about_info.settings.get("required", []) + + # Iterate over Dict to set md + md_list.append( + f"# `{about_info.name}`\n\n" + f"{about_info.description}\n\n" + f"Built with the [Meltano Singer SDK](https://sdk.meltano.com).\n\n", + ) + + # Process capabilities and settings + + capabilities = "## Capabilities\n\n" + capabilities += "\n".join([f"* `{v}`" for v in about_info.capabilities]) + capabilities += "\n\n" + md_list.append(capabilities) + + setting = "## Settings\n\n" + + for k, v in about_info.settings.get("properties", {}).items(): + md_description = v.get("description", "").replace("\n", "<BR/>") + table_base += ( + f"| {k}{' ' * (max_setting_len - len(k))}" + f"| {'True' if k in required_settings else 'False':8} | " + f"{v.get('default', 'None'):7} | " + f"{md_description:11} |\n" + ) + + setting += table_base + setting += ( + "\n" + + "\n".join( + [ + "A full list of supported settings and capabilities " + f"is available by running: `{about_info.name} --about`", + ], + ) + + "\n" + ) + setting += "\n" + md_list.append(setting) + + # Process Supported Python Versions + + if about_info.supported_python_versions: + supported_python_versions = "## Supported Python Versions\n\n" + supported_python_versions += "\n".join( + [f"* {v}" for v in about_info.supported_python_versions], + ) + supported_python_versions += "\n" + md_list.append(supported_python_versions) + + return "".join(md_list) diff --git a/singer_sdk/authenticators.py b/singer_sdk/authenticators.py index 270ac0634..61382daba 100644 --- a/singer_sdk/authenticators.py +++ b/singer_sdk/authenticators.py @@ -3,20 +3,47 @@ from __future__ import annotations import base64 -import logging import math +import typing as t from datetime import datetime, timedelta from types import MappingProxyType -from typing import Any, Mapping +from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit import jwt import requests from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization -from singer import utils from singer_sdk.helpers._util import utc_now -from singer_sdk.streams import Stream as RESTStreamBase + +if t.TYPE_CHECKING: + import logging + + from singer_sdk.streams.rest import RESTStream + + +def _add_parameters(initial_url: str, extra_parameters: dict) -> str: + """Add parameters to an URL and return the new URL. + + Args: + initial_url: The URL to add parameters to. 
+ extra_parameters: The parameters to add. + + Returns: + The new URL with the parameters added. + """ + scheme, netloc, path, query_string, fragment = urlsplit(initial_url) + query_params = parse_qs(query_string) + query_params.update( + { + parameter_name: [parameter_value] + for parameter_name, parameter_value in extra_parameters.items() + }, + ) + + new_query_string = urlencode(query_params, doseq=True) + + return urlunsplit((scheme, netloc, path, new_query_string, fragment)) class SingletonMeta(type): @@ -35,7 +62,7 @@ def __init__(cls, name: str, bases: tuple[type], dic: dict) -> None: cls.__single_instance = None super().__init__(name, bases, dic) - def __call__(cls, *args: Any, **kwargs: Any) -> Any: # noqa: ANN401 + def __call__(cls, *args: t.Any, **kwargs: t.Any) -> t.Any: # noqa: ANN401 """Create or reuse the singleton. Args: @@ -47,7 +74,7 @@ def __call__(cls, *args: Any, **kwargs: Any) -> Any: # noqa: ANN401 """ if cls.__single_instance: return cls.__single_instance - single_obj = cls.__new__(cls, None) # type: ignore + single_obj = cls.__new__(cls, None) # type: ignore[call-overload] single_obj.__init__(*args, **kwargs) cls.__single_instance = single_obj return single_obj @@ -56,20 +83,20 @@ def __call__(cls, *args: Any, **kwargs: Any) -> Any: # noqa: ANN401 class APIAuthenticatorBase: """Base class for offloading API auth.""" - def __init__(self, stream: RESTStreamBase) -> None: + def __init__(self, stream: RESTStream) -> None: """Init authenticator. Args: stream: A stream for a RESTful endpoint. """ self.tap_name: str = stream.tap_name - self._config: dict[str, Any] = dict(stream.config) - self._auth_headers: dict[str, Any] = {} - self._auth_params: dict[str, Any] = {} + self._config: dict[str, t.Any] = dict(stream.config) + self._auth_headers: dict[str, t.Any] = {} + self._auth_params: dict[str, t.Any] = {} self.logger: logging.Logger = stream.logger @property - def config(self) -> Mapping[str, Any]: + def config(self) -> t.Mapping[str, t.Any]: """Get stream or tap config. Returns: @@ -95,6 +122,46 @@ def auth_params(self) -> dict: """ return self._auth_params or {} + def authenticate_request( + self, + request: requests.PreparedRequest, + ) -> requests.PreparedRequest: + """Authenticate a request. + + Args: + request: A `request object`_. + + Returns: + The authenticated request object. + + .. _request object: + https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest + """ + request.headers.update(self.auth_headers) + + if request.url: + request.url = _add_parameters(request.url, self.auth_params) + + return request + + def __call__(self, r: requests.PreparedRequest) -> requests.PreparedRequest: + """Authenticate a request. + + Calls + :meth:`~singer_sdk.authenticators.APIAuthenticatorBase.authenticate_request` + and returns the result. + + Args: + r: A `request object`_. + + Returns: + The authenticated request object. + + .. _request object: + https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest + """ + return self.authenticate_request(r) + class SimpleAuthenticator(APIAuthenticatorBase): """DEPRECATED: Please use a more specific authenticator. @@ -105,7 +172,7 @@ class SimpleAuthenticator(APIAuthenticatorBase): def __init__( self, - stream: RESTStreamBase, + stream: RESTStream, auth_headers: dict | None = None, ) -> None: """Create a new authenticator. 
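The _add_parameters helper and the new authenticate_request / __call__ hooks above make an authenticator usable as a standard requests auth callable: auth_headers are merged into the request headers, and auth_params are appended to the request URL without clobbering its existing query string. A minimal sketch of the URL handling, using a hypothetical endpoint and token value:

    # Mirrors the behavior of _add_parameters defined above (hypothetical values).
    from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

    scheme, netloc, path, query, fragment = urlsplit("https://api.example.com/items?page=2")
    params = parse_qs(query)
    params.update({"access_token": ["abc"]})
    url = urlunsplit((scheme, netloc, path, urlencode(params, doseq=True), fragment))
    # url == "https://api.example.com/items?page=2&access_token=abc"

Because __call__ simply delegates to authenticate_request, an authenticator instance can also be passed directly as the auth argument of a requests call.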
@@ -135,7 +202,7 @@ class APIKeyAuthenticator(APIAuthenticatorBase): def __init__( self, - stream: RESTStreamBase, + stream: RESTStream, key: str, value: str, location: str = "header", @@ -155,7 +222,8 @@ def __init__( auth_credentials = {key: value} if location not in ["header", "params"]: - raise ValueError("`type` must be one of 'header' or 'params'.") + msg = "`type` must be one of 'header' or 'params'." + raise ValueError(msg) if location == "header": if self._auth_headers is None: @@ -169,7 +237,7 @@ def __init__( @classmethod def create_for_stream( cls: type[APIKeyAuthenticator], - stream: RESTStreamBase, + stream: RESTStream, key: str, value: str, location: str, @@ -197,7 +265,7 @@ class BearerTokenAuthenticator(APIAuthenticatorBase): 'Bearer '. The token will be merged with HTTP headers on the stream. """ - def __init__(self, stream: RESTStreamBase, token: str) -> None: + def __init__(self, stream: RESTStream, token: str) -> None: """Create a new authenticator. Args: @@ -213,7 +281,9 @@ def __init__(self, stream: RESTStreamBase, token: str) -> None: @classmethod def create_for_stream( - cls: type[BearerTokenAuthenticator], stream: RESTStreamBase, token: str + cls: type[BearerTokenAuthenticator], + stream: RESTStream, + token: str, ) -> BearerTokenAuthenticator: """Create an Authenticator object specific to the Stream class. @@ -238,7 +308,7 @@ class BasicAuthenticator(APIAuthenticatorBase): def __init__( self, - stream: RESTStreamBase, + stream: RESTStream, username: str, password: str, ) -> None: @@ -261,7 +331,7 @@ def __init__( @classmethod def create_for_stream( cls: type[BasicAuthenticator], - stream: RESTStreamBase, + stream: RESTStream, username: str, password: str, ) -> BasicAuthenticator: @@ -284,23 +354,26 @@ class OAuthAuthenticator(APIAuthenticatorBase): def __init__( self, - stream: RESTStreamBase, + stream: RESTStream, auth_endpoint: str | None = None, oauth_scopes: str | None = None, default_expiration: int | None = None, + oauth_headers: dict | None = None, ) -> None: """Create a new authenticator. Args: stream: The stream instance to use with this authenticator. - auth_endpoint: API username. - oauth_scopes: API password. + auth_endpoint: The OAuth 2.0 authorization endpoint. + oauth_scopes: A comma-separated list of OAuth scopes. default_expiration: Default token expiry in seconds. + oauth_headers: An optional dict of headers required to get a token. """ super().__init__(stream=stream) self._auth_endpoint = auth_endpoint self._default_expiration = default_expiration self._oauth_scopes = oauth_scopes + self._oauth_headers = oauth_headers or {} # Initialize internal tracking attributes self.access_token: str | None = None @@ -334,7 +407,8 @@ def auth_endpoint(self) -> str: ValueError: If the endpoint is not set. """ if not self._auth_endpoint: - raise ValueError("Authorization endpoint not set.") + msg = "Authorization endpoint not set." + raise ValueError(msg) return self._auth_endpoint @property @@ -378,9 +452,8 @@ def oauth_request_body(self) -> dict: Raises: NotImplementedError: If derived class does not override this method. """ - raise NotImplementedError( - "The `oauth_request_body` property was not defined in the subclass." - ) + msg = "The `oauth_request_body` property was not defined in the subclass." 
+ raise NotImplementedError(msg) @property def client_id(self) -> str | None: @@ -414,7 +487,7 @@ def is_token_valid(self) -> bool: return False if not self.expires_in: return True - if self.expires_in > (utils.now() - self.last_refreshed).total_seconds(): + if self.expires_in > (utc_now() - self.last_refreshed).total_seconds(): return True return False @@ -427,22 +500,29 @@ def update_access_token(self) -> None: """ request_time = utc_now() auth_request_payload = self.oauth_request_payload - token_response = requests.post(self.auth_endpoint, data=auth_request_payload) + token_response = requests.post( + self.auth_endpoint, + headers=self._oauth_headers, + data=auth_request_payload, + timeout=60, + ) try: token_response.raise_for_status() - self.logger.info("OAuth authorization attempt was successful.") - except Exception as ex: - raise RuntimeError( - f"Failed OAuth login, response was '{token_response.json()}'. {ex}" - ) + except requests.HTTPError as ex: + msg = f"Failed OAuth login, response was '{token_response.json()}'. {ex}" + raise RuntimeError(msg) from ex + + self.logger.info("OAuth authorization attempt was successful.") + token_json = token_response.json() self.access_token = token_json["access_token"] - self.expires_in = token_json.get("expires_in", self._default_expiration) + expiration = token_json.get("expires_in", self._default_expiration) + self.expires_in = int(expiration) if expiration else None if self.expires_in is None: self.logger.debug( "No expires_in receied in OAuth response and no " "default_expiration set. Token will be treated as if it never " - "expires." + "expires.", ) self.last_refreshed = request_time @@ -495,9 +575,10 @@ def oauth_request_payload(self) -> dict: ValueError: If the private key is not set. """ if not self.private_key: - raise ValueError("Missing 'private_key' property for OAuth payload.") + msg = "Missing 'private_key' property for OAuth payload." + raise ValueError(msg) - private_key: bytes | Any = bytes(self.private_key, "UTF-8") + private_key: bytes | t.Any = bytes(self.private_key, "UTF-8") if self.private_key_passphrase: passphrase = bytes(self.private_key_passphrase, "UTF-8") private_key = serialization.load_pem_private_key( @@ -505,10 +586,12 @@ def oauth_request_payload(self) -> dict: password=passphrase, backend=default_backend(), ) - private_key_string: str | Any = private_key.decode("UTF-8") + private_key_string: str | t.Any = private_key.decode("UTF-8") return { "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer", "assertion": jwt.encode( - self.oauth_request_body, private_key_string, "RS256" + self.oauth_request_body, + private_key_string, + "RS256", ), } diff --git a/singer_sdk/batch.py b/singer_sdk/batch.py new file mode 100644 index 000000000..0cbf11917 --- /dev/null +++ b/singer_sdk/batch.py @@ -0,0 +1,111 @@ +"""Batching utilities for Singer SDK.""" +from __future__ import annotations + +import gzip +import itertools +import json +import typing as t +from abc import ABC, abstractmethod +from uuid import uuid4 + +if t.TYPE_CHECKING: + from singer_sdk.helpers._batch import BatchConfig + +_T = t.TypeVar("_T") + + +def lazy_chunked_generator( + iterable: t.Iterable[_T], + chunk_size: int, +) -> t.Generator[t.Iterator[_T], None, None]: + """Yield a generator for each chunk of the given iterable. + + Args: + iterable: The iterable to chunk. + chunk_size: The size of each chunk. + + Yields: + A generator for each chunk of the given iterable. 
+ """ + iterator = iter(iterable) + while True: + chunk = list(itertools.islice(iterator, chunk_size)) + if not chunk: + break + yield iter(chunk) + + +class BaseBatcher(ABC): + """Base Record Batcher.""" + + def __init__( + self, + tap_name: str, + stream_name: str, + batch_config: BatchConfig, + ) -> None: + """Initialize the batcher. + + Args: + tap_name: The name of the tap. + stream_name: The name of the stream. + batch_config: The batch configuration. + """ + self.tap_name = tap_name + self.stream_name = stream_name + self.batch_config = batch_config + + @abstractmethod + def get_batches( + self, + records: t.Iterator[dict], + ) -> t.Iterator[list[str]]: + """Yield manifest of batches. + + Args: + records: The records to batch. + + Raises: + NotImplementedError: If the method is not implemented. + """ + raise NotImplementedError + + +class JSONLinesBatcher(BaseBatcher): + """JSON Lines Record Batcher.""" + + def get_batches( + self, + records: t.Iterator[dict], + ) -> t.Iterator[list[str]]: + """Yield manifest of batches. + + Args: + records: The records to batch. + + Yields: + A list of file paths (called a manifest). + """ + sync_id = f"{self.tap_name}--{self.stream_name}-{uuid4()}" + prefix = self.batch_config.storage.prefix or "" + + for i, chunk in enumerate( + lazy_chunked_generator( + records, + self.batch_config.batch_size, + ), + start=1, + ): + filename = f"{prefix}{sync_id}-{i}.json.gz" + with self.batch_config.storage.fs(create=True) as fs: + # TODO: Determine compression from config. + with fs.open(filename, "wb") as f, gzip.GzipFile( + fileobj=f, + mode="wb", + ) as gz: + gz.writelines( + (json.dumps(record, default=str) + "\n").encode() + for record in chunk + ) + file_url = fs.geturl(filename) + yield [file_url] diff --git a/singer_sdk/cli/__init__.py b/singer_sdk/cli/__init__.py index 11bf18ab7..cb0d72607 100644 --- a/singer_sdk/cli/__init__.py +++ b/singer_sdk/cli/__init__.py @@ -1 +1,35 @@ """Helpers for the tap, target and mapper CLIs.""" + +from __future__ import annotations + +import typing as t + +if t.TYPE_CHECKING: + import click + +_T = t.TypeVar("_T") + + +class plugin_cli: # noqa: N801 + """Decorator to create a plugin CLI.""" + + def __init__(self, method: t.Callable[..., click.Command]) -> None: + """Create a new plugin CLI. + + Args: + method: The method to call to get the command. + """ + self.method = method + self.name: str | None = None + + def __get__(self, instance: _T, owner: type[_T]) -> click.Command: + """Get the command. + + Args: + instance: The instance of the plugin. + owner: The plugin class. + + Returns: + The CLI entrypoint. 
+ """ + return self.method(owner) diff --git a/singer_sdk/cli/common_options.py b/singer_sdk/cli/common_options.py index c9d4fef16..8a736e810 100644 --- a/singer_sdk/cli/common_options.py +++ b/singer_sdk/cli/common_options.py @@ -1,27 +1,32 @@ """Common CLI options for plugins.""" +from __future__ import annotations + +import typing as t + import click -PLUGIN_VERSION = click.option( +PLUGIN_VERSION: t.Callable[..., t.Any] = click.option( "--version", is_flag=True, help="Display the package version.", ) -PLUGIN_ABOUT = click.option( +PLUGIN_ABOUT: t.Callable[..., t.Any] = click.option( "--about", is_flag=True, help="Display package metadata and settings.", ) -PLUGIN_ABOUT_FORMAT = click.option( +PLUGIN_ABOUT_FORMAT: t.Callable[..., t.Any] = click.option( "--format", + "about_format", help="Specify output style for --about", type=click.Choice(["json", "markdown"], case_sensitive=False), default=None, ) -PLUGIN_CONFIG = click.option( +PLUGIN_CONFIG: t.Callable[..., t.Any] = click.option( "--config", multiple=True, help="Configuration file location or 'ENV' to use environment variables.", @@ -29,7 +34,7 @@ default=(), ) -PLUGIN_FILE_INPUT = click.option( +PLUGIN_FILE_INPUT: t.Callable[..., t.Any] = click.option( "--input", "file_input", help="A path to read messages from instead of from standard in.", diff --git a/singer_sdk/configuration/__init__.py b/singer_sdk/configuration/__init__.py index 3fec3c9fc..cbe1be755 100644 --- a/singer_sdk/configuration/__init__.py +++ b/singer_sdk/configuration/__init__.py @@ -1 +1,3 @@ """Configuration parsing and handling.""" + +from __future__ import annotations diff --git a/singer_sdk/configuration/_dict_config.py b/singer_sdk/configuration/_dict_config.py index f5061f07a..fd8217f01 100644 --- a/singer_sdk/configuration/_dict_config.py +++ b/singer_sdk/configuration/_dict_config.py @@ -4,8 +4,8 @@ import logging import os +import typing as t from pathlib import Path -from typing import Any, Iterable from dotenv import find_dotenv from dotenv.main import DotEnv @@ -17,10 +17,10 @@ def parse_environment_config( - config_schema: dict[str, Any], + config_schema: dict[str, t.Any], prefix: str, dotenv_path: str | None = None, -) -> dict[str, Any]: +) -> dict[str, t.Any]: """Parse configuration from environment variables. Args: @@ -35,7 +35,7 @@ def parse_environment_config( Returns: A configuration dictionary. """ - result: dict[str, Any] = {} + result: dict[str, t.Any] = {} if not dotenv_path: dotenv_path = find_dotenv() @@ -43,7 +43,7 @@ def parse_environment_config( logger.debug("Loading configuration from %s", dotenv_path) DotEnv(dotenv_path).set_as_environment_variables() - for config_key in config_schema["properties"].keys(): + for config_key in config_schema["properties"]: env_var_name = prefix + config_key.upper().replace("-", "_") if env_var_name in os.environ: env_var_value = os.environ[env_var_name] @@ -54,11 +54,12 @@ def parse_environment_config( ) if is_string_array_type(config_schema["properties"][config_key]): if env_var_value[0] == "[" and env_var_value[-1] == "]": - raise ValueError( + msg = ( "A bracketed list was detected in the environment variable " - f"'{env_var_name}'. This syntax is no longer supported. " - "Please remove the brackets and try again." + f"'{env_var_name}'. This syntax is no longer supported. Please " + "remove the brackets and try again." 
) + raise ValueError(msg) result[config_key] = env_var_value.split(",") else: result[config_key] = env_var_value @@ -66,10 +67,10 @@ def parse_environment_config( def merge_config_sources( - inputs: Iterable[str], - config_schema: dict[str, Any], + inputs: t.Iterable[str], + config_schema: dict[str, t.Any], env_prefix: str, -) -> dict[str, Any]: +) -> dict[str, t.Any]: """Merge configuration from multiple sources into a single dictionary. Args: @@ -83,19 +84,37 @@ def merge_config_sources( Returns: A single configuration dictionary. """ - config: dict[str, Any] = {} - for config_path in inputs: - if config_path == "ENV": + config: dict[str, t.Any] = {} + for config_input in inputs: + if config_input == "ENV": env_config = parse_environment_config(config_schema, prefix=env_prefix) config.update(env_config) continue - if not Path(config_path).is_file(): - raise FileNotFoundError( - f"Could not locate config file at '{config_path}'." - "Please check that the file exists." + config_path = Path(config_input) + + if not config_path.is_file(): + msg = ( + f"Could not locate config file at '{config_path}'.Please check that " + "the file exists." ) + raise FileNotFoundError(msg) config.update(read_json_file(config_path)) return config + + +def merge_missing_config_jsonschema( + source_jsonschema: dict, + target_jsonschema: dict, +) -> None: + """Append any missing properties in the target with those from source. + + Args: + source_jsonschema: The source json schema from which to import. + target_jsonschema: The json schema to update. + """ + for k, v in source_jsonschema["properties"].items(): + if k not in target_jsonschema["properties"]: + target_jsonschema["properties"][k] = v diff --git a/singer_sdk/connectors/__init__.py b/singer_sdk/connectors/__init__.py new file mode 100644 index 000000000..32799417a --- /dev/null +++ b/singer_sdk/connectors/__init__.py @@ -0,0 +1,7 @@ +"""Module for SQL-related operations.""" + +from __future__ import annotations + +from .sql import SQLConnector + +__all__ = ["SQLConnector"] diff --git a/singer_sdk/connectors/sql.py b/singer_sdk/connectors/sql.py new file mode 100644 index 000000000..e05e359da --- /dev/null +++ b/singer_sdk/connectors/sql.py @@ -0,0 +1,1166 @@ +"""Common SQL connectors for Streams and Sinks.""" + +from __future__ import annotations + +import decimal +import json +import logging +import typing as t +import warnings +from contextlib import contextmanager +from datetime import datetime +from functools import lru_cache + +import simplejson +import sqlalchemy +from sqlalchemy.engine import Engine + +from singer_sdk import typing as th +from singer_sdk._singerlib import CatalogEntry, MetadataMapping, Schema +from singer_sdk.exceptions import ConfigValidationError + +if t.TYPE_CHECKING: + from sqlalchemy.engine.reflection import Inspector + + +class SQLConnector: + """Base class for SQLAlchemy-based connectors. + + The connector class serves as a wrapper around the SQL connection. + + The functions of the connector are: + - connecting to the source + - generating SQLAlchemy connection and engine objects + - discovering schema catalog entries + - performing type conversions to/from JSONSchema types + - dialect-specific functions, such as escaping and fully qualified names + """ + + allow_column_add: bool = True # Whether ADD COLUMN is supported. + allow_column_rename: bool = True # Whether RENAME COLUMN is supported. + allow_column_alter: bool = False # Whether altering column types is supported. 
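`merge_config_sources` above applies each input in order with `dict.update`, so values from later sources (including `ENV`) override earlier ones. A tiny illustration of that precedence with the file loading stubbed out:

import typing as t


def merge_configs(sources: t.Iterable[dict]) -> dict:
    """Apply sources in order; later values win, mirroring the dict.update above."""
    config: dict = {}
    for source in sources:
        config.update(source)
    return config


# merge_configs([{"host": "a", "port": 1}, {"port": 2}]) -> {"host": "a", "port": 2}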
+ allow_merge_upsert: bool = False # Whether MERGE UPSERT is supported. + allow_temp_tables: bool = True # Whether temp tables are supported. + _cached_engine: Engine | None = None + + def __init__( + self, + config: dict | None = None, + sqlalchemy_url: str | None = None, + ) -> None: + """Initialize the SQL connector. + + Args: + config: The parent tap or target object's config. + sqlalchemy_url: Optional URL for the connection. + """ + self._config: dict[str, t.Any] = config or {} + self._sqlalchemy_url: str | None = sqlalchemy_url or None + + @property + def config(self) -> dict: + """If set, provides access to the tap or target config. + + Returns: + The settings as a dict. + """ + return self._config + + @property + def logger(self) -> logging.Logger: + """Get logger. + + Returns: + Plugin logger. + """ + return logging.getLogger("sqlconnector") + + @contextmanager + def _connect(self) -> t.Iterator[sqlalchemy.engine.Connection]: + with self._engine.connect().execution_options(stream_results=True) as conn: + yield conn + + def create_sqlalchemy_connection(self) -> sqlalchemy.engine.Connection: + """(DEPRECATED) Return a new SQLAlchemy connection using the provided config. + + Do not use the SQLConnector's connection directly. Instead, if you need + to execute something that isn't available on the connector currently, + make a child class and add a method on that connector. + + By default this will create using the sqlalchemy `stream_results=True` option + described here: + + https://docs.sqlalchemy.org/en/14/core/connections.html#using-server-side-cursors-a-k-a-stream-results + + Developers may override this method if their provider does not support + server side cursors (`stream_results`) or in order to use different + configurations options when creating the connection object. + + Returns: + A newly created SQLAlchemy engine object. + """ + warnings.warn( + "`SQLConnector.create_sqlalchemy_connection` is deprecated. " + "If you need to execute something that isn't available " + "on the connector currently, make a child class and " + "add your required method on that connector.", + DeprecationWarning, + stacklevel=2, + ) + return self._engine.connect().execution_options(stream_results=True) + + def create_sqlalchemy_engine(self) -> Engine: + """(DEPRECATED) Return a new SQLAlchemy engine using the provided config. + + Developers can generally override just one of the following: + `sqlalchemy_engine`, sqlalchemy_url`. + + Returns: + A newly created SQLAlchemy engine object. + """ + warnings.warn( + "`SQLConnector.create_sqlalchemy_engine` is deprecated. Override" + "`_engine` or sqlalchemy_url` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._engine + + @property + def connection(self) -> sqlalchemy.engine.Connection: + """(DEPRECATED) Return or set the SQLAlchemy connection object. + + Do not use the SQLConnector's connection directly. Instead, if you need + to execute something that isn't available on the connector currently, + make a child class and add a method on that connector. + + Returns: + The active SQLAlchemy connection object. + """ + warnings.warn( + "`SQLConnector.connection` is deprecated. If you need to execute something " + "that isn't available on the connector currently, make a child " + "class and add your required method on that connector.", + DeprecationWarning, + stacklevel=2, + ) + return self.create_sqlalchemy_connection() + + @property + def sqlalchemy_url(self) -> str: + """Return the SQLAlchemy URL string. 
+ + Returns: + The URL as a string. + """ + if not self._sqlalchemy_url: + self._sqlalchemy_url = self.get_sqlalchemy_url(self.config) + + return self._sqlalchemy_url + + def get_sqlalchemy_url(self, config: dict[str, t.Any]) -> str: + """Return the SQLAlchemy URL string. + + Developers can generally override just one of the following: + `sqlalchemy_engine`, `get_sqlalchemy_url`. + + Args: + config: A dictionary of settings from the tap or target config. + + Returns: + The URL as a string. + + Raises: + ConfigValidationError: If no valid sqlalchemy_url can be found. + """ + if "sqlalchemy_url" not in config: + msg = "Could not find or create 'sqlalchemy_url' for connection." + raise ConfigValidationError(msg) + + return t.cast(str, config["sqlalchemy_url"]) + + @staticmethod + def to_jsonschema_type( + sql_type: ( + str # noqa: ANN401 + | sqlalchemy.types.TypeEngine + | type[sqlalchemy.types.TypeEngine] + | t.Any + ), + ) -> dict: + """Return a JSON Schema representation of the provided type. + + By default will call `typing.to_jsonschema_type()` for strings and SQLAlchemy + types. + + Developers may override this method to accept additional input argument types, + to support non-standard types, or to provide custom typing logic. + + Args: + sql_type: The string representation of the SQL type, a SQLAlchemy + TypeEngine class or object, or a custom-specified object. + + Raises: + ValueError: If the type received could not be translated to jsonschema. + + Returns: + The JSON Schema representation of the provided type. + """ + if isinstance(sql_type, (str, sqlalchemy.types.TypeEngine)): + return th.to_jsonschema_type(sql_type) + + if isinstance(sql_type, type): + if issubclass(sql_type, sqlalchemy.types.TypeEngine): + return th.to_jsonschema_type(sql_type) + + msg = f"Unexpected type received: '{sql_type.__name__}'" + raise ValueError(msg) + + msg = f"Unexpected type received: '{type(sql_type).__name__}'" + raise ValueError(msg) + + @staticmethod + def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: + """Return a JSON Schema representation of the provided type. + + By default will call `typing.to_sql_type()`. + + Developers may override this method to accept additional input argument types, + to support non-standard types, or to provide custom typing logic. + If overriding this method, developers should call the default implementation + from the base class for all unhandled cases. + + Args: + jsonschema_type: The JSON Schema representation of the source type. + + Returns: + The SQLAlchemy type representation of the data type. + """ + return th.to_sql_type(jsonschema_type) + + @staticmethod + def get_fully_qualified_name( + table_name: str | None = None, + schema_name: str | None = None, + db_name: str | None = None, + delimiter: str = ".", + ) -> str: + """Concatenates a fully qualified name from the parts. + + Args: + table_name: The name of the table. + schema_name: The name of the schema. Defaults to None. + db_name: The name of the database. Defaults to None. + delimiter: Generally: '.' for SQL names and '-' for Singer names. + + Raises: + ValueError: If all 3 name parts not supplied. + + Returns: + The fully qualified name as a string. 
+ """ + parts = [] + + if db_name: + parts.append(db_name) + if schema_name: + parts.append(schema_name) + if table_name: + parts.append(table_name) + + if not parts: + raise ValueError( + "Could not generate fully qualified name: " + + ":".join( + [ + db_name or "(unknown-db)", + schema_name or "(unknown-schema)", + table_name or "(unknown-table-name)", + ], + ), + ) + + return delimiter.join(parts) + + @property + def _dialect(self) -> sqlalchemy.engine.Dialect: + """Return the dialect object. + + Returns: + The dialect object. + """ + return t.cast(sqlalchemy.engine.Dialect, self._engine.dialect) + + @property + def _engine(self) -> Engine: + """Return the engine object. + + This is the correct way to access the Connector's engine, if needed + (e.g. to inspect tables). + + Returns: + The SQLAlchemy Engine that's attached to this SQLConnector instance. + """ + if not self._cached_engine: + self._cached_engine = self.create_engine() + return t.cast(Engine, self._cached_engine) + + def create_engine(self) -> Engine: + """Creates and returns a new engine. Do not call outside of _engine. + + NOTE: Do not call this method. The only place that this method should + be called is inside the self._engine method. If you'd like to access + the engine on a connector, use self._engine. + + This method exists solely so that tap/target developers can override it + on their subclass of SQLConnector to perform custom engine creation + logic. + + Returns: + A new SQLAlchemy Engine. + """ + return sqlalchemy.create_engine( + self.sqlalchemy_url, + echo=False, + json_serializer=self.serialize_json, + json_deserializer=self.deserialize_json, + ) + + def quote(self, name: str) -> str: + """Quote a name if it needs quoting, using '.' as a name-part delimiter. + + Examples: + "my_table" => "`my_table`" + "my_schema.my_table" => "`my_schema`.`my_table`" + + Args: + name: The unquoted name. + + Returns: + str: The quoted name. + """ + return ".".join( + [ + self._dialect.identifier_preparer.quote(name_part) + for name_part in name.split(".") + ], + ) + + @lru_cache() # noqa: B019 + def _warn_no_view_detection(self) -> None: + """Print a warning, but only the first time.""" + self.logger.warning( + "Provider does not support get_view_names(). " + "Streams list may be incomplete or `is_view` may be unpopulated.", + ) + + def get_schema_names( + self, + engine: Engine, # noqa: ARG002 + inspected: Inspector, + ) -> list[str]: + """Return a list of schema names in DB. + + Args: + engine: SQLAlchemy engine + inspected: SQLAlchemy inspector instance for engine + + Returns: + List of schema names + """ + return inspected.get_schema_names() + + def get_object_names( + self, + engine: Engine, # noqa: ARG002 + inspected: Inspector, + schema_name: str, + ) -> list[tuple[str, bool]]: + """Return a list of syncable objects. + + Args: + engine: SQLAlchemy engine + inspected: SQLAlchemy inspector instance for engine + schema_name: Schema name to inspect + + Returns: + List of tuples (<table_or_view_name>, <is_view>) + """ + # Get list of tables and views + table_names = inspected.get_table_names(schema=schema_name) + try: + view_names = inspected.get_view_names(schema=schema_name) + except NotImplementedError: + # Some DB providers do not understand 'views' + self._warn_no_view_detection() + view_names = [] + return [(t, False) for t in table_names] + [(v, True) for v in view_names] + + # TODO maybe should be splitted into smaller parts? 
+ def discover_catalog_entry( + self, + engine: Engine, # noqa: ARG002 + inspected: Inspector, + schema_name: str, + table_name: str, + is_view: bool, # noqa: FBT001 + ) -> CatalogEntry: + """Create `CatalogEntry` object for the given table or a view. + + Args: + engine: SQLAlchemy engine + inspected: SQLAlchemy inspector instance for engine + schema_name: Schema name to inspect + table_name: Name of the table or a view + is_view: Flag whether this object is a view, returned by `get_object_names` + + Returns: + `CatalogEntry` object for the given table or a view + """ + # Initialize unique stream name + unique_stream_id = self.get_fully_qualified_name( + db_name=None, + schema_name=schema_name, + table_name=table_name, + delimiter="-", + ) + + # Detect key properties + possible_primary_keys: list[list[str]] = [] + pk_def = inspected.get_pk_constraint(table_name, schema=schema_name) + if pk_def and "constrained_columns" in pk_def: + possible_primary_keys.append(pk_def["constrained_columns"]) + + possible_primary_keys.extend( + index_def["column_names"] + for index_def in inspected.get_indexes(table_name, schema=schema_name) + if index_def.get("unique", False) + ) + + key_properties = next(iter(possible_primary_keys), None) + + # Initialize columns list + table_schema = th.PropertiesList() + for column_def in inspected.get_columns(table_name, schema=schema_name): + column_name = column_def["name"] + is_nullable = column_def.get("nullable", False) + jsonschema_type: dict = self.to_jsonschema_type( + t.cast(sqlalchemy.types.TypeEngine, column_def["type"]), + ) + table_schema.append( + th.Property( + name=column_name, + wrapped=th.CustomType(jsonschema_type), + required=not is_nullable, + ), + ) + schema = table_schema.to_dict() + + # Initialize available replication methods + addl_replication_methods: list[str] = [""] # By default an empty list. + # Notes regarding replication methods: + # - 'INCREMENTAL' replication must be enabled by the user by specifying + # a replication_key value. + # - 'LOG_BASED' replication must be enabled by the developer, according + # to source-specific implementation capabilities. + replication_method = next(reversed(["FULL_TABLE", *addl_replication_methods])) + + # Create the catalog entry object + return CatalogEntry( + tap_stream_id=unique_stream_id, + stream=unique_stream_id, + table=table_name, + key_properties=key_properties, + schema=Schema.from_dict(schema), + is_view=is_view, + replication_method=replication_method, + metadata=MetadataMapping.get_standard_metadata( + schema_name=schema_name, + schema=schema, + replication_method=replication_method, + key_properties=key_properties, + valid_replication_keys=None, # Must be defined by user + ), + database=None, # Expects single-database context + row_count=None, + stream_alias=None, + replication_key=None, # Must be defined by user + ) + + def discover_catalog_entries(self) -> list[dict]: + """Return a list of catalog entries from discovery. + + Returns: + The discovered catalog entries as a list. 
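During discovery above, `key_properties` is chosen by preferring the reflected primary-key constraint and falling back to the first unique index, with `next(iter(...), None)` yielding None when neither exists. A condensed sketch of that selection, assuming the reflected metadata arrives as plain dicts:

from __future__ import annotations


def pick_key_properties(
    pk_constraint: dict | None,
    indexes: list[dict],
) -> list[str] | None:
    """Return primary-key columns, else the first unique index, else None."""
    candidates: list[list[str]] = []
    if pk_constraint and pk_constraint.get("constrained_columns"):
        candidates.append(pk_constraint["constrained_columns"])
    candidates.extend(
        index["column_names"] for index in indexes if index.get("unique", False)
    )
    return next(iter(candidates), None)


# pick_key_properties({"constrained_columns": ["id"]}, []) -> ["id"]
# pick_key_properties(None, [{"column_names": ["email"], "unique": True}]) -> ["email"]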
+ """ + result: list[dict] = [] + engine = self._engine + inspected = sqlalchemy.inspect(engine) + for schema_name in self.get_schema_names(engine, inspected): + # Iterate through each table and view + for table_name, is_view in self.get_object_names( + engine, + inspected, + schema_name, + ): + catalog_entry = self.discover_catalog_entry( + engine, + inspected, + schema_name, + table_name, + is_view, + ) + result.append(catalog_entry.to_dict()) + + return result + + def parse_full_table_name( + self, + full_table_name: str, + ) -> tuple[str | None, str | None, str]: + """Parse a fully qualified table name into its parts. + + Developers may override this method if their platform does not support the + traditional 3-part convention: `db_name.schema_name.table_name` + + Args: + full_table_name: A table name or a fully qualified table name. Depending on + SQL the platform, this could take the following forms: + - `<db>.<schema>.<table>` (three part names) + - `<db>.<table>` (platforms which do not use schema groupings) + - `<schema>.<name>` (if DB name is already in context) + - `<table>` (if DB name and schema name are already in context) + + Returns: + A three part tuple (db_name, schema_name, table_name) with any unspecified + or unused parts returned as None. + """ + db_name: str | None = None + schema_name: str | None = None + + parts = full_table_name.split(".") + if len(parts) == 1: + table_name = full_table_name + if len(parts) == 2: # noqa: PLR2004 + schema_name, table_name = parts + if len(parts) == 3: # noqa: PLR2004 + db_name, schema_name, table_name = parts + + return db_name, schema_name, table_name + + def table_exists(self, full_table_name: str) -> bool: + """Determine if the target table already exists. + + Args: + full_table_name: the target table name. + + Returns: + True if table exists, False if not, None if unsure or undetectable. + """ + _, schema_name, table_name = self.parse_full_table_name(full_table_name) + + return t.cast( + bool, + sqlalchemy.inspect(self._engine).has_table(table_name, schema_name), + ) + + def schema_exists(self, schema_name: str) -> bool: + """Determine if the target database schema already exists. + + Args: + schema_name: The target database schema name. + + Returns: + True if the database schema exists, False if not. + """ + schema_names = sqlalchemy.inspect(self._engine).get_schema_names() + return schema_name in schema_names + + def get_table_columns( + self, + full_table_name: str, + column_names: list[str] | None = None, + ) -> dict[str, sqlalchemy.Column]: + """Return a list of table columns. + + Args: + full_table_name: Fully qualified table name. + column_names: A list of column names to filter to. + + Returns: + An ordered list of column objects. + """ + _, schema_name, table_name = self.parse_full_table_name(full_table_name) + inspector = sqlalchemy.inspect(self._engine) + columns = inspector.get_columns(table_name, schema_name) + + return { + col_meta["name"]: sqlalchemy.Column( + col_meta["name"], + col_meta["type"], + nullable=col_meta.get("nullable", False), + ) + for col_meta in columns + if not column_names + or col_meta["name"].casefold() in {col.casefold() for col in column_names} + } + + def get_table( + self, + full_table_name: str, + column_names: list[str] | None = None, + ) -> sqlalchemy.Table: + """Return a table object. + + Args: + full_table_name: Fully qualified table name. + column_names: A list of column names to filter to. + + Returns: + A table object with column list. 
+ """ + columns = self.get_table_columns( + full_table_name=full_table_name, + column_names=column_names, + ).values() + _, schema_name, table_name = self.parse_full_table_name(full_table_name) + meta = sqlalchemy.MetaData() + return sqlalchemy.schema.Table( + table_name, + meta, + *list(columns), + schema=schema_name, + ) + + def column_exists(self, full_table_name: str, column_name: str) -> bool: + """Determine if the target table already exists. + + Args: + full_table_name: the target table name. + column_name: the target column name. + + Returns: + True if table exists, False if not. + """ + return column_name in self.get_table_columns(full_table_name) + + def create_schema(self, schema_name: str) -> None: + """Create target schema. + + Args: + schema_name: The target schema to create. + """ + with self._connect() as conn: + conn.execute(sqlalchemy.schema.CreateSchema(schema_name)) + + def create_empty_table( + self, + full_table_name: str, + schema: dict, + primary_keys: list[str] | None = None, + partition_keys: list[str] | None = None, + as_temp_table: bool = False, # noqa: FBT001, FBT002 + ) -> None: + """Create an empty target table. + + Args: + full_table_name: the target table name. + schema: the JSON schema for the new table. + primary_keys: list of key properties. + partition_keys: list of partition keys. + as_temp_table: True to create a temp table. + + Raises: + NotImplementedError: if temp tables are unsupported and as_temp_table=True. + RuntimeError: if a variant schema is passed with no properties defined. + """ + if as_temp_table: + msg = "Temporary tables are not supported." + raise NotImplementedError(msg) + + _ = partition_keys # Not supported in generic implementation. + + _, schema_name, table_name = self.parse_full_table_name(full_table_name) + meta = sqlalchemy.MetaData(schema=schema_name) + columns: list[sqlalchemy.Column] = [] + primary_keys = primary_keys or [] + try: + properties: dict = schema["properties"] + except KeyError as e: + msg = f"Schema for '{full_table_name}' does not define properties: {schema}" + raise RuntimeError(msg) from e + for property_name, property_jsonschema in properties.items(): + is_primary_key = property_name in primary_keys + columns.append( + sqlalchemy.Column( + property_name, + self.to_sql_type(property_jsonschema), + primary_key=is_primary_key, + ), + ) + + _ = sqlalchemy.Table(table_name, meta, *columns) + meta.create_all(self._engine) + + def _create_empty_column( + self, + full_table_name: str, + column_name: str, + sql_type: sqlalchemy.types.TypeEngine, + ) -> None: + """Create a new column. + + Args: + full_table_name: The target table name. + column_name: The name of the new column. + sql_type: SQLAlchemy type engine to be used in creating the new column. + + Raises: + NotImplementedError: if adding columns is not supported. + """ + if not self.allow_column_add: + msg = "Adding columns is not supported." + raise NotImplementedError(msg) + + column_add_ddl = self.get_column_add_ddl( + table_name=full_table_name, + column_name=column_name, + column_type=sql_type, + ) + with self._connect() as conn, conn.begin(): + conn.execute(column_add_ddl) + + def prepare_schema(self, schema_name: str) -> None: + """Create the target database schema. + + Args: + schema_name: The target schema name. 
+ """ + schema_exists = self.schema_exists(schema_name) + if not schema_exists: + self.create_schema(schema_name) + + def prepare_table( + self, + full_table_name: str, + schema: dict, + primary_keys: list[str], + partition_keys: list[str] | None = None, + as_temp_table: bool = False, # noqa: FBT002, FBT001 + ) -> None: + """Adapt target table to provided schema if possible. + + Args: + full_table_name: the target table name. + schema: the JSON Schema for the table. + primary_keys: list of key properties. + partition_keys: list of partition keys. + as_temp_table: True to create a temp table. + """ + if not self.table_exists(full_table_name=full_table_name): + self.create_empty_table( + full_table_name=full_table_name, + schema=schema, + primary_keys=primary_keys, + partition_keys=partition_keys, + as_temp_table=as_temp_table, + ) + return + + for property_name, property_def in schema["properties"].items(): + self.prepare_column( + full_table_name, + property_name, + self.to_sql_type(property_def), + ) + + def prepare_column( + self, + full_table_name: str, + column_name: str, + sql_type: sqlalchemy.types.TypeEngine, + ) -> None: + """Adapt target table to provided schema if possible. + + Args: + full_table_name: the target table name. + column_name: the target column name. + sql_type: the SQLAlchemy type. + """ + if not self.column_exists(full_table_name, column_name): + self._create_empty_column( + full_table_name=full_table_name, + column_name=column_name, + sql_type=sql_type, + ) + return + + self._adapt_column_type( + full_table_name, + column_name=column_name, + sql_type=sql_type, + ) + + def rename_column(self, full_table_name: str, old_name: str, new_name: str) -> None: + """Rename the provided columns. + + Args: + full_table_name: The fully qualified table name. + old_name: The old column to be renamed. + new_name: The new name for the column. + + Raises: + NotImplementedError: If `self.allow_column_rename` is false. + """ + if not self.allow_column_rename: + msg = "Renaming columns is not supported." + raise NotImplementedError(msg) + + column_rename_ddl = self.get_column_rename_ddl( + table_name=full_table_name, + column_name=old_name, + new_column_name=new_name, + ) + with self._connect() as conn: + conn.execute(column_rename_ddl) + + def merge_sql_types( + self, + sql_types: list[sqlalchemy.types.TypeEngine], + ) -> sqlalchemy.types.TypeEngine: + """Return a compatible SQL type for the selected type list. + + Args: + sql_types: List of SQL types. + + Returns: + A SQL type that is compatible with the input types. + + Raises: + ValueError: If sql_types argument has zero members. + """ + if not sql_types: + msg = "Expected at least one member in `sql_types` argument." 
+ raise ValueError(msg) + + if len(sql_types) == 1: + return sql_types[0] + + # Gathering Type to match variables + # sent in _adapt_column_type + current_type = sql_types[0] + cur_len: int = getattr(current_type, "length", 0) + + # Convert the two types given into a sorted list + # containing the best conversion classes + sql_types = self._sort_types(sql_types) + + # If greater than two evaluate the first pair then on down the line + if len(sql_types) > 2: # noqa: PLR2004 + return self.merge_sql_types( + [self.merge_sql_types([sql_types[0], sql_types[1]])] + sql_types[2:], + ) + + # Get the generic type class + for opt in sql_types: + # Get the length + opt_len: int = getattr(opt, "length", 0) + generic_type = type(opt.as_generic()) + + if isinstance(generic_type, type): + if issubclass( + generic_type, + (sqlalchemy.types.String, sqlalchemy.types.Unicode), + ) or issubclass( + generic_type, + (sqlalchemy.types.String, sqlalchemy.types.Unicode), + ): + # If length None or 0 then is varchar max ? + if ( + (opt_len is None) + or (opt_len == 0) + or (cur_len and (opt_len >= cur_len)) + ): + return opt + # If best conversion class is equal to current type + # return the best conversion class + elif str(opt) == str(current_type): + return opt + + msg = f"Unable to merge sql types: {', '.join([str(t) for t in sql_types])}" + raise ValueError(msg) + + def _sort_types( + self, + sql_types: t.Iterable[sqlalchemy.types.TypeEngine], + ) -> list[sqlalchemy.types.TypeEngine]: + """Return the input types sorted from most to least compatible. + + For example, [Smallint, Integer, Datetime, String, Double] would become + [Unicode, String, Double, Integer, Smallint, Datetime]. + + String types will be listed first, then decimal types, then integer types, + then bool types, and finally datetime and date. Higher precision, scale, and + length will be sorted earlier. + + Args: + sql_types (List[sqlalchemy.types.TypeEngine]): [description] + + Returns: + The sorted list. + """ + + def _get_type_sort_key( + sql_type: sqlalchemy.types.TypeEngine, + ) -> tuple[int, int]: + # return rank, with higher numbers ranking first + + _len = int(getattr(sql_type, "length", 0) or 0) + + _pytype = t.cast(type, sql_type.python_type) + if issubclass(_pytype, (str, bytes)): + return 900, _len + if issubclass(_pytype, datetime): + return 600, _len + if issubclass(_pytype, float): + return 400, _len + if issubclass(_pytype, int): + return 300, _len + + return 0, _len + + return sorted(sql_types, key=_get_type_sort_key, reverse=True) + + def _get_column_type( + self, + full_table_name: str, + column_name: str, + ) -> sqlalchemy.types.TypeEngine: + """Get the SQL type of the declared column. + + Args: + full_table_name: The name of the table. + column_name: The name of the column. + + Returns: + The type of the column. + + Raises: + KeyError: If the provided column name does not exist. + """ + try: + column = self.get_table_columns(full_table_name)[column_name] + except KeyError as ex: + msg = f"Column `{column_name}` does not exist in table `{full_table_name}`." + raise KeyError(msg) from ex + + return t.cast(sqlalchemy.types.TypeEngine, column.type) + + @staticmethod + def get_column_add_ddl( + table_name: str, + column_name: str, + column_type: sqlalchemy.types.TypeEngine, + ) -> sqlalchemy.DDL: + """Get the create column DDL statement. + + Override this if your database uses a different syntax for creating columns. + + Args: + table_name: Fully qualified table name of column to alter. + column_name: Column name to create. 
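`_sort_types` above ranks candidate column types by mapping each type's Python equivalent to a numeric score (strings highest, then datetimes, floats, and integers) and sorting in descending order so the most general type is considered first. The same pattern reduced to plain Python types:

from __future__ import annotations

from datetime import datetime

RANKS = {str: 900, bytes: 900, datetime: 600, float: 400, int: 300}


def sort_most_general_first(python_types: list[type]) -> list[type]:
    """Sort types by descending rank, mirroring the reverse sort above."""
    return sorted(python_types, key=lambda typ: RANKS.get(typ, 0), reverse=True)


# sort_most_general_first([int, str, float]) -> [str, float, int]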
+ column_type: New column sqlalchemy type. + + Returns: + A sqlalchemy DDL instance. + """ + create_column_clause = sqlalchemy.schema.CreateColumn( + sqlalchemy.Column( + column_name, + column_type, + ), + ) + return sqlalchemy.DDL( + "ALTER TABLE %(table_name)s ADD COLUMN %(create_column_clause)s", + { + "table_name": table_name, + "create_column_clause": create_column_clause, + }, + ) + + @staticmethod + def get_column_rename_ddl( + table_name: str, + column_name: str, + new_column_name: str, + ) -> sqlalchemy.DDL: + """Get the create column DDL statement. + + Override this if your database uses a different syntax for renaming columns. + + Args: + table_name: Fully qualified table name of column to alter. + column_name: Existing column name. + new_column_name: New column name. + + Returns: + A sqlalchemy DDL instance. + """ + return sqlalchemy.DDL( + "ALTER TABLE %(table_name)s " + "RENAME COLUMN %(column_name)s to %(new_column_name)s", + { + "table_name": table_name, + "column_name": column_name, + "new_column_name": new_column_name, + }, + ) + + @staticmethod + def get_column_alter_ddl( + table_name: str, + column_name: str, + column_type: sqlalchemy.types.TypeEngine, + ) -> sqlalchemy.DDL: + """Get the alter column DDL statement. + + Override this if your database uses a different syntax for altering columns. + + Args: + table_name: Fully qualified table name of column to alter. + column_name: Column name to alter. + column_type: New column type string. + + Returns: + A sqlalchemy DDL instance. + """ + return sqlalchemy.DDL( + "ALTER TABLE %(table_name)s ALTER COLUMN %(column_name)s (%(column_type)s)", + { + "table_name": table_name, + "column_name": column_name, + "column_type": column_type, + }, + ) + + @staticmethod + def remove_collation( + column_type: sqlalchemy.types.TypeEngine, + ) -> str | None: + """Removes collation for the given column TypeEngine instance. + + Args: + column_type: Column SQLAlchemy type. + + Returns: + The removed collation as a string. + """ + if hasattr(column_type, "collation") and column_type.collation: + column_type_collation: str = column_type.collation + column_type.collation = None + return column_type_collation + return None + + @staticmethod + def update_collation( + column_type: sqlalchemy.types.TypeEngine, + collation: str | None, + ) -> None: + """Sets column collation if column type has a collation attribute. + + Args: + column_type: Column SQLAlchemy type. + collation: The colation + """ + if hasattr(column_type, "collation") and collation: + column_type.collation = collation + + def _adapt_column_type( + self, + full_table_name: str, + column_name: str, + sql_type: sqlalchemy.types.TypeEngine, + ) -> None: + """Adapt table column type to support the new JSON schema type. + + Args: + full_table_name: The target table name. + column_name: The target column name. + sql_type: The new SQLAlchemy type. + + Raises: + NotImplementedError: if altering columns is not supported. 
+ """ + current_type: sqlalchemy.types.TypeEngine = self._get_column_type( + full_table_name, + column_name, + ) + + # remove collation if present and save it + current_type_collation = self.remove_collation(current_type) + + # Check if the existing column type and the sql type are the same + if str(sql_type) == str(current_type): + # The current column and sql type are the same + # Nothing to do + return + + # Not the same type, generic type or compatible types + # calling merge_sql_types for assistnace + compatible_sql_type = self.merge_sql_types([current_type, sql_type]) + + if str(compatible_sql_type) == str(current_type): + # Nothing to do + return + + # Put the collation level back before altering the column + if current_type_collation: + self.update_collation(compatible_sql_type, current_type_collation) + + if not self.allow_column_alter: + msg = ( + "Altering columns is not supported. Could not convert column " + f"'{full_table_name}.{column_name}' from '{current_type}' to " + f"'{compatible_sql_type}'." + ) + raise NotImplementedError(msg) + + alter_column_ddl = self.get_column_alter_ddl( + table_name=full_table_name, + column_name=column_name, + column_type=compatible_sql_type, + ) + with self._connect() as conn: + conn.execute(alter_column_ddl) + + def serialize_json(self, obj: object) -> str: + """Serialize an object to a JSON string. + + Target connectors may override this method to provide custom serialization logic + for JSON types. + + Args: + obj: The object to serialize. + + Returns: + The JSON string. + + .. versionadded:: 0.31.0 + """ + return simplejson.dumps(obj, use_decimal=True) + + def deserialize_json(self, json_str: str) -> object: + """Deserialize a JSON string to an object. + + Tap connectors may override this method to provide custom deserialization + logic for JSON types. + + Args: + json_str: The JSON string to deserialize. + + Returns: + The deserialized object. + + .. 
versionadded:: 0.31.0 + """ + return json.loads(json_str, parse_float=decimal.Decimal) diff --git a/singer_sdk/default_logging.yml b/singer_sdk/default_logging.yml new file mode 100644 index 000000000..80b08059e --- /dev/null +++ b/singer_sdk/default_logging.yml @@ -0,0 +1,14 @@ +version: 1 +disable_existing_loggers: false +formatters: + console: + format: "{asctime:23s} | {levelname:8s} | {name:20s} | {message}" + style: "{" +handlers: + default: + class: logging.StreamHandler + formatter: console + stream: ext://sys.stderr +root: + level: INFO + handlers: [default] diff --git a/singer_sdk/exceptions.py b/singer_sdk/exceptions.py index eddc7af52..351776291 100644 --- a/singer_sdk/exceptions.py +++ b/singer_sdk/exceptions.py @@ -1,5 +1,12 @@ """Defines a common set of exceptions which developers can raise and/or catch.""" -import requests + +from __future__ import annotations + +import abc +import typing as t + +if t.TYPE_CHECKING: + import requests class ConfigValidationError(Exception): @@ -10,6 +17,10 @@ class FatalAPIError(Exception): """Exception raised when a failed request should not be considered retriable.""" +class InvalidReplicationKeyException(Exception): + """Exception to raise if the replication key is not in the stream properties.""" + + class InvalidStreamSortException(Exception): """Exception to raise if sorting errors are found while syncing the records.""" @@ -18,18 +29,69 @@ class MapExpressionError(Exception): """Failed map expression evaluation.""" -class MaxRecordsLimitException(Exception): - """Exception to raise if the maximum number of allowable records is exceeded.""" +class RequestedAbortException(Exception): + """Base class for abort and interrupt requests. + + Whenever this exception is raised, streams will attempt to shut down gracefully and + will emit a final resumable `STATE` message if it is possible to do so. + """ + + +class MaxRecordsLimitException(RequestedAbortException): + """Exception indicating the sync aborted due to too many records.""" + + +class AbortedSyncExceptionBase(Exception, metaclass=abc.ABCMeta): + """Base exception to raise when a stream sync is aborted. + + Developers should not raise this directly, and instead should use: + 1. `FatalAbortedSyncException` - Indicates the stream aborted abnormally and was not + able to reach a stable and resumable state. + 2. `PausedSyncException` - Indicates the stream aborted abnormally and successfully + reached a 'paused' and resumable state. + + Notes: + - `FULL_TABLE` sync operations cannot be paused and will always trigger a fatal + exception if aborted. + - `INCREMENTAL` and `LOG_BASED` streams are able to be paused only if a number of + preconditions are met, specifically, `state_partitioning_keys` cannot be + overridden and the stream must be declared with `is_sorted=True`. + """ + +class AbortedSyncFailedException(AbortedSyncExceptionBase): + """Exception to raise when sync is aborted and unable to reach a stable state. -class RecordsWitoutSchemaException(Exception): + This signifies that `FULL_TABLE` streams (if applicable) were successfully + completed, and any bookmarks from `INCREMENTAL` and `LOG_BASED` streams were + advanced and finalized successfully. + """ + + +class AbortedSyncPausedException(AbortedSyncExceptionBase): + """Exception to raise when an aborted sync operation is paused successfully. + + This exception indicates the stream aborted abnormally and successfully + reached a 'paused' status, and emitted a resumable state artifact before exiting. 
+ + Streams synced with `FULL_TABLE` replication can never have partial success or + 'paused' status. + + If this exception is raised, this signifies that additional records were left + on the source system and the sync operation aborted before reaching the end of the + stream. This exception signifies that bookmarks from `INCREMENTAL` + and `LOG_BASED` streams were successfully emitted and are resumable. + """ + + +class RecordsWithoutSchemaException(Exception): """Raised if a target receives RECORD messages prior to a SCHEMA message.""" class RetriableAPIError(Exception): """Exception raised when a failed request can be safely retried.""" - def __init__(self, message: str, response: requests.Response = None) -> None: + def __init__(self, message: str, response: requests.Response | None = None) -> None: """Extends the default with the failed response as an attribute. Args: @@ -50,3 +112,14 @@ class TapStreamConnectionFailure(Exception): class TooManyRecordsException(Exception): """Exception to raise when query returns more records than max_records.""" + + +class ConformedNameClashException(Exception): + """Raised when name conforming produces clashes. + + e.g. two columns conformed to the same name + """ + + +class MissingKeyPropertiesError(Exception): + """Raised when a recieved (and/or transformed) record is missing key properties.""" diff --git a/singer_sdk/helpers/__init__.py b/singer_sdk/helpers/__init__.py index 4a94cbfc1..2a6daa1fb 100644 --- a/singer_sdk/helpers/__init__.py +++ b/singer_sdk/helpers/__init__.py @@ -1 +1,3 @@ """Helper library for the SDK.""" + +from __future__ import annotations diff --git a/singer_sdk/helpers/_batch.py b/singer_sdk/helpers/_batch.py new file mode 100644 index 000000000..62447ddb3 --- /dev/null +++ b/singer_sdk/helpers/_batch.py @@ -0,0 +1,245 @@ +"""Batch helpers.""" + +from __future__ import annotations + +import enum +import platform +import typing as t +from contextlib import contextmanager +from dataclasses import asdict, dataclass, field +from urllib.parse import ParseResult, urlencode, urlparse + +import fs + +from singer_sdk._singerlib.messages import Message, SingerMessageType + +if t.TYPE_CHECKING: + from fs.base import FS + +DEFAULT_BATCH_SIZE = 10000 + + +class BatchFileFormat(str, enum.Enum): + """Batch file format.""" + + JSONL = "jsonl" + """JSON Lines format.""" + + +@dataclass +class BaseBatchFileEncoding: + """Base class for batch file encodings.""" + + registered_encodings: t.ClassVar[dict[str, type[BaseBatchFileEncoding]]] = {} + __encoding_format__: t.ClassVar[str] = "OVERRIDE_ME" + + # Base encoding fields + format: str = field(init=False) # noqa: A003 + """The format of the batch file.""" + + compression: str | None = None + """The compression of the batch file.""" + + def __init_subclass__(cls, **kwargs: t.Any) -> None: + """Register subclasses. + + Args: + **kwargs: Keyword arguments. 
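`BaseBatchFileEncoding` above acts as its own registry: every subclass declares an `__encoding_format__` string and `__init_subclass__` records it, so a serialized dict can later be dispatched back to the right encoding class. The registration pattern in isolation (class and attribute names here are illustrative):

from __future__ import annotations

import typing as t


class Encoding:
    """Base class that keeps a registry of its subclasses by format name."""

    registry: t.ClassVar[dict[str, type[Encoding]]] = {}
    format_name: t.ClassVar[str] = "OVERRIDE_ME"

    def __init_subclass__(cls, **kwargs: t.Any) -> None:
        super().__init_subclass__(**kwargs)
        # Every subclass registers itself under its declared format name.
        Encoding.registry[cls.format_name] = cls


class JSONLines(Encoding):
    format_name = "jsonl"


# Encoding.registry["jsonl"] is JSONLines -> True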
+ """ + super().__init_subclass__(**kwargs) + cls.registered_encodings[cls.__encoding_format__] = cls + + def __post_init__(self) -> None: + """Post-init hook.""" + self.format = self.__encoding_format__ + + @classmethod + def from_dict(cls, data: dict[str, t.Any]) -> BaseBatchFileEncoding: + """Create an encoding from a dictionary.""" + data = data.copy() + encoding_format = data.pop("format") + encoding_cls = cls.registered_encodings[encoding_format] + return encoding_cls(**data) + + +@dataclass +class JSONLinesEncoding(BaseBatchFileEncoding): + """JSON Lines encoding for batch files.""" + + __encoding_format__ = "jsonl" + + +@dataclass +class SDKBatchMessage(Message): + """Singer batch message in the Meltano Singer SDK flavor.""" + + stream: str + """The stream name.""" + + encoding: BaseBatchFileEncoding + """The file encoding of the batch.""" + + manifest: list[str] = field(default_factory=list) + """The manifest of files in the batch.""" + + def __post_init__(self): + if isinstance(self.encoding, dict): + self.encoding = BaseBatchFileEncoding.from_dict(self.encoding) + + self.type = SingerMessageType.BATCH + + +@dataclass +class StorageTarget: + """Storage target.""" + + root: str + """"The root directory of the storage target.""" + + prefix: str | None = None + """"The file prefix.""" + + params: dict = field(default_factory=dict) + """"The storage parameters.""" + + def asdict(self): + """Return a dictionary representation of the message. + + Returns: + A dictionary with the defined message fields. + """ + return asdict(self) + + @classmethod + def from_dict(cls, data: dict[str, t.Any]) -> StorageTarget: + """Create an encoding from a dictionary. + + Args: + data: The dictionary to create the message from. + + Returns: + The created message. + """ + return cls(**data) + + @staticmethod + def split_url(url: str) -> tuple[str, str]: + """Split a URL into a head and tail pair. + + Args: + url: The URL to split. + + Returns: + A tuple of the head and tail parts of the URL. + """ + if platform.system() == "Windows" and "\\" in url: + # Original code from pyFileSystem split + # Augemnted slitly to properly Windows paths + split = url.rsplit("\\", 1) + return (split[0] or "\\", split[1]) + + return fs.path.split(url) + + @classmethod + def from_url(cls, url: str) -> StorageTarget: + """Create a storage target from a file URL. + + Args: + url: The URL to create the storage target from. + + Returns: + The created storage target. + """ + parsed_url = urlparse(url) + new_url = parsed_url._replace(query="") + return cls(root=new_url.geturl()) + + @property + def fs_url(self) -> ParseResult: + """Get the storage target URL. + + Returns: + The storage target URL. + """ + return urlparse(self.root)._replace(query=urlencode(self.params)) + + @contextmanager + def fs(self, **kwargs: t.Any) -> t.Generator[FS, None, None]: + """Get a filesystem object for the storage target. + + Args: + kwargs: Additional arguments to pass ``f`.open_fs``. + + Returns: + The filesystem object. + """ + filesystem = fs.open_fs(self.fs_url.geturl(), **kwargs) + yield filesystem + filesystem.close() + + @contextmanager + def open( # noqa: A003 + self, + filename: str, + mode: str = "rb", + ) -> t.Generator[t.IO, None, None]: + """Open a file in the storage target. + + Args: + filename: The filename to open. + mode: The mode to open the file in. + + Returns: + The opened file. 
+ """ + filesystem = fs.open_fs(self.root, writeable=True, create=True) + fo = filesystem.open(filename, mode=mode) + try: + yield fo + finally: + fo.close() + filesystem.close() + + +@dataclass +class BatchConfig: + """Batch configuration.""" + + encoding: BaseBatchFileEncoding + """The encoding of the batch file.""" + + storage: StorageTarget + """The storage target of the batch file.""" + + batch_size: int = DEFAULT_BATCH_SIZE + """The max number of records in a batch.""" + + def __post_init__(self): + if isinstance(self.encoding, dict): + self.encoding = BaseBatchFileEncoding.from_dict(self.encoding) + + if isinstance(self.storage, dict): + self.storage = StorageTarget.from_dict(self.storage) + + if self.batch_size is None: + self.batch_size = DEFAULT_BATCH_SIZE + + def asdict(self): + """Return a dictionary representation of the message. + + Returns: + A dictionary with the defined message fields. + """ + return asdict(self) + + @classmethod + def from_dict(cls, data: dict[str, t.Any]) -> BatchConfig: + """Create an encoding from a dictionary. + + Args: + data: The dictionary to create the message from. + + Returns: + The created message. + """ + return cls(**data) diff --git a/singer_sdk/helpers/_catalog.py b/singer_sdk/helpers/_catalog.py index df7493b4d..49ea2f1cc 100644 --- a/singer_sdk/helpers/_catalog.py +++ b/singer_sdk/helpers/_catalog.py @@ -1,20 +1,28 @@ """Private helper functions for catalog and selection logic.""" +from __future__ import annotations + +import typing as t from copy import deepcopy -from logging import Logger -from typing import Any, Dict, Optional, Tuple from memoization import cached -from singer_sdk.helpers._singer import Catalog, SelectionMask from singer_sdk.helpers._typing import is_object_type +if t.TYPE_CHECKING: + from logging import Logger + + from singer_sdk._singerlib import Catalog, SelectionMask + _MAX_LRU_CACHE = 500 @cached(max_size=_MAX_LRU_CACHE) def get_selected_schema( - stream_name: str, schema: dict, mask: SelectionMask, logger: Logger + stream_name: str, + schema: dict, + mask: SelectionMask, + logger: Logger, ) -> dict: """Return a copy of the provided JSON schema, dropping any fields not selected.""" new_schema = deepcopy(schema) @@ -26,7 +34,7 @@ def _pop_deselected_schema( schema: dict, mask: SelectionMask, stream_name: str, - breadcrumb: Tuple[str, ...], + breadcrumb: tuple[str, ...], logger: Logger, ) -> None: """Remove anything from schema that is not selected. @@ -39,18 +47,21 @@ def _pop_deselected_schema( schema_at_breadcrumb = schema_at_breadcrumb.get(crumb, {}) if not isinstance(schema_at_breadcrumb, dict): - raise ValueError( - f"Expected dictionary type instead of " - f"'{type(schema_at_breadcrumb).__name__}' '{schema_at_breadcrumb}' " - f"for '{stream_name}' bookmark '{str(breadcrumb)}' in '{schema}'" + msg = ( + "Expected dictionary type instead of " + f"'{type(schema_at_breadcrumb).__name__}' '{schema_at_breadcrumb}' for " + f"'{stream_name}' bookmark '{breadcrumb!s}' in '{schema}'" ) + raise ValueError(msg) if "properties" not in schema_at_breadcrumb: return for property_name, property_def in list(schema_at_breadcrumb["properties"].items()): - property_breadcrumb: Tuple[str, ...] = tuple( - list(breadcrumb) + ["properties", property_name] + property_breadcrumb: tuple[str, ...] 
= ( + *breadcrumb, + "properties", + property_name, ) selected = mask[property_breadcrumb] if not selected: @@ -60,16 +71,20 @@ def _pop_deselected_schema( if is_object_type(property_def): # call recursively in case any subproperties are deselected. _pop_deselected_schema( - schema, mask, stream_name, property_breadcrumb, logger + schema, + mask, + stream_name, + property_breadcrumb, + logger, ) def pop_deselected_record_properties( - record: Dict[str, Any], + record: dict[str, t.Any], schema: dict, mask: SelectionMask, logger: Logger, - breadcrumb: Tuple[str, ...] = (), + breadcrumb: tuple[str, ...] = (), ) -> None: """Remove anything from record properties that is not selected. @@ -77,7 +92,7 @@ def pop_deselected_record_properties( updating in place. """ for property_name, val in list(record.items()): - property_breadcrumb = breadcrumb + ("properties", property_name) + property_breadcrumb = (*breadcrumb, "properties", property_name) selected = mask[property_breadcrumb] if not selected: record.pop(property_name) @@ -86,7 +101,11 @@ def pop_deselected_record_properties( if isinstance(val, dict): # call recursively in case any subproperties are deselected. pop_deselected_record_properties( - val, schema, mask, logger, property_breadcrumb + val, + schema, + mask, + logger, + property_breadcrumb, ) @@ -99,8 +118,9 @@ def deselect_all_streams(catalog: Catalog) -> None: def set_catalog_stream_selected( catalog: Catalog, stream_name: str, + *, selected: bool, - breadcrumb: Optional[Tuple[str, ...]] = None, + breadcrumb: tuple[str, ...] | None = None, ) -> None: """Return True if the property is selected for extract. @@ -109,14 +129,16 @@ def set_catalog_stream_selected( """ breadcrumb = breadcrumb or () if not isinstance(breadcrumb, tuple): - raise ValueError( - f"Expected tuple value for breadcrumb '{breadcrumb}'. " - f"Got {type(breadcrumb).__name__}" + msg = ( + f"Expected tuple value for breadcrumb '{breadcrumb}'. Got " + f"{type(breadcrumb).__name__}" ) + raise ValueError(msg) catalog_entry = catalog.get_stream(stream_name) if not catalog_entry: - raise ValueError(f"Catalog entry missing for '{stream_name}'. Skipping.") + msg = f"Catalog entry missing for '{stream_name}'. Skipping." 
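`pop_deselected_record_properties` above walks a record in place, building a `('properties', <name>, ...)` breadcrumb for each field, removing those the selection mask marks as deselected, and recursing into nested objects. A simplified sketch where the mask is a plain dict of breadcrumbs and unlisted breadcrumbs count as selected:

from __future__ import annotations

import typing as t


def pop_deselected(
    record: dict[str, t.Any],
    mask: dict[tuple[str, ...], bool],
    breadcrumb: tuple[str, ...] = (),
) -> None:
    """Remove deselected properties from the record, updating it in place."""
    for name, value in list(record.items()):
        crumb = (*breadcrumb, "properties", name)
        if not mask.get(crumb, True):
            record.pop(name)
            continue
        if isinstance(value, dict):
            pop_deselected(value, mask, crumb)


record = {"id": 1, "secrets": {"token": "x"}}
pop_deselected(record, {("properties", "secrets"): False})
# record -> {"id": 1}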
+ raise ValueError(msg) md_entry = catalog_entry.metadata[breadcrumb] md_entry.selected = selected diff --git a/singer_sdk/helpers/_classproperty.py b/singer_sdk/helpers/_classproperty.py index c14256a5e..2e3b43775 100644 --- a/singer_sdk/helpers/_classproperty.py +++ b/singer_sdk/helpers/_classproperty.py @@ -2,7 +2,7 @@ """Defines the `classproperty` decorator.""" -# noqa +from __future__ import annotations class classproperty(property): diff --git a/singer_sdk/helpers/_compat.py b/singer_sdk/helpers/_compat.py index 2c39f71fb..20b7a399a 100644 --- a/singer_sdk/helpers/_compat.py +++ b/singer_sdk/helpers/_compat.py @@ -1,15 +1,19 @@ """Compatibility helpers.""" -try: - from typing import final -except ImportError: - # Final not available until Python3.8 - final = lambda f: f # noqa: E731 +from __future__ import annotations -try: +import sys + +if sys.version_info < (3, 8): + import importlib_metadata as metadata + from typing_extensions import final +else: from importlib import metadata -except ImportError: - # Running on pre-3.8 Python; use importlib-metadata package - import importlib_metadata as metadata # type: ignore + from typing import final # noqa: ICN003 + +if sys.version_info < (3, 9): + import importlib_resources as resources +else: + from importlib import resources -__all__ = ["metadata", "final"] +__all__ = ["metadata", "final", "resources"] diff --git a/singer_sdk/helpers/_conformers.py b/singer_sdk/helpers/_conformers.py new file mode 100644 index 000000000..46963284e --- /dev/null +++ b/singer_sdk/helpers/_conformers.py @@ -0,0 +1,46 @@ +"""Helper functions for conforming identifiers.""" +from __future__ import annotations + +import re +from string import ascii_lowercase, digits + + +def snakecase(string: str) -> str: + """Convert string into snake case. + + Args: + string: String to convert. + + Returns: + string: Snake cased string. + """ + string = re.sub(r"[\-\.\s]", "_", string) + string = ( + string[0].lower() + + re.sub( + r"[A-Z]", + lambda matched: "_" + str(matched.group(0).lower()), + string[1:], + ) + if string + else string + ) + return re.sub(r"_{2,}", "_", string).rstrip("_") + + +def replace_leading_digit(string: str) -> str: + """Replace leading numeric character with equivalent letter. + + Args: + string: String to process. + + Returns: + A modified string if original starts with a number, + else the unmodified original. 
+ """ + if string[0] in digits: + letters = list(ascii_lowercase) + numbers = [int(d) for d in digits] + digit_map = {n: letters[n] for n in numbers} + return digit_map[int(string[0])] + string[1:] + return string diff --git a/singer_sdk/helpers/_flattening.py b/singer_sdk/helpers/_flattening.py index 018d8cd4a..02585dd80 100644 --- a/singer_sdk/helpers/_flattening.py +++ b/singer_sdk/helpers/_flattening.py @@ -1,18 +1,20 @@ """Internal helper library for record flatteting functions.""" +from __future__ import annotations + import collections import itertools import json import re +import typing as t from copy import deepcopy -from typing import Any, List, Mapping, MutableMapping, NamedTuple, Optional, Tuple import inflection DEFAULT_FLATTENING_SEPARATOR = "__" -class FlatteningOptions(NamedTuple): +class FlatteningOptions(t.NamedTuple): """A stream map which performs the flattening role.""" max_level: int @@ -21,8 +23,8 @@ class FlatteningOptions(NamedTuple): def get_flattening_options( - plugin_config: Mapping, -) -> Optional[FlatteningOptions]: + plugin_config: t.Mapping, +) -> FlatteningOptions | None: """Get flattening options, if flattening is enabled. Args: @@ -37,7 +39,7 @@ def get_flattening_options( return None -def flatten_key(key_name: str, parent_keys: List[str], separator: str = "__") -> str: +def flatten_key(key_name: str, parent_keys: list[str], separator: str = "__") -> str: """Concatenate `key_name` with its `parent_keys` using `separator`. Args: @@ -54,14 +56,18 @@ def flatten_key(key_name: str, parent_keys: List[str], separator: str = "__") -> >>> flatten_key("foo", ["bar", "baz"], separator=".") 'bar.baz.foo' """ - full_key = parent_keys + [key_name] + full_key = [*parent_keys, key_name] inflected_key = full_key.copy() reducer_index = 0 - while len(separator.join(inflected_key)) >= 255 and reducer_index < len( - inflected_key + while len( + separator.join(inflected_key), + ) >= 255 and reducer_index < len( # noqa: PLR2004 + inflected_key, ): reduced_key = re.sub( - r"[a-z]", "", inflection.camelize(inflected_key[reducer_index]) + r"[a-z]", + "", + inflection.camelize(inflected_key[reducer_index]), ) inflected_key[reducer_index] = ( reduced_key if len(reduced_key) > 1 else inflected_key[reducer_index][0:3] @@ -204,9 +210,9 @@ def flatten_schema( return new_schema -def _flatten_schema( +def _flatten_schema( # noqa: C901 schema_node: dict, - parent_keys: List[str] = None, + parent_keys: list[str] | None = None, separator: str = "__", level: int = 0, max_level: int = 0, @@ -215,7 +221,7 @@ def _flatten_schema( Args: schema_node: The schema node to flatten. - parent_key: The parent's key, provided as a list of node names. + parent_keys: The parent's key, provided as a list of node names. separator: The string to use when concatenating key names. level: The current recursion level (zero-based). max_level: The max recursion level (zero-based, exclusive). 
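A minimal sketch of the flattening behavior implemented in the hunk below
(illustrative only; assumes max_level=1 and the default "__" separator):

    schema_node = {
        "properties": {
            "id": {"type": "integer"},
            "address": {
                "type": "object",
                "properties": {"street": {"type": "string"}},
            },
        },
    }
    _flatten_schema(schema_node, max_level=1)
    # -> {"id": {"type": "integer"}, "address__street": {"type": "string"}}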
@@ -226,36 +232,35 @@ def _flatten_schema( if parent_keys is None: parent_keys = [] - items: List[Tuple[str, dict]] = [] + items: list[tuple[str, dict]] = [] if "properties" not in schema_node: return {} for k, v in schema_node["properties"].items(): new_key = flatten_key(k, parent_keys, separator) - if "type" in v.keys(): + if "type" in v: if "object" in v["type"] and "properties" in v and level < max_level: items.extend( _flatten_schema( v, - parent_keys + [k], + [*parent_keys, k], separator=separator, level=level + 1, max_level=max_level, - ).items() + ).items(), ) else: items.append((new_key, v)) - else: - if len(v.values()) > 0: - if list(v.values())[0][0]["type"] == "string": - list(v.values())[0][0]["type"] = ["null", "string"] - items.append((new_key, list(v.values())[0][0])) - elif list(v.values())[0][0]["type"] == "array": - list(v.values())[0][0]["type"] = ["null", "array"] - items.append((new_key, list(v.values())[0][0])) - elif list(v.values())[0][0]["type"] == "object": - list(v.values())[0][0]["type"] = ["null", "object"] - items.append((new_key, list(v.values())[0][0])) + elif len(v.values()) > 0: + if next(iter(v.values()))[0]["type"] == "string": + next(iter(v.values()))[0]["type"] = ["null", "string"] + items.append((new_key, next(iter(v.values()))[0])) + elif next(iter(v.values()))[0]["type"] == "array": + next(iter(v.values()))[0]["type"] = ["null", "array"] + items.append((new_key, next(iter(v.values()))[0])) + elif next(iter(v.values()))[0]["type"] == "object": + next(iter(v.values()))[0]["type"] = ["null", "object"] + items.append((new_key, next(iter(v.values()))[0])) # Sort and check for duplicates def _key_func(item): @@ -264,7 +269,8 @@ def _key_func(item): sorted_items = sorted(items, key=_key_func) for k, g in itertools.groupby(sorted_items, key=_key_func): if len(list(g)) > 1: - raise ValueError(f"Duplicate column name produced in schema: {k}") + msg = f"Duplicate column name produced in schema: {k}" + raise ValueError(msg) # Return the (unsorted) result as a dict. return dict(items) @@ -296,9 +302,10 @@ def flatten_record( def _flatten_record( - record_node: MutableMapping[Any, Any], - flattened_schema: dict = None, - parent_key: List[str] = None, + record_node: t.MutableMapping[t.Any, t.Any], + *, + flattened_schema: dict | None = None, + parent_key: list[str] | None = None, separator: str = "__", level: int = 0, max_level: int = 0, @@ -322,19 +329,19 @@ def _flatten_record( if parent_key is None: parent_key = [] - items: List[Tuple[str, Any]] = [] + items: list[tuple[str, t.Any]] = [] for k, v in record_node.items(): new_key = flatten_key(k, parent_key, separator) if isinstance(v, collections.abc.MutableMapping) and level < max_level: items.extend( _flatten_record( v, - flattened_schema, - parent_key + [k], + flattened_schema=flattened_schema, + parent_key=[*parent_key, k], separator=separator, level=level + 1, max_level=max_level, - ).items() + ).items(), ) else: items.append( @@ -343,19 +350,19 @@ def _flatten_record( json.dumps(v) if _should_jsondump_value(k, v, flattened_schema) else v, - ) + ), ) return dict(items) -def _should_jsondump_value(key: str, value: Any, flattened_schema=None) -> bool: +def _should_jsondump_value(key: str, value: t.Any, flattened_schema=None) -> bool: """Return True if json.dump() should be used to serialize the value. Args: key: [description] value: [description] - schema: [description]. Defaults to None. + flattened_schema: [description]. Defaults to None. 
Returns: [description] diff --git a/singer_sdk/helpers/_resources.py b/singer_sdk/helpers/_resources.py new file mode 100644 index 000000000..39fe9c592 --- /dev/null +++ b/singer_sdk/helpers/_resources.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +import sys +import typing as t + +if t.TYPE_CHECKING: + from types import ModuleType + +if sys.version_info < (3, 9): + import importlib_resources + from importlib_resources.abc import Traversable +else: + import importlib.resources as importlib_resources + from importlib.abc import Traversable + + +def get_package_files(package: str | ModuleType) -> Traversable: + """Load a file from a package. + + Args: + package: The package to load the file from. + file: The file to load. + + Returns: + The file as a Traversable object. + """ + return t.cast(Traversable, importlib_resources.files(package)) diff --git a/singer_sdk/helpers/_secrets.py b/singer_sdk/helpers/_secrets.py index f1d7561dd..ad7d05032 100644 --- a/singer_sdk/helpers/_secrets.py +++ b/singer_sdk/helpers/_secrets.py @@ -1,5 +1,7 @@ """Helpers function for secrets management.""" +from __future__ import annotations + COMMON_SECRET_KEYS = [ "db_password", "password", @@ -18,10 +20,8 @@ def is_common_secret_key(key_name: str) -> bool: if key_name in COMMON_SECRET_KEYS: return True return any( - [ - key_name.lower().endswith(key_suffix) - for key_suffix in COMMON_SECRET_KEY_SUFFIXES - ] + key_name.lower().endswith(key_suffix) + for key_suffix in COMMON_SECRET_KEY_SUFFIXES ) diff --git a/singer_sdk/helpers/_simpleeval.py b/singer_sdk/helpers/_simpleeval.py index f0692c3d4..c3fb41c3f 100644 --- a/singer_sdk/helpers/_simpleeval.py +++ b/singer_sdk/helpers/_simpleeval.py @@ -98,6 +98,7 @@ """ # flake8: noqa # Ignoring flake errors in imported module +# isort: dont-add-imports import ast import operator as op @@ -125,7 +126,7 @@ # builtins is a dict in python >3.6 but a module before DISALLOW_FUNCTIONS = {type, isinstance, eval, getattr, setattr, repr, compile, open} if hasattr(__builtins__, "help") or ( - hasattr(__builtins__, "__contains__") and "help" in __builtins__ # type: ignore + hasattr(__builtins__, "__contains__") and "help" in __builtins__ ): # PyInstaller environment doesn't include this module. DISALLOW_FUNCTIONS.add(help) diff --git a/singer_sdk/helpers/_singer.py b/singer_sdk/helpers/_singer.py deleted file mode 100644 index 465e58fd9..000000000 --- a/singer_sdk/helpers/_singer.py +++ /dev/null @@ -1,279 +0,0 @@ -import logging -from dataclasses import dataclass, fields -from enum import Enum -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast - -from singer.catalog import Catalog as BaseCatalog -from singer.catalog import CatalogEntry as BaseCatalogEntry -from singer.schema import Schema - -Breadcrumb = Tuple[str, ...] - -logger = logging.getLogger(__name__) - - -class SelectionMask(Dict[Breadcrumb, bool]): - """Boolean mask for property selection in schemas and records.""" - - def __missing__(self, breadcrumb: Breadcrumb) -> bool: - """Handle missing breadcrumbs. - - - Properties default to parent value if available. - - Root (stream) defaults to True. 
- """ - if len(breadcrumb) >= 2: - parent = breadcrumb[:-2] - return self[parent] - else: - return True - - -@dataclass -class Metadata: - """Base stream or property metadata.""" - - class InclusionType(str, Enum): - """Catalog inclusion types.""" - - AVAILABLE = "available" - AUTOMATIC = "automatic" - UNSUPPORTED = "unsupported" - - inclusion: Optional[InclusionType] = None - selected: Optional[bool] = None - selected_by_default: Optional[bool] = None - - @classmethod - def from_dict(cls, value: Dict[str, Any]): - """Parse metadata dictionary.""" - return cls( - **{ - field.name: value.get(field.name.replace("_", "-")) - for field in fields(cls) - } - ) - - def to_dict(self) -> Dict[str, Any]: - """Convert metadata to a JSON-encodeable dictionary.""" - result = {} - - for field in fields(self): - value = getattr(self, field.name) - if value is not None: - result[field.name.replace("_", "-")] = value - - return result - - -@dataclass -class StreamMetadata(Metadata): - """Stream metadata.""" - - table_key_properties: Optional[List[str]] = None - forced_replication_method: Optional[str] = None - valid_replication_keys: Optional[List[str]] = None - schema_name: Optional[str] = None - - -class MetadataMapping(Dict[Breadcrumb, Union[Metadata, StreamMetadata]]): - """Stream metadata mapping.""" - - @classmethod - def from_iterable(cls, iterable: Iterable[Dict[str, Any]]): - """Create a metadata mapping from an iterable of metadata dictionaries.""" - mapping = cls() - for d in iterable: - breadcrumb = tuple(d["breadcrumb"]) - metadata = d["metadata"] - if breadcrumb: - mapping[breadcrumb] = Metadata.from_dict(metadata) - else: - mapping[breadcrumb] = StreamMetadata.from_dict(metadata) - - return mapping - - def to_list(self) -> List[Dict[str, Any]]: - """Convert mapping to a JSON-encodable list.""" - return [ - {"breadcrumb": list(k), "metadata": v.to_dict()} for k, v in self.items() - ] - - def __missing__(self, breadcrumb: Breadcrumb): - """Handle missing metadata entries.""" - self[breadcrumb] = Metadata() if breadcrumb else StreamMetadata() - return self[breadcrumb] - - @property - def root(self): - """Get stream (root) metadata from this mapping.""" - meta: StreamMetadata = self[()] - return meta - - @classmethod - def get_standard_metadata( - cls, - schema: Optional[Dict[str, Any]] = None, - schema_name: Optional[str] = None, - key_properties: Optional[List[str]] = None, - valid_replication_keys: Optional[List[str]] = None, - replication_method: Optional[str] = None, - ): - """Get default metadata for a stream.""" - mapping = cls() - root = StreamMetadata( - table_key_properties=key_properties, - forced_replication_method=replication_method, - valid_replication_keys=valid_replication_keys, - ) - - if schema: - root.inclusion = Metadata.InclusionType.AVAILABLE - - if schema_name: - root.schema_name = schema_name - - for field_name in schema.get("properties", {}).keys(): - if key_properties and field_name in key_properties: - entry = Metadata(inclusion=Metadata.InclusionType.AUTOMATIC) - else: - entry = Metadata(inclusion=Metadata.InclusionType.AVAILABLE) - - mapping[("properties", field_name)] = entry - - mapping[()] = root - - return mapping - - def resolve_selection(self) -> SelectionMask: - """Resolve selection for metadata breadcrumbs and store them in a mapping.""" - return SelectionMask( - (breadcrumb, self._breadcrumb_is_selected(breadcrumb)) - for breadcrumb in self - ) - - def _breadcrumb_is_selected(self, breadcrumb: Breadcrumb) -> bool: - """Determine if a property breadcrumb is 
selected based on existing metadata. - - An empty breadcrumb (empty tuple) indicates the stream itself. Otherwise, the - breadcrumb is the path to a property within the stream. - """ - if not self: - # Default to true if no metadata to say otherwise - return True - - md_entry = self.get(breadcrumb, Metadata()) - parent_value = None - - if len(breadcrumb) > 0: - parent_breadcrumb = breadcrumb[:-2] - parent_value = self._breadcrumb_is_selected(parent_breadcrumb) - - if parent_value is False: - return parent_value - - if md_entry.inclusion == Metadata.InclusionType.UNSUPPORTED: - if md_entry.selected is True: - logger.debug( - "Property '%s' was selected but is not supported. " - "Ignoring selected==True input.", - ":".join(breadcrumb), - ) - return False - - if md_entry.inclusion == Metadata.InclusionType.AUTOMATIC: - if md_entry.selected is False: - logger.debug( - "Property '%s' was deselected while also set " - "for automatic inclusion. Ignoring selected==False input.", - ":".join(breadcrumb), - ) - return True - - if md_entry.selected is not None: - return md_entry.selected - - if md_entry.selected_by_default is not None: - return md_entry.selected_by_default - - logger.debug( - "Selection metadata omitted for '%s'. " - "Using parent value of selected=%s.", - breadcrumb, - parent_value, - ) - return parent_value or False - - -@dataclass -class CatalogEntry(BaseCatalogEntry): - """Singer catalog entry.""" - - tap_stream_id: str - metadata: MetadataMapping - schema: Schema - stream: Optional[str] = None - key_properties: Optional[List[str]] = None - replication_key: Optional[str] = None - is_view: Optional[bool] = None - database: Optional[str] = None - table: Optional[str] = None - row_count: Optional[int] = None - stream_alias: Optional[str] = None - replication_method: Optional[str] = None - - @classmethod - def from_dict(cls, stream: Dict[str, Any]): - """Create a catalog entry from a dictionary.""" - return cls( - tap_stream_id=stream["tap_stream_id"], - stream=stream.get("stream"), - replication_key=stream.get("replication_key"), - key_properties=stream.get("key_properties"), - database=stream.get("database_name"), - table=stream.get("table_name"), - schema=Schema.from_dict(stream.get("schema", {})), - is_view=stream.get("is_view"), - stream_alias=stream.get("stream_alias"), - metadata=MetadataMapping.from_iterable(stream.get("metadata", [])), - replication_method=stream.get("replication_method"), - ) - - def to_dict(self): - """Convert entry to a dictionary.""" - d = super().to_dict() - d["metadata"] = self.metadata.to_list() - return d - - -class Catalog(Dict[str, CatalogEntry], BaseCatalog): - """Singer catalog mapping of stream entries.""" - - @classmethod - def from_dict(cls, data: Dict[str, List[Dict[str, Any]]]) -> "Catalog": - """Create a catalog from a dictionary.""" - instance = cls() - for stream in data.get("streams", []): - entry = CatalogEntry.from_dict(stream) - instance[entry.tap_stream_id] = entry - return instance - - def to_dict(self) -> Dict[str, Any]: - """Return a dictionary representation of the catalog. - - Returns: - A dictionary with the defined catalog streams. 
- """ - return cast(Dict[str, Any], super().to_dict()) - - @property - def streams(self) -> List[CatalogEntry]: - """Get catalog entries.""" - return list(self.values()) - - def add_stream(self, entry: CatalogEntry) -> None: - """Add a stream entry to the catalog.""" - self[entry.tap_stream_id] = entry - - def get_stream(self, stream_id: str) -> Optional[CatalogEntry]: - """Retrieve a stream entry from the catalog.""" - return self.get(stream_id) diff --git a/singer_sdk/helpers/_state.py b/singer_sdk/helpers/_state.py index 57449348d..9d0102186 100644 --- a/singer_sdk/helpers/_state.py +++ b/singer_sdk/helpers/_state.py @@ -1,46 +1,44 @@ """Helper functions for state and bookmark management.""" -import datetime -from typing import Any, Callable, List, Optional, Union, cast +from __future__ import annotations + +import typing as t from singer_sdk.exceptions import InvalidStreamSortException from singer_sdk.helpers._typing import to_json_compatible +if t.TYPE_CHECKING: + import datetime + + _T = t.TypeVar("_T", datetime.datetime, str, int, float) + PROGRESS_MARKERS = "progress_markers" PROGRESS_MARKER_NOTE = "Note" SIGNPOST_MARKER = "replication_key_signpost" STARTING_MARKER = "starting_replication_value" -def get_state_if_exists( +def get_state_if_exists( # noqa: PLR0911 tap_state: dict, tap_stream_id: str, - state_partition_context: Optional[dict] = None, - key: Optional[str] = None, -) -> Optional[Any]: + state_partition_context: dict | None = None, + key: str | None = None, +) -> t.Any | None: """Return the stream or partition state, creating a new one if it does not exist. - Parameters - ---------- - tap_state : dict - the existing state dict which contains all streams. - tap_stream_id : str - the id of the stream - state_partition_context : Optional[dict], optional - keys which identify the partition context, by default None (not partitioned) - key : Optional[str], optional - name of the key searched for, by default None (return entire state if found) - - Returns - ------- - Optional[Any] - Returns the state if exists, otherwise None + Args: + tap_state: the existing state dict which contains all streams. + tap_stream_id: the id of the stream + state_partition_context: keys which identify the partition context, + by default None (not partitioned) + key: name of the key searched for, by default None (return entire state if + found) - Raises - ------ - ValueError - Raised if state is invalid or cannot be parsed. + Returns: + Returns the state if exists, otherwise None + Raises: + ValueError: Raised if state is invalid or cannot be parsed. 
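+
+    Example (illustrative; assumes a minimal single-stream state):
+        >>> state = {"bookmarks": {"users": {"replication_key_value": "2023-01-01"}}}
+        >>> get_state_if_exists(state, "users")
+        {'replication_key_value': '2023-01-01'}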
""" if "bookmarks" not in tap_state: return None @@ -56,7 +54,8 @@ def get_state_if_exists( return None # No partitions defined matched_partition = _find_in_partitions_list( - stream_state["partitions"], state_partition_context + stream_state["partitions"], + state_partition_context, ) if matched_partition is None: return None # Partition definition not present @@ -65,35 +64,35 @@ def get_state_if_exists( return matched_partition -def get_state_partitions_list( - tap_state: dict, tap_stream_id: str -) -> Optional[List[dict]]: +def get_state_partitions_list(tap_state: dict, tap_stream_id: str) -> list[dict] | None: """Return a list of partitions defined in the state, or None if not defined.""" return (get_state_if_exists(tap_state, tap_stream_id) or {}).get("partitions", None) def _find_in_partitions_list( - partitions: List[dict], state_partition_context: dict -) -> Optional[dict]: + partitions: list[dict], + state_partition_context: dict, +) -> dict | None: found = [ partition_state for partition_state in partitions if partition_state["context"] == state_partition_context ] if len(found) > 1: - raise ValueError( - f"State file contains duplicate entries for partition: " - "{state_partition_context}.\n" - f"Matching state values were: {str(found)}" + msg = ( + "State file contains duplicate entries for partition: " + f"{{state_partition_context}}.\nMatching state values were: {found!s}" ) + raise ValueError(msg) if found: - return cast(dict, found[0]) + return t.cast(dict, found[0]) return None def _create_in_partitions_list( - partitions: List[dict], state_partition_context: dict + partitions: list[dict], + state_partition_context: dict, ) -> dict: # Existing partition not found. Creating new state entry in partitions list... new_partition_state = {"context": state_partition_context} @@ -102,44 +101,39 @@ def _create_in_partitions_list( def get_writeable_state_dict( - tap_state: dict, tap_stream_id: str, state_partition_context: Optional[dict] = None + tap_state: dict, + tap_stream_id: str, + state_partition_context: dict | None = None, ) -> dict: """Return the stream or partition state, creating a new one if it does not exist. - Parameters - ---------- - tap_state : dict - the existing state dict which contains all streams. - tap_stream_id : str - the id of the stream - state_partition_context : Optional[dict], optional - keys which identify the partition context, by default None (not partitioned) - - Returns - ------- - dict - Returns a writeable dict at the stream or partition level. + Args: + tap_state: the existing state dict which contains all streams. + tap_stream_id: the id of the stream + state_partition_context: keys which identify the partition context, + by default None (not partitioned) - Raises - ------ - ValueError - Raise an error if duplicate entries are found. + Returns: + Returns a writeable dict at the stream or partition level. + Raises: + ValueError: Raise an error if duplicate entries are found. """ if tap_state is None: - raise ValueError("Cannot write state to missing state dictionary.") + msg = "Cannot write state to missing state dictionary." 
+ raise ValueError(msg) if "bookmarks" not in tap_state: tap_state["bookmarks"] = {} if tap_stream_id not in tap_state["bookmarks"]: tap_state["bookmarks"][tap_stream_id] = {} - stream_state = cast(dict, tap_state["bookmarks"][tap_stream_id]) + stream_state = t.cast(dict, tap_state["bookmarks"][tap_stream_id]) if not state_partition_context: return stream_state if "partitions" not in stream_state: stream_state["partitions"] = [] - stream_state_partitions: List[dict] = stream_state["partitions"] + stream_state_partitions: list[dict] = stream_state["partitions"] found = _find_in_partitions_list(stream_state_partitions, state_partition_context) if found: return found @@ -153,16 +147,18 @@ def write_stream_state( key, val, *, - state_partition_context: Optional[dict] = None, + state_partition_context: dict | None = None, ) -> None: """Write stream state.""" state_dict = get_writeable_state_dict( - tap_state, tap_stream_id, state_partition_context=state_partition_context + tap_state, + tap_stream_id, + state_partition_context=state_partition_context, ) state_dict[key] = val -def reset_state_progress_markers(stream_or_partition_state: dict) -> Optional[dict]: +def reset_state_progress_markers(stream_or_partition_state: dict) -> dict | None: """Wipe the state once sync is complete. For logging purposes, return the wiped 'progress_markers' object if it existed. @@ -176,7 +172,7 @@ def reset_state_progress_markers(stream_or_partition_state: dict) -> Optional[di def write_replication_key_signpost( stream_or_partition_state: dict, - new_signpost_value: Any, + new_signpost_value: t.Any, ) -> None: """Write signpost value.""" stream_or_partition_state[SIGNPOST_MARKER] = to_json_compatible(new_signpost_value) @@ -184,7 +180,7 @@ def write_replication_key_signpost( def write_starting_replication_value( stream_or_partition_state: dict, - initial_value: Any, + initial_value: t.Any, ) -> None: """Write initial replication value to state.""" stream_or_partition_state[STARTING_MARKER] = to_json_compatible(initial_value) @@ -199,6 +195,7 @@ def get_starting_replication_value(stream_or_partition_state: dict): def increment_state( stream_or_partition_state: dict, + *, latest_record: dict, replication_key: str, is_sorted: bool, @@ -213,7 +210,7 @@ def increment_state( if not is_sorted: if PROGRESS_MARKERS not in stream_or_partition_state: stream_or_partition_state[PROGRESS_MARKERS] = { - PROGRESS_MARKER_NOTE: "Progress is not resumable if interrupted." + PROGRESS_MARKER_NOTE: "Progress is not resumable if interrupted.", } progress_dict = stream_or_partition_state[PROGRESS_MARKERS] old_rk_value = to_json_compatible(progress_dict.get("replication_key_value")) @@ -224,52 +221,67 @@ def increment_state( return if is_sorted: - raise InvalidStreamSortException( + msg = ( f"Unsorted data detected in stream. Latest value '{new_rk_value}' is " f"smaller than previous max '{old_rk_value}'." ) + raise InvalidStreamSortException(msg) def _greater_than_signpost( - signpost: Union[datetime.datetime, str, int, float], - new_value: Union[datetime.datetime, str, int, float], + signpost: _T, + new_value: _T, ) -> bool: """Compare and return True if new_value is greater than signpost.""" - return ( # fails if signpost and bookmark are incompatible types - new_value > signpost # type: ignore - ) + # fails if signpost and bookmark are incompatible types + return new_value > signpost + + +def is_state_non_resumable(stream_or_partition_state: dict) -> bool: + """Return True when state is non-resumable. 
+ + This is determined by checking for a "progress marker" tag in the state artifact. + """ + return PROGRESS_MARKERS in stream_or_partition_state -def finalize_state_progress_markers(stream_or_partition_state: dict) -> Optional[dict]: - """Promote or wipe progress markers once sync is complete.""" +def finalize_state_progress_markers(stream_or_partition_state: dict) -> dict | None: + """Promote or wipe progress markers once sync is complete. + + This marks any non-resumable progress markers as finalized. If there are + valid bookmarks present, they will be promoted to be resumable. + """ signpost_value = stream_or_partition_state.pop(SIGNPOST_MARKER, None) stream_or_partition_state.pop(STARTING_MARKER, None) - if PROGRESS_MARKERS in stream_or_partition_state: - if "replication_key" in stream_or_partition_state[PROGRESS_MARKERS]: - # Replication keys valid (only) after sync is complete - progress_markers = stream_or_partition_state[PROGRESS_MARKERS] - stream_or_partition_state["replication_key"] = progress_markers.pop( - "replication_key" - ) - new_rk_value = progress_markers.pop("replication_key_value") - if signpost_value and _greater_than_signpost(signpost_value, new_rk_value): - new_rk_value = signpost_value - stream_or_partition_state["replication_key_value"] = new_rk_value + if ( + is_state_non_resumable(stream_or_partition_state) + and "replication_key" in stream_or_partition_state[PROGRESS_MARKERS] + ): + # Replication keys valid (only) after sync is complete + progress_markers = stream_or_partition_state[PROGRESS_MARKERS] + stream_or_partition_state["replication_key"] = progress_markers.pop( + "replication_key", + ) + new_rk_value = progress_markers.pop("replication_key_value") + if signpost_value and _greater_than_signpost(signpost_value, new_rk_value): + new_rk_value = signpost_value + stream_or_partition_state["replication_key_value"] = new_rk_value # Wipe and return any markers that have not been promoted return reset_state_progress_markers(stream_or_partition_state) def log_sort_error( + *, ex: Exception, - log_fn: Callable, + log_fn: t.Callable, stream_name: str, - current_context: Optional[dict], - state_partition_context: Optional[dict], + current_context: dict | None, + state_partition_context: dict | None, record_count: int, partition_record_count: int, ) -> None: """Log a sort error.""" - msg = f"Sorting error detected in '{stream_name}'." f"on record #{record_count}. " + msg = f"Sorting error detected in '{stream_name}' on record #{record_count}. " if partition_record_count != record_count: msg += ( f"Record was partition record " @@ -277,6 +289,6 @@ def log_sort_error( f" state partition context {state_partition_context}. " ) if current_context: - msg += f"Context was {str(current_context)}. " + msg += f"Context was {current_context!s}. 
" msg += str(ex) log_fn(msg) diff --git a/singer_sdk/helpers/_typing.py b/singer_sdk/helpers/_typing.py index 187eb0d0e..d3df38a5b 100644 --- a/singer_sdk/helpers/_typing.py +++ b/singer_sdk/helpers/_typing.py @@ -1,16 +1,23 @@ """General helper functions for json typing.""" +from __future__ import annotations + import copy import datetime -import logging +import typing as t from enum import Enum from functools import lru_cache -from typing import Any, Dict, List, Optional, Tuple, cast import pendulum +if t.TYPE_CHECKING: + import logging + _MAX_TIMESTAMP = "9999-12-31 23:59:59.999999" _MAX_TIME = "23:59:59.999999" +JSONSCHEMA_ANNOTATION_SECRET = "secret" # noqa: S105 +JSONSCHEMA_ANNOTATION_WRITEONLY = "writeOnly" +UTC = datetime.timezone.utc class DatetimeErrorTreatmentEnum(Enum): @@ -21,13 +28,24 @@ class DatetimeErrorTreatmentEnum(Enum): NULL = "null" -def to_json_compatible(val: Any) -> Any: +class EmptySchemaTypeError(Exception): + """Exception for when trying to detect type from empty type_dict.""" + + def __init__(self, *args: object) -> None: + msg = ( + "Could not detect type from empty type_dict. Did you forget to define a " + "property in the stream schema?" + ) + super().__init__(msg, *args) + + +def to_json_compatible(val: t.Any) -> t.Any: """Return as string if datetime. JSON does not support proper datetime types. If given a naive datetime object, pendulum automatically makes it utc """ if isinstance(val, (datetime.datetime, pendulum.DateTime)): - val = pendulum.instance(val).isoformat() + return pendulum.instance(val).isoformat() return val @@ -41,27 +59,76 @@ def append_type(type_dict: dict, new_type: str) -> dict: result["anyOf"] = [result["anyOf"], new_type] return result - elif "type" in result: - if isinstance(result["type"], list) and new_type not in result["type"]: - result["type"].append(new_type) - elif new_type != result["type"]: - result["type"] = [result["type"], new_type] + if "type" in result: + type_array = ( + result["type"] if isinstance(result["type"], list) else [result["type"]] + ) + if new_type not in type_array: + result["type"] = [*type_array, new_type] return result - raise ValueError( + msg = ( "Could not append type because the JSON schema for the dictionary " f"`{type_dict}` appears to be invalid." ) + raise ValueError(msg) + + +def is_secret_type(type_dict: dict) -> bool: + """Return True if JSON Schema type definition appears to be a secret. + + Will return true if either `writeOnly` or `secret` are true on this type + or any of the type's subproperties. + + Args: + type_dict: The JSON Schema type to check. + + Raises: + ValueError: If type_dict is None or empty. + + Returns: + True if we detect any sensitive property nodes. + """ + if type_dict.get(JSONSCHEMA_ANNOTATION_WRITEONLY) or type_dict.get( + JSONSCHEMA_ANNOTATION_SECRET, + ): + return True + + if "properties" in type_dict: + # Recursively check subproperties and return True if any child is secret. 
+ return any( + is_secret_type(child_type_dict) + for child_type_dict in type_dict["properties"].values() + ) + + return False -def is_object_type(property_schema: dict) -> Optional[bool]: +def is_object_type(property_schema: dict) -> bool | None: """Return true if the JSON Schema type is an object or None if detection fails.""" if "anyOf" not in property_schema and "type" not in property_schema: return None # Could not detect data type - for property_type in property_schema.get("anyOf", [property_schema.get("type")]): - if "object" in property_type or property_type == "object": - return True - return False + return any( + "object" in property_type or property_type == "object" + for property_type in property_schema.get( + "anyOf", + [property_schema.get("type")], + ) + ) + + +def is_uniform_list(property_schema: dict) -> bool | None: + """Return true if the JSON Schema type is an array with a single schema. + + This is as opposed to 'tuples' where different indices have different schemas; + https://json-schema.org/understanding-json-schema/reference/array.html#array + """ + return ( + is_array_type(property_schema) is True + and "items" in property_schema + and "prefixItems" not in property_schema + and isinstance(property_schema["items"], dict) + ) def is_datetime_type(type_dict: dict) -> bool: @@ -70,33 +137,50 @@ def is_datetime_type(type_dict: dict) -> bool: Also returns True if 'date-time' is nested within an 'anyOf' type Array. """ if not type_dict: - raise ValueError( - "Could not detect type from empty type_dict. " - "Did you forget to define a property in the stream schema?" - ) + raise EmptySchemaTypeError if "anyOf" in type_dict: - for type_dict in type_dict["anyOf"]: - if is_datetime_type(type_dict): - return True - return False - elif "type" in type_dict: + return any(is_datetime_type(type_dict) for type_dict in type_dict["anyOf"]) + if "type" in type_dict: return type_dict.get("format") == "date-time" - raise ValueError( - f"Could not detect type of replication key using schema '{type_dict}'" - ) + msg = f"Could not detect type of replication key using schema '{type_dict}'" + raise ValueError(msg) + + +def is_date_or_datetime_type(type_dict: dict) -> bool: + """Return True if JSON Schema type definition is a 'date'/'date-time' type. + + Also returns True if type is nested within an 'anyOf' type Array. + + Args: + type_dict: The JSON Schema definition. + + Raises: + ValueError: If type is empty or null. + + Returns: + True if date or date-time, else False. + """ + if "anyOf" in type_dict: + return any(is_date_or_datetime_type(option) for option in type_dict["anyOf"]) + if "type" in type_dict: + return type_dict.get("format") in {"date", "date-time"} -def get_datelike_property_type(property_schema: Dict) -> Optional[str]: + msg = f"Could not detect type of replication key using schema '{type_dict}'" + raise ValueError(msg) + + +def get_datelike_property_type(property_schema: dict) -> str | None: """Return one of 'date-time', 'time', or 'date' if property is date-like. Otherwise return None. 
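+
+    Example (illustrative):
+        >>> get_datelike_property_type({"type": "string", "format": "date-time"})
+        'date-time'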
""" if _is_string_with_format(property_schema): - return cast(str, property_schema["format"]) - elif "anyOf" in property_schema: + return t.cast(str, property_schema["format"]) + if "anyOf" in property_schema: for type_dict in property_schema["anyOf"]: if _is_string_with_format(type_dict): - return cast(str, type_dict["format"]) + return t.cast(str, type_dict["format"]) return None @@ -107,17 +191,18 @@ def _is_string_with_format(type_dict): "date", }: return True + return None def handle_invalid_timestamp_in_record( - record, - key_breadcrumb: List[str], + record, # noqa: ARG001 + key_breadcrumb: list[str], invalid_value: str, datelike_typename: str, ex: Exception, - treatment: Optional[DatetimeErrorTreatmentEnum], + treatment: DatetimeErrorTreatmentEnum | None, logger: logging.Logger, -) -> Any: +) -> t.Any: """Apply treatment or raise an error for invalid time values.""" treatment = treatment or DatetimeErrorTreatmentEnum.ERROR msg = ( @@ -125,11 +210,11 @@ def handle_invalid_timestamp_in_record( f"field '{':'.join(key_breadcrumb)}'." ) if treatment == DatetimeErrorTreatmentEnum.MAX: - logger.warning(f"{msg}. Replacing with MAX value.\n{ex}\n") + logger.warning("%s. Replacing with MAX value.\n%s\n", msg, ex) return _MAX_TIMESTAMP if datelike_typename != "time" else _MAX_TIME if treatment == DatetimeErrorTreatmentEnum.NULL: - logger.warning(f"{msg}. Replacing with NULL.\n{ex}\n") + logger.warning("%s. Replacing with NULL.\n%s\n", msg, ex) return None raise ValueError(msg) @@ -138,102 +223,270 @@ def handle_invalid_timestamp_in_record( def is_string_array_type(type_dict: dict) -> bool: """Return True if JSON Schema type definition is a string array.""" if not type_dict: - raise ValueError( - "Could not detect type from empty type_dict. " - "Did you forget to define a property in the stream schema?" 
- ) + raise EmptySchemaTypeError if "anyOf" in type_dict: - return any([is_string_array_type(t) for t in type_dict["anyOf"]]) + return any(is_string_array_type(t) for t in type_dict["anyOf"]) if "type" not in type_dict: - raise ValueError(f"Could not detect type from schema '{type_dict}'") + msg = f"Could not detect type from schema '{type_dict}'" + raise ValueError(msg) return "array" in type_dict["type"] and bool(is_string_type(type_dict["items"])) -def is_boolean_type(property_schema: dict) -> Optional[bool]: +def is_array_type(type_dict: dict) -> bool: + """Return True if JSON Schema type is an array.""" + if not type_dict: + raise EmptySchemaTypeError + + if "anyOf" in type_dict: + return any(is_array_type(t) for t in type_dict["anyOf"]) + + if "type" not in type_dict: + msg = f"Could not detect type from schema '{type_dict}'" + raise ValueError(msg) + + return "array" in type_dict["type"] + + +def is_boolean_type(property_schema: dict) -> bool | None: """Return true if the JSON Schema type is a boolean or None if detection fails.""" if "anyOf" not in property_schema and "type" not in property_schema: return None # Could not detect data type for property_type in property_schema.get("anyOf", [property_schema.get("type")]): - if "boolean" in property_type or property_type == "boolean": + schema_type = ( + property_type.get("type", []) + if isinstance(property_type, dict) + else property_type + ) + if "boolean" in schema_type or schema_type == "boolean": return True return False -def is_string_type(property_schema: dict) -> Optional[bool]: - """Return true if the JSON Schema type is a boolean or None if detection fails.""" +def is_integer_type(property_schema: dict) -> bool | None: + """Return true if the JSON Schema type is an integer or None if detection fails.""" + if "anyOf" not in property_schema and "type" not in property_schema: + return None # Could not detect data type + for property_type in property_schema.get("anyOf", [property_schema.get("type")]): + schema_type = ( + property_type.get("type", []) + if isinstance(property_type, dict) + else property_type + ) + if "integer" in schema_type or schema_type == "integer": + return True + return False + + +def is_string_type(property_schema: dict) -> bool | None: + """Return true if the JSON Schema type is a string or None if detection fails.""" + if "anyOf" not in property_schema and "type" not in property_schema: + return None # Could not detect data type + for property_type in property_schema.get("anyOf", [property_schema.get("type")]): + schema_type = ( + property_type.get("type", []) + if isinstance(property_type, dict) + else property_type + ) + if "string" in schema_type or schema_type == "string": + return True + return False + + +def is_null_type(property_schema: dict) -> bool | None: + """Return true if the JSON Schema type is a null or None if detection fails.""" + if "anyOf" not in property_schema and "type" not in property_schema: + return None # Could not detect data type + for property_type in property_schema.get("anyOf", [property_schema.get("type")]): + schema_type = ( + property_type.get("type", []) + if isinstance(property_type, dict) + else property_type + ) + if "null" in schema_type or schema_type == "null": + return True + return False + + +def is_number_type(property_schema: dict) -> bool | None: + """Return true if the JSON Schema type is a number or None if detection fails.""" if "anyOf" not in property_schema and "type" not in property_schema: return None # Could not detect data type for property_type in 
property_schema.get("anyOf", [property_schema.get("type")]): - if "string" in property_type or property_type == "string": + schema_type = ( + property_type.get("type", []) + if isinstance(property_type, dict) + else property_type + ) + if "number" in schema_type or schema_type == "number": return True return False @lru_cache() def _warn_unmapped_properties( - stream_name: str, property_names: Tuple[str], logger: logging.Logger + stream_name: str, + property_names: tuple[str], + logger: logging.Logger, ): - logger.info( - f"Properties {property_names} were present in the '{stream_name}' stream but " - "not found in catalog schema. Ignoring." + logger.warning( + "Properties %s were present in the '%s' stream but " + "not found in catalog schema. Ignoring.", + property_names, + stream_name, ) -def conform_record_data_types( # noqa: C901 - stream_name: str, row: Dict[str, Any], schema: dict, logger: logging.Logger -) -> Dict[str, Any]: +class TypeConformanceLevel(Enum): + """Used to configure how data is conformed to json compatible types. + + Before outputting data as JSON, it is conformed to types that are valid in json, + based on the current types and the schema. For example, dates are converted to + strings. + + By default, all data is conformed recursively. If this is not necessary (because + data is already valid types, or you are manually converting it) then it may be more + performant to use a lesser conformance level. + """ + + RECURSIVE = 1 + """ + All data is recursively conformed + """ + + ROOT_ONLY = 2 + """ + Only properties on the root object, excluding array elements, are conformed + """ + + NONE = 3 + """ + No conformance is performed + """ + + +def conform_record_data_types( + stream_name: str, + record: dict[str, t.Any], + schema: dict, + level: TypeConformanceLevel, + logger: logging.Logger, +) -> dict[str, t.Any]: """Translate values in record dictionary to singer-compatible data types. - Any property names not found in the schema catalog will be removed, and a - warning will be logged exactly once per unmapped property name. + Any property names not found in the schema catalog will be removed, and a single + warning will be logged listing each unmapped property name. """ - rec: Dict[str, Any] = {} - unmapped_properties: List[str] = [] - for property_name, elem in row.items(): + rec, unmapped_properties = _conform_record_data_types(record, schema, level, None) + + if len(unmapped_properties) > 0: + _warn_unmapped_properties(stream_name, tuple(unmapped_properties), logger) + + return rec + + +def _conform_record_data_types( # noqa: PLR0912 + input_object: dict[str, t.Any], + schema: dict, + level: TypeConformanceLevel, + parent: str | None, +) -> tuple[dict[str, t.Any], list[str]]: + """Translate values in record dictionary to singer-compatible data types. + + Any property names not found in the schema catalog will be removed, and a single + warning will be logged listing each unmapped property name. + + This is called recursively to process nested objects and arrays. + + Args: + input_object: A single record + schema: JSON schema the given input_object is expected to meet + level: Specifies how recursive the conformance process should be + parent: '.' 
seperated path to this element from the object root (for logging) + """ + output_object: dict[str, t.Any] = {} + unmapped_properties: list[str] = [] + + if level == TypeConformanceLevel.NONE: + return input_object, unmapped_properties + + for property_name, elem in input_object.items(): + property_path = property_name if parent is None else f"{parent}.{property_name}" if property_name not in schema["properties"]: - unmapped_properties.append(property_name) + unmapped_properties.append(property_path) continue property_schema = schema["properties"][property_name] - if isinstance(elem, (datetime.datetime, pendulum.DateTime)): - rec[property_name] = to_json_compatible(elem) - elif isinstance(elem, datetime.date): - rec[property_name] = elem.isoformat() + "T00:00:00+00:00" - elif isinstance(elem, datetime.timedelta): - epoch = datetime.datetime.utcfromtimestamp(0) - timedelta_from_epoch = epoch + elem - rec[property_name] = timedelta_from_epoch.isoformat() + "+00:00" - elif isinstance(elem, datetime.time): - rec[property_name] = str(elem) - elif isinstance(elem, bytes): - # for BIT value, treat 0 as False and anything else as True - bit_representation: bool - if is_boolean_type(property_schema): - bit_representation = elem != b"\x00" - rec[property_name] = bit_representation + if isinstance(elem, list) and is_uniform_list(property_schema): + if level == TypeConformanceLevel.RECURSIVE: + item_schema = property_schema["items"] + output = [] + for item in elem: + if is_object_type(item_schema) and isinstance(item, dict): + ( + output_item, + sub_unmapped_properties, + ) = _conform_record_data_types( + item, + item_schema, + level, + property_path, + ) + unmapped_properties.extend(sub_unmapped_properties) + output.append(output_item) + else: + output.append(_conform_primitive_property(item, item_schema)) + output_object[property_name] = output else: - rec[property_name] = elem.hex() - elif is_boolean_type(property_schema): - is_standard_boolean: bool = True - boolean_representation: Optional[bool] - if elem is None: - boolean_representation = None - elif elem == 0: - boolean_representation = False - elif elem == 1: - boolean_representation = True - elif isinstance(elem, str): - if elem.lower() in ['false', 'f', '0']: - boolean_representation = False - elif elem.lower() in ['true', 't', '1']: - boolean_representation = True + output_object[property_name] = elem + elif ( + isinstance(elem, dict) + and is_object_type(property_schema) + and "properties" in property_schema + ): + if level == TypeConformanceLevel.RECURSIVE: + ( + output_object[property_name], + sub_unmapped_properties, + ) = _conform_record_data_types( + elem, + property_schema, + level, + property_path, + ) + unmapped_properties.extend(sub_unmapped_properties) else: - is_standard_boolean = False - rec[property_name] = boolean_representation if is_standard_boolean else elem + output_object[property_name] = elem else: - rec[property_name] = elem - _warn_unmapped_properties(stream_name, tuple(unmapped_properties), logger) - return rec + output_object[property_name] = _conform_primitive_property( + elem, + property_schema, + ) + return output_object, unmapped_properties + + +def _conform_primitive_property( # noqa: PLR0911 + elem: t.Any, + property_schema: dict, +) -> t.Any: + """Converts a primitive (i.e. 
not object or array) to a json compatible type.""" + if isinstance(elem, (datetime.datetime, pendulum.DateTime)): + return to_json_compatible(elem) + if isinstance(elem, datetime.date): + return f"{elem.isoformat()}T00:00:00+00:00" + if isinstance(elem, datetime.timedelta): + epoch = datetime.datetime.fromtimestamp(0, UTC) + timedelta_from_epoch = epoch + elem + if timedelta_from_epoch.tzinfo is None: + timedelta_from_epoch = timedelta_from_epoch.replace(tzinfo=UTC) + return timedelta_from_epoch.isoformat() + if isinstance(elem, datetime.time): + return str(elem) + if isinstance(elem, bytes): + # for BIT value, treat 0 as False and anything else as True + return elem != b"\x00" if is_boolean_type(property_schema) else elem.hex() + if is_boolean_type(property_schema): + return None if elem is None else elem != 0 + return elem diff --git a/singer_sdk/helpers/_util.py b/singer_sdk/helpers/_util.py index 67bc8e816..d0079c40d 100644 --- a/singer_sdk/helpers/_util.py +++ b/singer_sdk/helpers/_util.py @@ -1,16 +1,19 @@ """General helper functions, helper classes, and decorators.""" +from __future__ import annotations + import json +import typing as t from pathlib import Path, PurePath -from typing import Any, Dict, Union, cast import pendulum -def read_json_file(path: Union[PurePath, str]) -> Dict[str, Any]: - """Read json file, thowing an error if missing.""" +def read_json_file(path: PurePath | str) -> dict[str, t.Any]: + """Read json file, throwing an error if missing.""" if not path: - raise RuntimeError("Could not open file. Filepath not provided.") + msg = "Could not open file. Filepath not provided." + raise RuntimeError(msg) if not Path(path).exists(): msg = f"File at '{path}' was not found." @@ -19,7 +22,7 @@ def read_json_file(path: Union[PurePath, str]) -> Dict[str, Any]: msg += f"\nFor more info, please see the sample template at: {template}" raise FileExistsError(msg) - return cast(dict, json.loads(Path(path).read_text())) + return t.cast(dict, json.loads(Path(path).read_text())) def utc_now() -> pendulum.DateTime: diff --git a/singer_sdk/helpers/capabilities.py b/singer_sdk/helpers/capabilities.py index 82e33f7e6..f5b5fa305 100644 --- a/singer_sdk/helpers/capabilities.py +++ b/singer_sdk/helpers/capabilities.py @@ -2,8 +2,8 @@ from __future__ import annotations +import typing as t from enum import Enum, EnumMeta -from typing import Any, TypeVar from warnings import warn from singer_sdk.typing import ( @@ -12,9 +12,10 @@ ObjectType, PropertiesList, Property, + StringType, ) -_EnumMemberT = TypeVar("_EnumMemberT") +_EnumMemberT = t.TypeVar("_EnumMemberT") # Default JSON Schema to support config for built-in capabilities: @@ -22,9 +23,11 @@ Property( "stream_maps", ObjectType(), - description="Config object for stream maps capability. " - + "For more information check out " - + "[Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html).", + description=( + "Config object for stream maps capability. " + "For more information check out " + "[Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html)." 
+ ), ), Property( "stream_map_config", @@ -47,6 +50,62 @@ description="The max depth to flatten schemas.", ), ).to_dict() +BATCH_CONFIG = PropertiesList( + Property( + "batch_config", + description="", + wrapped=ObjectType( + Property( + "encoding", + description="Specifies the format and compression of the batch files.", + wrapped=ObjectType( + Property( + "format", + StringType, + allowed_values=["jsonl"], + description="Format to use for batch files.", + ), + Property( + "compression", + StringType, + allowed_values=["gzip", "none"], + description="Compression format to use for batch files.", + ), + ), + ), + Property( + "storage", + description="Defines the storage layer to use when writing batch files", + wrapped=ObjectType( + Property( + "root", + StringType, + description="Root path to use when writing batch files.", + ), + Property( + "prefix", + StringType, + description="Prefix to use when writing batch files.", + ), + ), + ), + ), + ), +).to_dict() +TARGET_SCHEMA_CONFIG = PropertiesList( + Property( + "default_target_schema", + StringType(), + description="The default target database schema name to use for all streams.", + ), +).to_dict() +ADD_RECORD_METADATA_CONFIG = PropertiesList( + Property( + "add_record_metadata", + BooleanType(), + description="Add metadata to records.", + ), +).to_dict() class DeprecatedEnum(Enum): @@ -93,7 +152,7 @@ def emit_warning(self) -> None: class DeprecatedEnumMeta(EnumMeta): """Metaclass for enumeration with deprecation support.""" - def __getitem__(self, name: str) -> Any: # noqa: ANN401 + def __getitem__(self, name: str) -> t.Any: # noqa: ANN401 """Retrieve mapping item. Args: @@ -107,7 +166,7 @@ def __getitem__(self, name: str) -> Any: # noqa: ANN401 obj.emit_warning() return obj - def __getattribute__(cls, name: str) -> Any: # noqa: ANN401 + def __getattribute__(cls, name: str) -> t.Any: # noqa: ANN401, N805 """Retrieve enum attribute. Args: @@ -121,7 +180,7 @@ def __getattribute__(cls, name: str) -> Any: # noqa: ANN401 obj.emit_warning() return obj - def __call__(self, *args: Any, **kwargs: Any) -> Any: # noqa: ANN401 + def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: # noqa: ANN401 """Call enum member. Args: diff --git a/singer_sdk/helpers/jsonpath.py b/singer_sdk/helpers/jsonpath.py index 8c7845cd6..9e2956f19 100644 --- a/singer_sdk/helpers/jsonpath.py +++ b/singer_sdk/helpers/jsonpath.py @@ -1,15 +1,20 @@ """JSONPath helpers.""" -from typing import Any, Generator, Union +from __future__ import annotations + +import typing as t -import jsonpath_ng import memoization from jsonpath_ng.ext import parse +if t.TYPE_CHECKING: + import jsonpath_ng + def extract_jsonpath( - expression: str, input: Union[dict, list] -) -> Generator[Any, None, None]: + expression: str, + input: dict | list, # noqa: A002 +) -> t.Generator[t.Any, None, None]: """Extract records from an input based on a JSONPath expression. 
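+
+    Example (illustrative; the expression is parsed with jsonpath_ng.ext):
+        >>> records = {"data": [{"id": 1}, {"id": 2}]}
+        >>> list(extract_jsonpath("$.data[*].id", records))
+        [1, 2]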
Args: diff --git a/singer_sdk/internal/__init__.py b/singer_sdk/internal/__init__.py new file mode 100644 index 000000000..e143e7773 --- /dev/null +++ b/singer_sdk/internal/__init__.py @@ -0,0 +1 @@ +"""Internal utilities for the Singer SDK.""" diff --git a/singer_sdk/io_base.py b/singer_sdk/io_base.py index f5da20d19..07da6e63e 100644 --- a/singer_sdk/io_base.py +++ b/singer_sdk/io_base.py @@ -3,33 +3,24 @@ from __future__ import annotations import abc -import enum +import decimal import json import logging import sys +import typing as t from collections import Counter, defaultdict -from typing import IO -from typing import Counter as CounterType +from singer_sdk._singerlib import SingerMessageType from singer_sdk.helpers._compat import final logger = logging.getLogger(__name__) -class SingerMessageType(str, enum.Enum): - """Singer specification message types.""" - - RECORD = "RECORD" - SCHEMA = "SCHEMA" - STATE = "STATE" - ACTIVATE_VERSION = "ACTIVATE_VERSION" - - class SingerReader(metaclass=abc.ABCMeta): """Interface for all plugins reading Singer messages from stdin.""" @final - def listen(self, file_input: IO[str] | None = None) -> None: + def listen(self, file_input: t.IO[str] | None = None) -> None: """Read from input until all messages are processed. Args: @@ -56,11 +47,31 @@ def _assert_line_requires(line_dict: dict, requires: set[str]) -> None: """ if not requires.issubset(line_dict): missing = requires - set(line_dict) - raise Exception( - f"Line is missing required {', '.join(missing)} key(s): {line_dict}" + msg = f"Line is missing required {', '.join(missing)} key(s): {line_dict}" + raise Exception(msg) + + def deserialize_json(self, line: str) -> dict: + """Deserialize a line of json. + + Args: + line: A single line of json. + + Returns: + A dictionary of the deserialized json. + + Raises: + json.decoder.JSONDecodeError: raised if any lines are not valid json + """ + try: + return json.loads( # type: ignore[no-any-return] + line, + parse_float=decimal.Decimal, ) + except json.decoder.JSONDecodeError as exc: + logger.error("Unable to parse:\n%s", line, exc_info=exc) + raise - def _process_lines(self, file_input: IO[str]) -> CounterType[str]: + def _process_lines(self, file_input: t.IO[str]) -> t.Counter[str]: """Internal method to process jsonl lines from a Singer tap. Args: @@ -68,18 +79,10 @@ def _process_lines(self, file_input: IO[str]) -> CounterType[str]: Returns: A counter object for the processed lines. - - Raises: - json.decoder.JSONDecodeError: raised if any lines are not valid json """ stats: dict[str, int] = defaultdict(int) for line in file_input: - try: - line_dict = json.loads(line) - except json.decoder.JSONDecodeError as exc: - logger.error("Unable to parse:\n%s", line, exc_info=exc) - raise - + line_dict = self.deserialize_json(line) self._assert_line_requires(line_dict, requires={"type"}) record_type: SingerMessageType = line_dict["type"] @@ -95,6 +98,9 @@ def _process_lines(self, file_input: IO[str]) -> CounterType[str]: elif record_type == SingerMessageType.STATE: self._process_state_message(line_dict) + elif record_type == SingerMessageType.BATCH: + self._process_batch_message(line_dict) + else: self._process_unknown_message(line_dict) @@ -118,6 +124,10 @@ def _process_state_message(self, message_dict: dict) -> None: def _process_activate_version_message(self, message_dict: dict) -> None: ... + @abc.abstractmethod + def _process_batch_message(self, message_dict: dict) -> None: + ... 
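+
+    # Note (illustrative): deserialize_json above parses floats with
+    # decimal.Decimal, e.g. json.loads('{"n": 1.10}', parse_float=decimal.Decimal)
+    # returns {'n': Decimal('1.10')}, so numeric precision from the tap is
+    # preserved rather than being rounded through float.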
+ def _process_unknown_message(self, message_dict: dict) -> None: """Internal method to process unknown message types from a Singer tap. @@ -128,7 +138,8 @@ def _process_unknown_message(self, message_dict: dict) -> None: ValueError: raised if a message type is not recognized """ record_type = message_dict["type"] - raise ValueError(f"Unknown message type '{record_type}' in message.") + msg = f"Unknown message type '{record_type}' in message." + raise ValueError(msg) def _process_endofpipe(self) -> None: - pass + logger.debug("End of pipe reached") diff --git a/singer_sdk/mapper.py b/singer_sdk/mapper.py index c94a2d34d..031ca0c82 100644 --- a/singer_sdk/mapper.py +++ b/singer_sdk/mapper.py @@ -7,9 +7,10 @@ import abc import copy +import datetime import hashlib import logging -from typing import Any, Callable +import typing as t from singer_sdk.exceptions import MapExpressionError, StreamMapConfigError from singer_sdk.helpers import _simpleeval as simpleeval @@ -20,7 +21,6 @@ flatten_schema, get_flattening_options, ) -from singer_sdk.helpers._singer import Catalog from singer_sdk.typing import ( CustomType, IntegerType, @@ -31,23 +31,38 @@ StringType, ) +if t.TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 10): + from typing import TypeAlias # noqa: ICN003 + else: + from typing_extensions import TypeAlias + + from singer_sdk._singerlib.catalog import Catalog + + MAPPER_ELSE_OPTION = "__else__" MAPPER_FILTER_OPTION = "__filter__" MAPPER_SOURCE_OPTION = "__source__" MAPPER_ALIAS_OPTION = "__alias__" MAPPER_KEY_PROPERTIES_OPTION = "__key_properties__" +NULL_STRING = "__NULL__" -def md5(input: str) -> str: +def md5(string: str) -> str: """Digest a string using MD5. This is a function for inline calculations. Args: - input: String to digest. + string: String to digest. Returns: A string digested into MD5. """ - return hashlib.md5(input.encode("utf-8")).hexdigest() + return hashlib.md5(string.encode("utf-8")).hexdigest() # noqa: S324 + + +StreamMapsDict: TypeAlias = t.Dict[str, t.Union[str, dict, None]] class StreamMap(metaclass=abc.ABCMeta): @@ -174,14 +189,10 @@ def transform(self, record: dict) -> None: Args: record: An individual record dictionary in a stream. - - Returns: - None """ _ = record # Drop the record - return None - def get_filter_result(self, record: dict) -> bool: + def get_filter_result(self, record: dict) -> bool: # noqa: ARG002 """Exclude all records. Args: @@ -207,7 +218,7 @@ def transform(self, record: dict) -> dict | None: """ return super().transform(record) - def get_filter_result(self, record: dict) -> bool: + def get_filter_result(self, record: dict) -> bool: # noqa: ARG002 """Return True (always include). Args: @@ -249,8 +260,8 @@ def __init__( ) self.map_config = map_config - self._transform_fn: Callable[[dict], dict | None] - self._filter_fn: Callable[[dict], bool] + self._transform_fn: t.Callable[[dict], dict | None] + self._filter_fn: t.Callable[[dict], bool] ( self._filter_fn, self._transform_fn, @@ -284,18 +295,22 @@ def get_filter_result(self, record: dict) -> bool: return self._filter_fn(record) @property - def functions(self) -> dict[str, Callable]: + def functions(self) -> dict[str, t.Callable]: """Get availabale transformation functions. Returns: Functions which should be available for expression evaluation. 
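+
+        Example (illustrative): with these names registered, a stream map
+        expression such as `md5(email)` (where `email` is a record property)
+        can be evaluated per record; the newly registered `datetime` module
+        is likewise available inside expressions.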
""" - funcs: dict[str, Any] = simpleeval.DEFAULT_FUNCTIONS.copy() + funcs: dict[str, t.Any] = simpleeval.DEFAULT_FUNCTIONS.copy() funcs["md5"] = md5 + funcs["datetime"] = datetime return funcs def _eval( - self, expr: str, record: dict, property_name: str | None + self, + expr: str, + record: dict, + property_name: str | None, ) -> str | int | float: """Solve an expression. @@ -319,28 +334,38 @@ def _eval( names["self"] = record[property_name] try: result: str | int | float = simpleeval.simple_eval( - expr, functions=self.functions, names=names + expr, + functions=self.functions, + names=names, ) - logging.debug(f"Eval result: {expr} = {result}") - except Exception as ex: - raise MapExpressionError( - f"Failed to evaluate simpleeval expressions {expr}." - ) from ex + except (simpleeval.InvalidExpression, SyntaxError) as ex: + msg = f"Failed to evaluate simpleeval expressions {expr}." + raise MapExpressionError(msg) from ex + + logging.debug("Eval result: %s = %s", expr, result) + return result def _eval_type( - self, expr: str, default: JSONTypeHelper | None = None + self, + expr: str, + default: JSONTypeHelper | None = None, ) -> JSONTypeHelper: """Evaluate an expression's type. Args: expr: String expression to evaluate. - default: TODO. + default: Default type. Returns: - TODO + The evaluated expression's type. + + Raises: + ValueError: If the expression is ``None``. """ - assert expr is not None, "Expression should be str, not None" + if expr is None: + msg = "Expression should be str, not None" + raise ValueError(msg) default = default or StringType() @@ -358,9 +383,10 @@ def _eval_type( return default - def _init_functions_and_schema( - self, stream_map: dict - ) -> tuple[Callable[[dict], bool], Callable[[dict], dict | None], dict]: + def _init_functions_and_schema( # noqa: PLR0912, PLR0915, C901 + self, + stream_map: dict, + ) -> tuple[t.Callable[[dict], bool], t.Callable[[dict], dict | None], dict]: """Return a tuple: filter_fn, transform_fn, transformed_schema. Args: @@ -379,29 +405,36 @@ def _init_functions_and_schema( include_by_default = True if stream_map and MAPPER_FILTER_OPTION in stream_map: filter_rule = stream_map.pop(MAPPER_FILTER_OPTION) - logging.info(f"Found '{self.stream_alias}' filter rule: {filter_rule}") + logging.info( + "Found '%s' filter rule: %s", + self.stream_alias, + filter_rule, + ) if stream_map and MAPPER_KEY_PROPERTIES_OPTION in stream_map: self.transformed_key_properties: list[str] = stream_map.pop( - MAPPER_KEY_PROPERTIES_OPTION + MAPPER_KEY_PROPERTIES_OPTION, ) logging.info( - f"Found stream map override for '{self.stream_alias}' key properties: " - f"{str(self.transformed_key_properties)}" + "Found stream map override for '%s' key properties: %s", + self.stream_alias, + self.transformed_key_properties, ) if stream_map and MAPPER_ELSE_OPTION in stream_map: - if stream_map[MAPPER_ELSE_OPTION] is None: + if stream_map[MAPPER_ELSE_OPTION] in {None, NULL_STRING}: logging.info( - f"Detected `{MAPPER_ELSE_OPTION}=None` rule. " - "Unmapped, non-key properties will be excluded from output." + "Detected `%s=None` rule. " + "Unmapped, non-key properties will be excluded from output.", + MAPPER_ELSE_OPTION, ) include_by_default = False else: - raise NotImplementedError( + msg = ( f"Option '{MAPPER_ELSE_OPTION}={stream_map[MAPPER_ELSE_OPTION]}' " "is not supported." 
) + raise NotImplementedError(msg) stream_map.pop(MAPPER_ELSE_OPTION) # Transform the schema as needed @@ -415,20 +448,24 @@ def _init_functions_and_schema( transformed_schema["properties"] = {} for prop_key, prop_def in list(stream_map.items()): - if prop_def is None: + if prop_def in {None, NULL_STRING}: if prop_key in (self.transformed_key_properties or []): - raise StreamMapConfigError( + msg = ( f"Removing key property '{prop_key}' is not permitted in " f"'{self.stream_alias}' stream map config. To remove a key " - "property, use the `__key_properties__` operator " - "to specify either a new list of key property names or `null` " - "to replicate with no key properties in the stream." + "property, use the `__key_properties__` operator to specify " + "either a new list of key property names or `null` to " + "replicate with no key properties in the stream." ) + raise StreamMapConfigError(msg) transformed_schema["properties"].pop(prop_key, None) elif isinstance(prop_def, str): default_type: JSONTypeHelper = StringType() # Fallback to string - existing_schema: dict = transformed_schema["properties"].get( - prop_key, {} + existing_schema: dict = ( + # Use transformed schema if available + transformed_schema["properties"].get(prop_key, {}) + # ...or original schema for passthrough + or self.raw_schema["properties"].get(prop_def, {}) ) if existing_schema: # Set default type if property exists already in JSON Schema @@ -436,36 +473,42 @@ def _init_functions_and_schema( transformed_schema["properties"].update( Property( - prop_key, self._eval_type(prop_def, default=default_type) - ).to_dict() + prop_key, + self._eval_type(prop_def, default=default_type), + ).to_dict(), ) else: - raise StreamMapConfigError( - f"Unexpected type '{type(prop_def).__name__}' in stream map " - f"for '{self.stream_alias}:{prop_key}'." + msg = ( + f"Unexpected type '{type(prop_def).__name__}' in stream map for " + f"'{self.stream_alias}:{prop_key}'." ) + raise StreamMapConfigError(msg) for key_property in self.transformed_key_properties or []: if key_property not in transformed_schema["properties"]: - raise StreamMapConfigError( + msg = ( f"Invalid key properties for '{self.stream_alias}': " - f"[{','.join(self.transformed_key_properties)}]. " - f"Property '{key_property}' was not detected in schema." + f"[{','.join(self.transformed_key_properties)}]. Property " + f"'{key_property}' was not detected in schema." ) + raise StreamMapConfigError(msg) if self.flattening_enabled: transformed_schema = self.flatten_schema(transformed_schema) # Declare function variables - def eval_filter(filter_rule: str) -> Callable[[dict], bool]: + def eval_filter(filter_rule: str) -> t.Callable[[dict], bool]: def _inner(record: dict) -> bool: filter_result = self._eval( - expr=filter_rule, record=record, property_name=None + expr=filter_rule, + record=record, + property_name=None, ) logging.debug( - f"Filter result for '{filter_rule}' " - "in '{self.name}' stream: {filter_result}" + "Filter result for '%s' in '{self.name}' stream: %s", + filter_rule, + filter_result, ) if not filter_result: logging.debug("Excluding record due to filter.") @@ -484,10 +527,11 @@ def always_true(record: dict) -> bool: elif filter_rule is None: filter_fn = always_true else: - raise StreamMapConfigError( + msg = ( f"Unexpected filter rule type '{type(filter_rule).__name__}' in " - f"expression {str(filter_rule)}. Expected 'str' or 'None'." + f"expression {filter_rule!s}. Expected 'str' or 'None'." 
) + raise StreamMapConfigError(msg) def transform_fn(record: dict) -> dict | None: nonlocal include_by_default, stream_map @@ -505,7 +549,7 @@ def transform_fn(record: dict) -> dict | None: result[key_property] = record[key_property] for prop_key, prop_def in list(stream_map.items()): - if prop_def is None: + if prop_def in {None, NULL_STRING}: # Remove property from result result.pop(prop_key, None) continue @@ -513,14 +557,17 @@ def transform_fn(record: dict) -> dict | None: if isinstance(prop_def, str): # Apply property transform result[prop_key] = self._eval( - expr=prop_def, record=record, property_name=prop_key + expr=prop_def, + record=record, + property_name=prop_key, ) continue - raise StreamMapConfigError( - f"Unexpected mapping type '{type(prop_def).__name__}' in " - f"map expression '{prop_def}'. Expected 'str' or 'None'." + msg = ( + f"Unexpected mapping type '{type(prop_def).__name__}' " + f"in map expression '{prop_def}'. Expected 'str' or 'None'." ) + raise StreamMapConfigError(msg) return result @@ -532,7 +579,7 @@ class PluginMapper: def __init__( self, - plugin_config: dict[str, dict[str, str | dict]], + plugin_config: dict[str, StreamMapsDict], logger: logging.Logger, ) -> None: """Initialize mapper. @@ -550,32 +597,32 @@ def __init__( self.default_mapper_type: type[DefaultStreamMap] = SameRecordTransform self.logger = logger - self.stream_maps_dict: dict[str, str | dict] = plugin_config.get( - "stream_maps", {} - ) + self.stream_maps_dict: StreamMapsDict = plugin_config.get("stream_maps", {}) if MAPPER_ELSE_OPTION in self.stream_maps_dict: - if self.stream_maps_dict[MAPPER_ELSE_OPTION] is None: + if self.stream_maps_dict[MAPPER_ELSE_OPTION] in {None, NULL_STRING}: logging.info( - f"Found '{MAPPER_ELSE_OPTION}=None' default mapper. " - "Unmapped streams will be excluded from output." + "Found '%s=None' default mapper. " + "Unmapped streams will be excluded from output.", + MAPPER_ELSE_OPTION, ) self.default_mapper_type = RemoveRecordTransform self.stream_maps_dict.pop(MAPPER_ELSE_OPTION) else: - raise StreamMapConfigError( + msg = ( f"Undefined transform for '{MAPPER_ELSE_OPTION}'' case: " f"{self.stream_maps_dict[MAPPER_ELSE_OPTION]}" ) + raise StreamMapConfigError(msg) else: logging.debug( - f"Operator '{MAPPER_ELSE_OPTION}=None' was not found. " - "Unmapped streams will be included in output." + "Operator '%s=None' was not found. " + "Unmapped streams will be included in output.", + MAPPER_ELSE_OPTION, ) for stream_map_key, stream_def in self.stream_maps_dict.items(): if stream_map_key.startswith("__"): - raise StreamMapConfigError( - f"Option '{stream_map_key}:{stream_def}' is not expected." - ) + msg = f"Option '{stream_map_key}:{stream_def}' is not expected." + raise StreamMapConfigError(msg) def register_raw_streams_from_catalog(self, catalog: Catalog) -> None: """Register all streams as described in the catalog dict. @@ -595,8 +642,11 @@ def register_raw_streams_from_catalog(self, catalog: Catalog) -> None: catalog_entry.key_properties, ) - def register_raw_stream_schema( - self, stream_name: str, schema: dict, key_properties: list[str] | None + def register_raw_stream_schema( # noqa: PLR0912, C901 + self, + stream_name: str, + schema: dict, + key_properties: list[str] | None, ) -> None: """Register a new stream as described by its name and schema. 
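The hunks above add `"__NULL__"` as a string alternative to JSON `null` in stream map configs and expose the `datetime` module (alongside `md5`) to mapping expressions. A hypothetical config exercising both; the stream and property names here are invented for illustration:

```python
# Hypothetical stream_maps configuration (dict form of what a tap/target would receive).
stream_maps_config = {
    "stream_maps": {
        "customers": {
            "email_hash": "md5(email)",  # derived column via the md5() helper
            "extracted_date": "datetime.date.today().isoformat()",  # datetime module now exposed
            "email": "__NULL__",  # drop the raw column; string equivalent of null
        },
        # Drop every stream that is not mapped above; "__NULL__" now works where
        # a JSON null was previously required.
        "__else__": "__NULL__",
    },
}
```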
@@ -629,22 +679,26 @@ def register_raw_stream_schema( schema, key_properties, flattening_options=self.flattening_options, - ) + ), ] - for stream_map_key, stream_def in self.stream_maps_dict.items(): + for stream_map_key, stream_map_val in self.stream_maps_dict.items(): + stream_def = ( + stream_map_val.copy() + if isinstance(stream_map_val, dict) + else stream_map_val + ) stream_alias: str = stream_map_key source_stream: str = stream_map_key - if isinstance(stream_def, str): + if isinstance(stream_def, str) and stream_def != NULL_STRING: if stream_name == stream_map_key: # TODO: Add any expected cases for str expressions (currently none) pass - raise StreamMapConfigError( - f"Option '{stream_map_key}:{stream_def}' is not expected." - ) + msg = f"Option '{stream_map_key}:{stream_def}' is not expected." + raise StreamMapConfigError(msg) - if stream_def is None: + if stream_def is None or stream_def == NULL_STRING: if stream_name != stream_map_key: continue @@ -654,14 +708,15 @@ def register_raw_stream_schema( key_properties=None, flattening_options=self.flattening_options, ) - logging.info(f"Set null tansform as default for '{stream_name}'") + logging.info("Set null tansform as default for '%s'", stream_name) continue if not isinstance(stream_def, dict): - raise StreamMapConfigError( - "Unexpected stream definition type. Expected str, dict, or None. " + msg = ( + f"Unexpected stream definition type. Expected str, dict, or None. " f"Got '{type(stream_def).__name__}'." ) + raise StreamMapConfigError(msg) if MAPPER_SOURCE_OPTION in stream_def: source_stream = stream_def.pop(MAPPER_SOURCE_OPTION) diff --git a/singer_sdk/mapper_base.py b/singer_sdk/mapper_base.py index c09d39255..b0be198bd 100644 --- a/singer_sdk/mapper_base.py +++ b/singer_sdk/mapper_base.py @@ -1,14 +1,13 @@ """Abstract base class for stream mapper plugins.""" +from __future__ import annotations + import abc -from io import FileIO -from typing import Callable, Iterable, List, Tuple +import typing as t import click -import singer -from singer_sdk.cli import common_options -from singer_sdk.configuration._dict_config import merge_config_sources +import singer_sdk._singerlib as singer from singer_sdk.helpers._classproperty import classproperty from singer_sdk.helpers.capabilities import CapabilitiesEnum, PluginCapabilities from singer_sdk.io_base import SingerReader @@ -19,11 +18,7 @@ class InlineMapper(PluginBase, SingerReader, metaclass=abc.ABCMeta): """Abstract base class for inline mappers.""" @classproperty - def _env_prefix(cls) -> str: - return f"{cls.name.upper().replace('-', '_')}_" - - @classproperty - def capabilities(self) -> List[CapabilitiesEnum]: + def capabilities(self) -> list[CapabilitiesEnum]: """Get capabilities. 
Returns: @@ -34,7 +29,7 @@ def capabilities(self) -> List[CapabilitiesEnum]: ] @staticmethod - def _write_messages(messages: Iterable[singer.Message]) -> None: + def _write_messages(messages: t.Iterable[singer.Message]) -> None: for message in messages: singer.write_message(message) @@ -50,8 +45,11 @@ def _process_state_message(self, message_dict: dict) -> None: def _process_activate_version_message(self, message_dict: dict) -> None: self._write_messages(self.map_activate_version_message(message_dict)) + def _process_batch_message(self, message_dict: dict) -> None: + self._write_messages(self.map_batch_message(message_dict)) + @abc.abstractmethod - def map_schema_message(self, message_dict: dict) -> Iterable[singer.Message]: + def map_schema_message(self, message_dict: dict) -> t.Iterable[singer.Message]: """Map a schema message to zero or more new messages. Args: @@ -60,7 +58,7 @@ def map_schema_message(self, message_dict: dict) -> Iterable[singer.Message]: ... @abc.abstractmethod - def map_record_message(self, message_dict: dict) -> Iterable[singer.Message]: + def map_record_message(self, message_dict: dict) -> t.Iterable[singer.Message]: """Map a record message to zero or more new messages. Args: @@ -69,7 +67,7 @@ def map_record_message(self, message_dict: dict) -> Iterable[singer.Message]: ... @abc.abstractmethod - def map_state_message(self, message_dict: dict) -> Iterable[singer.Message]: + def map_state_message(self, message_dict: dict) -> t.Iterable[singer.Message]: """Map a state message to zero or more new messages. Args: @@ -81,7 +79,7 @@ def map_state_message(self, message_dict: dict) -> Iterable[singer.Message]: def map_activate_version_message( self, message_dict: dict, - ) -> Iterable[singer.Message]: + ) -> t.Iterable[singer.Message]: """Map a version message to zero or more new messages. Args: @@ -89,68 +87,69 @@ def map_activate_version_message( """ ... - @classproperty - def cli(cls) -> Callable: + def map_batch_message( + self, + message_dict: dict, # noqa: ARG002 + ) -> t.Iterable[singer.Message]: + """Map a batch message to zero or more new messages. + + Args: + message_dict: A BATCH message JSON dictionary. + + Raises: + NotImplementedError: if not implemented by subclass. + """ + msg = "BATCH messages are not supported by mappers." + raise NotImplementedError(msg) + + # CLI handler + + @classmethod + def invoke( # type: ignore[override] + cls: type[InlineMapper], + *, + about: bool = False, + about_format: str | None = None, + config: tuple[str, ...] = (), + file_input: t.IO[str] | None = None, + ) -> None: + """Invoke the mapper. + + Args: + about: Display package metadata and settings. + about_format: Specify output style for `--about`. + config: Configuration file location or 'ENV' to use environment + variables. Accepts multiple inputs as a tuple. + file_input: Optional file to read input from. + """ + super().invoke(about=about, about_format=about_format) + cls.print_version(print_fn=cls.logger.info) + config_files, parse_env_config = cls.config_from_cli_args(*config) + + mapper = cls( + config=config_files, # type: ignore[arg-type] + validate_config=True, + parse_env_config=parse_env_config, + ) + mapper.listen(file_input) + + @classmethod + def get_singer_command(cls: type[InlineMapper]) -> click.Command: """Execute standard CLI handler for inline mappers. Returns: - A callable CLI object. + A click.Command object. 
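With the abstract `map_*` hooks above, a mapper plugin only implements per-message transforms; reading stdin and emitting messages is handled by the base class. A rough sketch of a subclass (hypothetical plugin name; assumes the `_singerlib` message dataclasses accept the fields shown):

```python
import typing as t

import singer_sdk._singerlib as singer
from singer_sdk.mapper_base import InlineMapper


class UppercaseMapper(InlineMapper):
    """Hypothetical mapper that upper-cases every string value in a record."""

    name = "mapper-uppercase"

    def map_schema_message(self, message_dict: dict) -> t.Iterable[singer.Message]:
        yield singer.SchemaMessage(
            stream=message_dict["stream"],
            schema=message_dict["schema"],
            key_properties=message_dict.get("key_properties"),
        )

    def map_record_message(self, message_dict: dict) -> t.Iterable[singer.Message]:
        record = {
            key: value.upper() if isinstance(value, str) else value
            for key, value in message_dict["record"].items()
        }
        yield singer.RecordMessage(stream=message_dict["stream"], record=record)

    def map_state_message(self, message_dict: dict) -> t.Iterable[singer.Message]:
        yield singer.StateMessage(value=message_dict["value"])

    def map_activate_version_message(
        self,
        message_dict: dict,
    ) -> t.Iterable[singer.Message]:
        yield singer.ActivateVersionMessage(
            stream=message_dict["stream"],
            version=message_dict["version"],
        )
```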
""" - - @common_options.PLUGIN_VERSION - @common_options.PLUGIN_ABOUT - @common_options.PLUGIN_ABOUT_FORMAT - @common_options.PLUGIN_CONFIG - @common_options.PLUGIN_FILE_INPUT - @click.command( - help="Execute the Singer mapper.", - context_settings={"help_option_names": ["--help"]}, + command = super().get_singer_command() + command.help = "Execute the Singer mapper." + command.params.extend( + [ + click.Option( + ["--input", "file_input"], + help="A path to read messages from instead of from standard in.", + type=click.File("r"), + ), + ], ) - def cli( - version: bool = False, - about: bool = False, - config: Tuple[str, ...] = (), - format: str = None, - file_input: FileIO = None, - ) -> None: - """Handle command line execution. - - Args: - version: Display the package version. - about: Display package metadata and settings. - format: Specify output style for `--about`. - config: Configuration file location or 'ENV' to use environment - variables. Accepts multiple inputs as a tuple. - file_input: Specify a path to an input file to read messages from. - Defaults to standard in if unspecified. - """ - if version: - cls.print_version() - return - - if not about: - cls.print_version(print_fn=cls.logger.info) - - validate_config: bool = True - if about: - validate_config = False - - cls.print_version(print_fn=cls.logger.info) - - config_dict = merge_config_sources( - config, - cls.config_jsonschema, - cls._env_prefix, - ) - - mapper = cls( # type: ignore # Ignore 'type not callable' - config=config_dict, - validate_config=validate_config, - ) - - if about: - mapper.print_about(format) - else: - mapper.listen(file_input) - - return cli + + return command diff --git a/singer_sdk/metrics.py b/singer_sdk/metrics.py new file mode 100644 index 000000000..89d51a338 --- /dev/null +++ b/singer_sdk/metrics.py @@ -0,0 +1,418 @@ +"""Singer metrics logging.""" + +from __future__ import annotations + +import abc +import enum +import json +import logging +import logging.config +import os +import typing as t +from dataclasses import dataclass, field +from pathlib import Path +from time import time + +import yaml + +from singer_sdk.helpers._resources import Traversable, get_package_files + +if t.TYPE_CHECKING: + from types import TracebackType + +DEFAULT_LOG_INTERVAL = 60.0 +METRICS_LOGGER_NAME = __name__ +METRICS_LOG_LEVEL_SETTING = "metrics_log_level" + +_TVal = t.TypeVar("_TVal") + + +class Status(str, enum.Enum): + """Constants for commonly used status values.""" + + SUCCEEDED = "succeeded" + FAILED = "failed" + + +class Tag(str, enum.Enum): + """Constants for commonly used tags.""" + + STREAM = "stream" + CONTEXT = "context" + ENDPOINT = "endpoint" + JOB_TYPE = "job_type" + HTTP_STATUS_CODE = "http_status_code" + STATUS = "status" + + +class Metric(str, enum.Enum): + """Common metric types.""" + + RECORD_COUNT = "record_count" + BATCH_COUNT = "batch_count" + HTTP_REQUEST_DURATION = "http_request_duration" + HTTP_REQUEST_COUNT = "http_request_count" + JOB_DURATION = "job_duration" + SYNC_DURATION = "sync_duration" + + +@dataclass +class Point(t.Generic[_TVal]): + """An individual metric measurement.""" + + metric_type: str + metric: Metric + value: _TVal + tags: dict[str, t.Any] = field(default_factory=dict) + + def __str__(self) -> str: + """Get string representation of this measurement. + + Returns: + A string representation of this measurement. + """ + return self.to_json() + + def to_json(self) -> str: + """Convert this measure to a JSON object. + + Returns: + A JSON object. 
+ """ + return json.dumps( + { + "type": self.metric_type, + "metric": self.metric.value, + "value": self.value, + "tags": self.tags, + }, + default=str, + ) + + +def log(logger: logging.Logger, point: Point) -> None: + """Log a measurement. + + Args: + logger: An logger instance. + point: A measurement. + """ + logger.info("METRIC: %s", point) + + +class Meter(metaclass=abc.ABCMeta): + """Base class for all meters.""" + + def __init__(self, metric: Metric, tags: dict | None = None) -> None: + """Initialize a meter. + + Args: + metric: The metric type. + tags: Tags to add to the measurement. + """ + self.metric = metric + self.tags = tags or {} + self.logger = get_metrics_logger() + + @property + def context(self) -> dict | None: + """Get the context for this meter. + + Returns: + A context dictionary. + """ + return self.tags.get(Tag.CONTEXT) + + @context.setter + def context(self, value: dict | None) -> None: + """Set the context for this meter. + + Args: + value: A context dictionary. + """ + if value is None: + self.tags.pop(Tag.CONTEXT, None) + else: + self.tags[Tag.CONTEXT] = value + + @abc.abstractmethod + def __enter__(self) -> Meter: + """Enter the meter context.""" + ... + + @abc.abstractmethod + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + """Exit the meter context. + + Args: + exc_type: The exception type. + exc_val: The exception value. + exc_tb: The exception traceback. + """ + ... + + +class Counter(Meter): + """A meter for counting things.""" + + def __init__( + self, + metric: Metric, + tags: dict | None = None, + log_interval: float = DEFAULT_LOG_INTERVAL, + ) -> None: + """Initialize a counter. + + Args: + metric: The metric type. + tags: Tags to add to the measurement. + log_interval: The interval at which to log the count. + """ + super().__init__(metric, tags) + self.value = 0 + self.log_interval = log_interval + self.last_log_time = time() + + def __enter__(self) -> Counter: + """Enter the counter context. + + Returns: + The counter instance. + """ + self.last_log_time = time() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + """Exit the counter context. + + Args: + exc_type: The exception type. + exc_val: The exception value. + exc_tb: The exception traceback. + """ + self._pop() + + def _pop(self) -> None: + """Log and reset the counter.""" + log(self.logger, Point("counter", self.metric, self.value, self.tags)) + self.value = 0 + self.last_log_time = time() + + def increment(self, value: int = 1) -> None: + """Increment the counter. + + Args: + value: The value to increment by. + """ + self.value += value + if self._ready_to_log(): + self._pop() + + def _ready_to_log(self) -> bool: + """Check if the counter is ready to log. + + Returns: + True if the counter is ready to log. + """ + return time() - self.last_log_time > self.log_interval + + +class Timer(Meter): + """A meter for timing things.""" + + def __init__(self, metric: Metric, tags: dict | None = None) -> None: + """Initialize a timer. + + Args: + metric: The metric type. + tags: Tags to add to the measurement. + """ + super().__init__(metric, tags) + self.start_time = time() + + def __enter__(self) -> Timer: + """Enter the timer context. + + Returns: + The timer instance. 
+ """ + self.start_time = time() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + """Exit the timer context. + + Args: + exc_type: The exception type. + exc_val: The exception value. + exc_tb: The exception traceback. + """ + if Tag.STATUS not in self.tags: + if exc_type is None: + self.tags[Tag.STATUS] = Status.SUCCEEDED + else: + self.tags[Tag.STATUS] = Status.FAILED + log(self.logger, Point("timer", self.metric, self.elapsed(), self.tags)) + + def elapsed(self) -> float: + """Get the elapsed time. + + Returns: + The elapsed time. + """ + return time() - self.start_time + + +def get_metrics_logger() -> logging.Logger: + """Get a logger for emitting metrics. + + Returns: + A logger that can be used to emit metrics. + """ + return logging.getLogger(METRICS_LOGGER_NAME) + + +def record_counter( + stream: str, + endpoint: str | None = None, + log_interval: float = DEFAULT_LOG_INTERVAL, + **tags: t.Any, +) -> Counter: + """Use for counting records retrieved from the source. + + with record_counter("my_stream", endpoint="/users") as counter: + for record in my_records: + # Do something with the record + counter.increment() + + Args: + stream: The stream name. + endpoint: The endpoint name. + log_interval: The interval at which to log the count. + tags: Tags to add to the measurement. + + Returns: + A counter for counting records. + """ + tags[Tag.STREAM] = stream + if endpoint: + tags[Tag.ENDPOINT] = endpoint + return Counter(Metric.RECORD_COUNT, tags, log_interval=log_interval) + + +def batch_counter(stream: str, **tags: t.Any) -> Counter: + """Use for counting batches sent to the target. + + with batch_counter("my_stream") as counter: + for batch in my_batches: + # Do something with the batch + counter.increment() + + Args: + stream: The stream name. + tags: Tags to add to the measurement. + + Returns: + A counter for counting batches. + """ + tags[Tag.STREAM] = stream + return Counter(Metric.BATCH_COUNT, tags) + + +def http_request_counter( + stream: str, + endpoint: str, + log_interval: float = DEFAULT_LOG_INTERVAL, + **tags: t.Any, +) -> Counter: + """Use for counting HTTP requests. + + with http_request_counter() as counter: + for record in my_records: + # Do something with the record + counter.increment() + + Args: + stream: The stream name. + endpoint: The endpoint name. + log_interval: The interval at which to log the count. + tags: Tags to add to the measurement. + + Returns: + A counter for counting HTTP requests. + """ + tags.update({Tag.STREAM: stream, Tag.ENDPOINT: endpoint}) + return Counter(Metric.HTTP_REQUEST_COUNT, tags, log_interval=log_interval) + + +def sync_timer(stream: str, **tags: t.Any) -> Timer: + """Use for timing the sync of a stream. + + with singer.metrics.sync_timer() as timer: + # Do something + print(f"Sync took {timer.elapsed()} seconds") + + Args: + stream: The stream name. + tags: Tags to add to the measurement. + + Returns: + A timer for timing the sync of a stream. + """ + tags[Tag.STREAM] = stream + return Timer(Metric.SYNC_DURATION, tags) + + +def _load_yaml_logging_config(path: Traversable | Path) -> t.Any: # noqa: ANN401 + """Load the logging config from the YAML file. + + Args: + path: A path to the YAML file. + + Returns: + The logging config. + """ + with path.open() as f: + return yaml.safe_load(f) + + +def _get_default_config() -> t.Any: # noqa: ANN401 + """Get a logging configuration. + + Returns: + A logging configuration. 
+ """ + log_config_path = get_package_files("singer_sdk").joinpath("default_logging.yml") + return _load_yaml_logging_config(log_config_path) + + +def _setup_logging(config: t.Mapping[str, t.Any]) -> None: + """Setup logging. + + Args: + config: A plugin configuration dictionary. + """ + logging.config.dictConfig(_get_default_config()) + + config = config or {} + metrics_log_level = config.get(METRICS_LOG_LEVEL_SETTING, "INFO").upper() + logging.getLogger(METRICS_LOGGER_NAME).setLevel(metrics_log_level) + + if "SINGER_SDK_LOG_CONFIG" in os.environ: + log_config_path = Path(os.environ["SINGER_SDK_LOG_CONFIG"]) + logging.config.dictConfig(_load_yaml_logging_config(log_config_path)) diff --git a/singer_sdk/pagination.py b/singer_sdk/pagination.py new file mode 100644 index 000000000..f00bb0920 --- /dev/null +++ b/singer_sdk/pagination.py @@ -0,0 +1,458 @@ +"""Generic paginator classes.""" + +from __future__ import annotations + +import sys +import typing as t +from abc import ABCMeta, abstractmethod +from urllib.parse import ParseResult, urlparse + +from singer_sdk.helpers.jsonpath import extract_jsonpath + +if sys.version_info >= (3, 8): + from typing import Protocol # noqa: ICN003 +else: + from typing_extensions import Protocol + +if t.TYPE_CHECKING: + from requests import Response + +T = t.TypeVar("T") +TPageToken = t.TypeVar("TPageToken") + + +def first(iterable: t.Iterable[T]) -> T: + """Return the first element of an iterable or raise an exception. + + Args: + iterable: An iterable. + + Returns: + The first element of the iterable. + + >>> first('ABC') + 'A' + """ + return next(iter(iterable)) + + +class BaseAPIPaginator(t.Generic[TPageToken], metaclass=ABCMeta): + """An API paginator object.""" + + def __init__(self, start_value: TPageToken) -> None: + """Create a new paginator. + + Args: + start_value: Initial value. + """ + self._value: TPageToken = start_value + self._page_count = 0 + self._finished = False + self._last_seen_record: dict | None = None + + @property + def current_value(self) -> TPageToken: + """Get the current pagination value. + + Returns: + Current page value. + """ + return self._value + + @property + def finished(self) -> bool: + """Get a flag that indicates if the last page of data has been reached. + + Returns: + True if there are no more pages. + """ + return self._finished + + @property + def count(self) -> int: + """Count the number of pages traversed so far. + + Returns: + Number of pages. + """ + return self._page_count + + def __str__(self) -> str: + """Stringify this object. + + Returns: + String representation. + """ + return f"{self.__class__.__name__}<{self.current_value}>" + + def __repr__(self) -> str: + """Stringify this object. + + Returns: + String representation. + """ + return str(self) + + def advance(self, response: Response) -> None: + """Get a new page value and advance the current one. + + Args: + response: API response object. + + Raises: + RuntimeError: If a loop in pagination is detected. That is, when two + consecutive pagination tokens are identical. + """ + self._page_count += 1 + + if not self.has_more(response): + self._finished = True + return + + new_value = self.get_next(response) + + if new_value and new_value == self._value: + msg = ( + f"Loop detected in pagination. Pagination token {new_value} is " + "identical to prior token." + ) + raise RuntimeError(msg) + + # Stop if new value None, empty string, 0, etc. 
+ if not new_value: + self._finished = True + else: + self._value = new_value + + def has_more(self, response: Response) -> bool: # noqa: ARG002 + """Override this method to check if the endpoint has any pages left. + + Args: + response: API response object. + + Returns: + Boolean flag used to indicate if the endpoint has more pages. + """ + return True + + @abstractmethod + def get_next(self, response: Response) -> TPageToken | None: + """Get the next pagination token or index from the API response. + + Args: + response: API response object. + + Returns: + The next page token or index. Return `None` from this method to indicate + the end of pagination. + """ + ... + + +class SinglePagePaginator(BaseAPIPaginator[None]): + """A paginator that does works with single-page endpoints.""" + + def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: + """Create a new paginator. + + Args: + args: Paginator positional arguments for base class. + kwargs: Paginator keyword arguments for base class. + """ + super().__init__(None, *args, **kwargs) + + def get_next(self, response: Response) -> None: # noqa: ARG002 + """Get the next pagination token or index from the API response. + + Args: + response: API response object. + + Returns: + The next page token or index. Return `None` from this method to indicate + the end of pagination. + """ + return + + +class BaseHATEOASPaginator( + BaseAPIPaginator[t.Optional[ParseResult]], + metaclass=ABCMeta, +): + """Paginator class for APIs supporting HATEOAS links in their response bodies. + + HATEOAS stands for "Hypermedia as the Engine of Application State". See + https://en.wikipedia.org/wiki/HATEOAS. + + This paginator expects responses to have a key "next" with a value + like "https://api.com/link/to/next-item". + + The :attr:`~singer_sdk.pagination.BaseAPIPaginator.current_value` attribute of + this paginator is a `urllib.parse.ParseResult`_ object. This object + contains the following attributes: + + - scheme + - netloc + - path + - params + - query + - fragment + + That means you can access and parse the query params in your stream like this: + + .. code-block:: python + + class MyHATEOASPaginator(BaseHATEOASPaginator): + def get_next_url(self, response): + return response.json().get("next") + + class MyStream(Stream): + def get_new_paginator(self): + return MyHATEOASPaginator() + + def get_url_params(self, next_page_token) -> dict: + if next_page_token: + return dict(parse_qsl(next_page_token.query)) + return {} + + .. _`urllib.parse.ParseResult`: + https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlparse + """ + + def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: + """Create a new paginator. + + Args: + args: Paginator positional arguments for base class. + kwargs: Paginator keyword arguments for base class. + """ + super().__init__(None, *args, **kwargs) + + @abstractmethod + def get_next_url(self, response: Response) -> str | None: + """Override this method to extract a HATEOAS link from the response. + + Args: + response: API response object. + """ + ... + + def get_next(self, response: Response) -> ParseResult | None: + """Get the next pagination token or index from the API response. + + Args: + response: API response object. + + Returns: + A parsed HATEOAS link if the response has one, otherwise `None`. + """ + next_url = self.get_next_url(response) + return urlparse(next_url) if next_url else None + + +class HeaderLinkPaginator(BaseHATEOASPaginator): + """Paginator class for APIs supporting HATEOAS links in their headers. 
+ + Links: + - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link + - https://datatracker.ietf.org/doc/html/rfc8288#section-3 + """ + + def get_next_url(self, response: Response) -> str | None: + """Override this method to extract a HATEOAS link from the response. + + Args: + response: API response object. + + Returns: + A HATEOAS link parsed from the response headers. + """ + url: str | None = response.links.get("next", {}).get("url") + return url + + +class JSONPathPaginator(BaseAPIPaginator[t.Optional[str]]): + """Paginator class for APIs returning a pagination token in the response body.""" + + def __init__( + self, + jsonpath: str, + *args: t.Any, + **kwargs: t.Any, + ) -> None: + """Create a new paginator. + + Args: + jsonpath: A JSONPath expression. + args: Paginator positional arguments for base class. + kwargs: Paginator keyword arguments for base class. + """ + super().__init__(None, *args, **kwargs) + self._jsonpath = jsonpath + + def get_next(self, response: Response) -> str | None: + """Get the next page token. + + Args: + response: API response object. + + Returns: + The next page token. + """ + all_matches = extract_jsonpath(self._jsonpath, response.json()) + return next(all_matches, None) + + +class SimpleHeaderPaginator(BaseAPIPaginator[t.Optional[str]]): + """Paginator class for APIs returning a pagination token in the response headers.""" + + def __init__( + self, + key: str, + *args: t.Any, + **kwargs: t.Any, + ) -> None: + """Create a new paginator. + + Args: + key: Header key that contains the next page token. + args: Paginator positional arguments for base class. + kwargs: Paginator keyword arguments for base class. + """ + super().__init__(None, *args, **kwargs) + self._key = key + + def get_next(self, response: Response) -> str | None: + """Get the next page token. + + Args: + response: API response object. + + Returns: + The next page token. + """ + return response.headers.get(self._key, None) + + +class BasePageNumberPaginator(BaseAPIPaginator[int], metaclass=ABCMeta): + """Paginator class for APIs that use page number.""" + + @abstractmethod + def has_more(self, response: Response) -> bool: + """Override this method to check if the endpoint has any pages left. + + Args: + response: API response object. + + Returns: + Boolean flag used to indicate if the endpoint has more pages. + + """ + ... + + def get_next(self, response: Response) -> int | None: # noqa: ARG002 + """Get the next page number. + + Args: + response: API response object. + + Returns: + The next page number. + """ + return self._value + 1 + + +class BaseOffsetPaginator(BaseAPIPaginator[int], metaclass=ABCMeta): + """Paginator class for APIs that use page offset.""" + + def __init__( + self, + start_value: int, + page_size: int, + *args: t.Any, + **kwargs: t.Any, + ) -> None: + """Create a new paginator. + + Args: + start_value: Initial value. + page_size: Constant page size. + args: Paginator positional arguments. + kwargs: Paginator keyword arguments. + """ + super().__init__(start_value, *args, **kwargs) + self._page_size = page_size + + @abstractmethod + def has_more(self, response: Response) -> bool: + """Override this method to check if the endpoint has any pages left. + + Args: + response: API response object. + + Returns: + Boolean flag used to indicate if the endpoint has more pages. + """ + ... + + def get_next(self, response: Response) -> int | None: # noqa: ARG002 + """Get the next page offset. + + Args: + response: API response object. + + Returns: + The next page offset. 
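Offset-based APIs only need `has_more` implemented; `get_next` already advances the offset by `page_size` as shown above. A sketch against a hypothetical API that reports `has_more` in its JSON body:

```python
from requests import Response

from singer_sdk.pagination import BaseOffsetPaginator


class MyOffsetPaginator(BaseOffsetPaginator):
    """Paginator for a hypothetical API returning {"data": [...], "has_more": true}."""

    def has_more(self, response: Response) -> bool:
        return bool(response.json().get("has_more", False))


# A REST stream would typically hand this back from get_new_paginator(), e.g.:
#     def get_new_paginator(self):
#         return MyOffsetPaginator(start_value=0, page_size=100)
```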
+ """ + return self._value + self._page_size + + +class LegacyPaginatedStreamProtocol(Protocol[TPageToken]): + """Protocol for legacy paginated streams classes.""" + + def get_next_page_token( + self, + response: Response, + previous_token: TPageToken | None, + ) -> TPageToken | None: + """Get the next page token. + + Args: + response: API response object. + previous_token: Previous page token. + """ + ... # pragma: no cover + + +class LegacyStreamPaginator( + BaseAPIPaginator[t.Optional[TPageToken]], + t.Generic[TPageToken], +): + """Paginator that works with REST streams as they exist today.""" + + def __init__( + self, + stream: LegacyPaginatedStreamProtocol[TPageToken], + *args: t.Any, + **kwargs: t.Any, + ) -> None: + """Create a new paginator. + + Args: + stream: A RESTStream instance. + args: Paginator positional arguments for base class. + kwargs: Paginator keyword arguments for base class. + """ + super().__init__(None, *args, **kwargs) + self.stream = stream + + def get_next(self, response: Response) -> TPageToken | None: + """Get next page value by calling the stream method. + + Args: + response: API response object. + + Returns: + The next page token or index. Return `None` from this method to indicate + the end of pagination. + """ + return self.stream.get_next_page_token(response, self.current_value) diff --git a/singer_sdk/plugin_base.py b/singer_sdk/plugin_base.py index 7ef4d4857..53e2cd2f2 100644 --- a/singer_sdk/plugin_base.py +++ b/singer_sdk/plugin_base.py @@ -1,29 +1,26 @@ """Shared parent class for Tap, Target (future), and Transform (future).""" +from __future__ import annotations + import abc -import json import logging import os -from collections import OrderedDict -from pathlib import PurePath +import sys +import time +import typing as t +from pathlib import Path, PurePath from types import MappingProxyType -from typing import ( - Any, - Callable, - Dict, - List, - Mapping, - Optional, - Tuple, - Type, - Union, - cast, -) import click -from jsonschema import Draft4Validator, SchemaError, ValidationError - -from singer_sdk.configuration._dict_config import parse_environment_config +from jsonschema import Draft7Validator +from packaging.specifiers import SpecifierSet + +from singer_sdk import about, metrics +from singer_sdk.cli import plugin_cli +from singer_sdk.configuration._dict_config import ( + merge_missing_config_jsonschema, + parse_environment_config, +) from singer_sdk.exceptions import ConfigValidationError from singer_sdk.helpers._classproperty import classproperty from singer_sdk.helpers._compat import metadata @@ -39,47 +36,82 @@ from singer_sdk.typing import extend_validator_with_defaults SDK_PACKAGE_NAME = "singer_sdk" +CHECK_SUPPORTED_PYTHON_VERSIONS = ( + # unsupported versions + "2.7", + "3.0", + "3.1", + "3.2", + "3.3", + "3.4", + "3.5", + "3.6", + "3.7", + # current supported versions + "3.8", + "3.9", + "3.10", + "3.11", + # future supported versions + "3.12", + "3.13", + "3.14", + "3.15", + "3.16", +) + + +JSONSchemaValidator = extend_validator_with_defaults(Draft7Validator) + +class MapperNotInitialized(Exception): + """Raised when the mapper is not initialized.""" -JSONSchemaValidator = extend_validator_with_defaults(Draft4Validator) + def __init__(self) -> None: + """Initialize the exception.""" + super().__init__("Mapper not initialized. Please call setup_mapper() first.") class PluginBase(metaclass=abc.ABCMeta): """Abstract base class for taps.""" - name: str # The executable name of the tap or target plugin. 
+ #: The executable name of the tap or target plugin. e.g. tap-foo + name: str - config_jsonschema: dict = {} + #: The package name of the plugin. e.g meltanolabs-tap-foo + package_name: str | None = None + + config_jsonschema: t.ClassVar[dict] = {} # A JSON Schema object defining the config options that this tap will accept. _config: dict @classproperty - def logger(cls) -> logging.Logger: + def logger(cls) -> logging.Logger: # noqa: N805 """Get logger. Returns: Plugin logger. """ # Get the level from <PLUGIN_NAME>_LOGLEVEL or LOGLEVEL environment variables - LOGLEVEL = ( - os.environ.get(f"{cls.name.upper()}_LOGLEVEL") - or os.environ.get("LOGLEVEL") - or "INFO" - ).upper() - - assert ( - LOGLEVEL in logging._levelToName.values() - ), f"Invalid LOGLEVEL configuration: {LOGLEVEL}" + plugin_env_prefix = f"{cls.name.upper().replace('-', '_')}_" + log_level = os.environ.get(f"{plugin_env_prefix}LOGLEVEL") or os.environ.get( + "LOGLEVEL", + ) + logger = logging.getLogger(cls.name) - logger.setLevel(LOGLEVEL) + + if log_level is not None and log_level.upper() in logging._levelToName.values(): + logger.setLevel(log_level.upper()) + return logger # Constructor def __init__( self, - config: Optional[Union[dict, PurePath, str, List[Union[PurePath, str]]]] = None, + *, + config: dict | PurePath | str | list[PurePath | str] | None = None, parse_env_config: bool = False, validate_config: bool = True, ) -> None: @@ -96,7 +128,7 @@ def __init__( """ if not config: config_dict = {} - elif isinstance(config, str) or isinstance(config, PurePath): + elif isinstance(config, (str, PurePath)): config_dict = read_json_file(config) elif isinstance(config, list): config_dict = {} @@ -107,7 +139,8 @@ def __init__( elif isinstance(config, dict): config_dict = config else: - raise ValueError(f"Error parsing config of type '{type(config).__name__}'.") + msg = f"Error parsing config of type '{type(config).__name__}'." + raise ValueError(msg) if parse_env_config: self.logger.info("Parsing env var for settings config...") config_dict.update(self._env_var_config) @@ -118,10 +151,55 @@ def __init__( config_dict[k] = SecretString(v) self._config = config_dict self._validate_config(raise_errors=validate_config) - self.mapper: PluginMapper + self._mapper: PluginMapper | None = None + + metrics._setup_logging(self.config) + self.metrics_logger = metrics.get_metrics_logger() + + # Initialization timestamp + self.__initialized_at = int(time.time() * 1000) + + def setup_mapper(self) -> None: + """Initialize the plugin mapper for this tap.""" + self._mapper = PluginMapper( + plugin_config=dict(self.config), + logger=self.logger, + ) + + @property + def mapper(self) -> PluginMapper: + """Plugin mapper for this tap. + + Returns: + A PluginMapper object. + + Raises: + MapperNotInitialized: If the mapper has not been initialized. + """ + if self._mapper is None: + raise MapperNotInitialized + return self._mapper + + @mapper.setter + def mapper(self, mapper: PluginMapper) -> None: + """Set the plugin mapper for this plugin. + + Args: + mapper: A PluginMapper object. + """ + self._mapper = mapper + + @property + def initialized_at(self) -> int: + """Start time of the plugin. + + Returns: + The start time of the plugin. + """ + return self.__initialized_at @classproperty - def capabilities(self) -> List[CapabilitiesEnum]: + def capabilities(self) -> list[CapabilitiesEnum]: """Get capabilities. 
Developers may override this property in oder to add or remove @@ -133,10 +211,11 @@ def capabilities(self) -> List[CapabilitiesEnum]: return [ PluginCapabilities.STREAM_MAPS, PluginCapabilities.FLATTENING, + PluginCapabilities.BATCH, ] @classproperty - def _env_var_config(cls) -> Dict[str, Any]: + def _env_var_config(cls) -> dict[str, t.Any]: # noqa: N805 """Return any config specified in environment variables. Variables must match the convention "<PLUGIN_NAME>_<SETTING_NAME>", @@ -153,31 +232,88 @@ def _env_var_config(cls) -> Dict[str, Any]: # Core plugin metadata: - @classproperty - def plugin_version(cls) -> str: - """Get version. + @staticmethod + def _get_package_version(package: str) -> str: + """Return the package version number. + + Args: + package: The package name. Returns: The package version number. """ try: - version = metadata.version(cls.name) + version = metadata.version(package) except metadata.PackageNotFoundError: version = "[could not be detected]" return version - @classproperty - def sdk_version(cls) -> str: - """Return the package version number. + @staticmethod + def _get_supported_python_versions(package: str) -> list[str] | None: + """Return the supported Python versions. + + Args: + package: The package name. Returns: - Meltano SDK version number. + A list of supported Python versions. """ try: - version = metadata.version(SDK_PACKAGE_NAME) + package_metadata = metadata.metadata(package) except metadata.PackageNotFoundError: - version = "[could not be detected]" - return version + return None + + reported_python_versions = SpecifierSet(package_metadata["Requires-Python"]) + return [ + version + for version in CHECK_SUPPORTED_PYTHON_VERSIONS + if version in reported_python_versions + ] + + @classmethod + def get_plugin_version(cls) -> str: + """Return the package version number. + + Returns: + The package version number. + """ + return cls._get_package_version(cls.package_name or cls.name) + + @classmethod + def get_sdk_version(cls) -> str: + """Return the package version number. + + Returns: + The package version number. + """ + return cls._get_package_version(SDK_PACKAGE_NAME) + + @classmethod + def get_supported_python_versions(cls) -> list[str] | None: + """Return the supported Python versions. + + Returns: + A list of supported Python versions. + """ + return cls._get_supported_python_versions(cls.package_name or cls.name) + + @classproperty + def plugin_version(cls) -> str: # noqa: N805 + """Get version. + + Returns: + The package version number. + """ + return cls.get_plugin_version() + + @classproperty + def sdk_version(cls) -> str: # noqa: N805 + """Return the package version number. + + Returns: + Meltano Singer SDK version number. + """ + return cls.get_sdk_version() # Abstract methods: @@ -188,18 +324,18 @@ def state(self) -> dict: Raises: NotImplementedError: If the derived plugin doesn't override this method. """ - raise NotImplementedError() + raise NotImplementedError # Core plugin config: @property - def config(self) -> Mapping[str, Any]: + def config(self) -> t.Mapping[str, t.Any]: """Get config. Returns: A frozen (read-only) config dictionary map. 
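The supported-Python detection above boils down to filtering a fixed candidate list through the package's `Requires-Python` specifier. The same check in isolation (the specifier string here is only an example):

```python
from packaging.specifiers import SpecifierSet

requires_python = SpecifierSet(">=3.8,<3.12")  # example Requires-Python metadata
candidates = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]

supported = [version for version in candidates if version in requires_python]
print(supported)  # ['3.8', '3.9', '3.10', '3.11']
```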
""" - return cast(Dict, MappingProxyType(self._config)) + return t.cast(dict, MappingProxyType(self._config)) @staticmethod def _is_secret_config(config_key: str) -> bool: @@ -216,8 +352,11 @@ def _is_secret_config(config_key: str) -> bool: return is_common_secret_key(config_key) def _validate_config( - self, raise_errors: bool = True, warnings_as_errors: bool = False - ) -> Tuple[List[str], List[str]]: + self, + *, + raise_errors: bool = True, + warnings_as_errors: bool = False, + ) -> tuple[list[str], list[str]]: """Validate configuration input against the plugin configuration JSON schema. Args: @@ -230,23 +369,23 @@ def _validate_config( Raises: ConfigValidationError: If raise_errors is True and validation fails. """ - warnings: List[str] = [] - errors: List[str] = [] + warnings: list[str] = [] + errors: list[str] = [] log_fn = self.logger.info config_jsonschema = self.config_jsonschema + if config_jsonschema: self.append_builtin_config(config_jsonschema) - try: - self.logger.debug( - f"Validating config using jsonschema: {config_jsonschema}" - ) - validator = JSONSchemaValidator(config_jsonschema) - validator.validate(self._config) - except (ValidationError, SchemaError) as ex: - errors.append(str(ex.message)) + self.logger.debug( + "Validating config using jsonschema: %s", + config_jsonschema, + ) + validator = JSONSchemaValidator(config_jsonschema) + errors = [e.message for e in validator.iter_errors(self._config)] + if errors: summary = ( - f"Config validation failed: {f'; '.join(errors)}\n" + f"Config validation failed: {'; '.join(errors)}\n" f"JSONSchema was: {config_jsonschema}" ) if raise_errors: @@ -257,47 +396,50 @@ def _validate_config( summary = f"Config validation passed with {len(warnings)} warnings." for warning in warnings: summary += f"\n{warning}" + if warnings_as_errors and raise_errors and warnings: - raise ConfigValidationError( - f"One or more warnings ocurred during validation: {warnings}" - ) + msg = f"One or more warnings ocurred during validation: {warnings}" + raise ConfigValidationError(msg) log_fn(summary) return warnings, errors @classmethod def print_version( - cls: Type["PluginBase"], - print_fn: Callable[[Any], None] = print, + cls: type[PluginBase], + print_fn: t.Callable[[t.Any], None] = print, ) -> None: """Print help text for the tap. Args: print_fn: A function to use to display the plugin version. - Defaults to :function:`print`. + Defaults to `print`_. + + .. _print: https://docs.python.org/3/library/functions.html#print """ print_fn(f"{cls.name} v{cls.plugin_version}, Meltano SDK v{cls.sdk_version}") @classmethod - def _get_about_info(cls: Type["PluginBase"]) -> Dict[str, Any]: + def _get_about_info(cls: type[PluginBase]) -> about.AboutInfo: """Returns capabilities and other tap metadata. Returns: A dictionary containing the relevant 'about' information. 
""" - info: Dict[str, Any] = OrderedDict({}) - info["name"] = cls.name - info["description"] = cls.__doc__ - info["version"] = cls.plugin_version - info["sdk_version"] = cls.sdk_version - info["capabilities"] = cls.capabilities - config_jsonschema = cls.config_jsonschema cls.append_builtin_config(config_jsonschema) - info["settings"] = config_jsonschema - return info + + return about.AboutInfo( + name=cls.name, + description=cls.__doc__, + version=cls.get_plugin_version(), + sdk_version=cls.get_sdk_version(), + supported_python_versions=cls.get_supported_python_versions(), + capabilities=cls.capabilities, + settings=config_jsonschema, + ) @classmethod - def append_builtin_config(cls: Type["PluginBase"], config_jsonschema: dict) -> None: + def append_builtin_config(cls: type[PluginBase], config_jsonschema: dict) -> None: """Appends built-in config to `config_jsonschema` if not already set. To customize or disable this behavior, developers may either override this class @@ -312,101 +454,155 @@ def append_builtin_config(cls: Type["PluginBase"], config_jsonschema: dict) -> N Args: config_jsonschema: [description] """ - - def _merge_missing(source_jsonschema: dict, target_jsonschema: dict) -> None: - # Append any missing properties in the target with those from source. - for k, v in source_jsonschema["properties"].items(): - if k not in target_jsonschema["properties"]: - target_jsonschema["properties"][k] = v - capabilities = cls.capabilities if PluginCapabilities.STREAM_MAPS in capabilities: - _merge_missing(STREAM_MAPS_CONFIG, config_jsonschema) + merge_missing_config_jsonschema(STREAM_MAPS_CONFIG, config_jsonschema) if PluginCapabilities.FLATTENING in capabilities: - _merge_missing(FLATTENING_CONFIG, config_jsonschema) + merge_missing_config_jsonschema(FLATTENING_CONFIG, config_jsonschema) @classmethod - def print_about(cls: Type["PluginBase"], format: Optional[str] = None) -> None: + def print_about( + cls: type[PluginBase], + output_format: str | None = None, + ) -> None: """Print capabilities and other tap metadata. Args: - format: Render option for the plugin information. + output_format: Render option for the plugin information. """ info = cls._get_about_info() + formatter = about.AboutFormatter.get_formatter(output_format or "text") + print(formatter.format_about(info)) # noqa: T201 - if format == "json": - print(json.dumps(info, indent=2, default=str)) + @staticmethod + def config_from_cli_args(*args: str) -> tuple[list[Path], bool]: + """Parse CLI arguments into a config dictionary. - elif format == "markdown": - max_setting_len = cast( - int, max(len(k) for k in info["settings"]["properties"].keys()) - ) + Args: + args: CLI arguments. - # Set table base for markdown - table_base = ( - f"| {'Setting':{max_setting_len}}| Required | Default | Description |\n" - f"|:{'-' * max_setting_len}|:--------:|:-------:|:------------|\n" - ) + Raises: + FileNotFoundError: If the config file does not exist. 
- # Empty list for string parts - md_list = [] - # Get required settings for table - required_settings = info["settings"].get("required", []) - - # Iterate over Dict to set md - md_list.append( - f"# `{info['name']}`\n\n" - f"{info['description']}\n\n" - f"Built with the [Meltano SDK](https://sdk.meltano.com) for " - "Singer Taps and Targets.\n\n" - ) - for key, value in info.items(): - - if key == "capabilities": - capabilities = f"## {key.title()}\n\n" - capabilities += "\n".join([f"* `{v}`" for v in value]) - capabilities += "\n\n" - md_list.append(capabilities) - - if key == "settings": - setting = f"## {key.title()}\n\n" - for k, v in info["settings"].get("properties", {}).items(): - md_description = v.get("description", "").replace("\n", "<BR/>") - table_base += ( - f"| {k}{' ' * (max_setting_len - len(k))}" - f"| {'True' if k in required_settings else 'False':8} | " - f"{v.get('default', 'None'):7} | " - f"{md_description:11} |\n" - ) - setting += table_base - setting += ( - "\n" - + "\n".join( - [ - "A full list of supported settings and capabilities " - f"is available by running: `{info['name']} --about`" - ] - ) - + "\n" - ) - md_list.append(setting) - - print("".join(md_list)) - else: - formatted = "\n".join([f"{k.title()}: {v}" for k, v in info.items()]) - print(formatted) + Returns: + A tuple containing the config dictionary and a boolean indicating whether + the config file was found. + """ + config_files = [] + parse_env_config = False + + for config_path in args: + if config_path == "ENV": + # Allow parse from env vars: + parse_env_config = True + continue + + # Validate config file paths before adding to list + if not Path(config_path).is_file(): + msg = ( + f"Could not locate config file at '{config_path}'.Please check " + "that the file exists." + ) + raise FileNotFoundError(msg) - @classproperty - def cli(cls) -> Callable: + config_files.append(Path(config_path)) + + return config_files, parse_env_config + + @classmethod + def invoke( + cls, + *, + about: bool = False, + about_format: str | None = None, + **kwargs: t.Any, # noqa: ARG003 + ) -> None: + """Invoke the plugin. + + Args: + about: Display package metadata and settings. + about_format: Specify output style for `--about`. + kwargs: Plugin keyword arguments. + """ + if about: + cls.print_about(about_format) + sys.exit(0) + + @classmethod + def cb_version( + cls: type[PluginBase], + ctx: click.Context, + param: click.Option, # noqa: ARG003 + value: bool, # noqa: FBT001 + ) -> None: + """CLI callback to print the plugin version and exit. + + Args: + ctx: Click context. + param: Click parameter. + value: Boolean indicating whether to print the version. + """ + if not value: + return + cls.print_version(print_fn=click.echo) + ctx.exit() + + @classmethod + def get_singer_command(cls: type[PluginBase]) -> click.Command: """Handle command line execution. Returns: A callable CLI object. 
""" + return click.Command( + name=cls.name, + callback=cls.invoke, + context_settings={"help_option_names": ["--help"]}, + params=[ + click.Option( + ["--version"], + is_flag=True, + help="Display the package version.", + is_eager=True, + expose_value=False, + callback=cls.cb_version, + ), + click.Option( + ["--about"], + help="Display package metadata and settings.", + is_flag=True, + is_eager=False, + expose_value=True, + ), + click.Option( + ["--format", "about_format"], + help="Specify output style for --about", + type=click.Choice( + ["json", "markdown"], + case_sensitive=False, + ), + default=None, + ), + click.Option( + ["--config"], + multiple=True, + help=( + "Configuration file location or 'ENV' to use environment " + "variables." + ), + type=click.STRING, + default=(), + is_eager=True, + ), + ], + ) + + @plugin_cli + def cli(cls) -> click.Command: + """Handle command line execution. - @click.command() - def cli() -> None: - pass - - return cli + Returns: + A callable CLI object. + """ + return cls.get_singer_command() diff --git a/singer_sdk/sinks/__init__.py b/singer_sdk/sinks/__init__.py index be06754da..f340fa4d2 100644 --- a/singer_sdk/sinks/__init__.py +++ b/singer_sdk/sinks/__init__.py @@ -1,14 +1,10 @@ """Sink classes for targets.""" +from __future__ import annotations + from singer_sdk.sinks.batch import BatchSink from singer_sdk.sinks.core import Sink from singer_sdk.sinks.record import RecordSink -from singer_sdk.sinks.sql import SQLConnector, SQLSink +from singer_sdk.sinks.sql import SQLSink -__all__ = [ - "BatchSink", - "RecordSink", - "Sink", - "SQLSink", - "SQLConnector", -] +__all__ = ["BatchSink", "RecordSink", "Sink", "SQLSink"] diff --git a/singer_sdk/sinks/batch.py b/singer_sdk/sinks/batch.py index 35bd520a8..b8edb1d5a 100644 --- a/singer_sdk/sinks/batch.py +++ b/singer_sdk/sinks/batch.py @@ -1,5 +1,7 @@ """Sink classes load data to a target.""" +from __future__ import annotations + import abc import datetime import uuid @@ -10,7 +12,7 @@ class BatchSink(Sink): """Base class for batched record writers.""" - def _get_context(self, record: dict) -> dict: + def _get_context(self, record: dict) -> dict: # noqa: ARG002 """Return a batch context. If no batch is active, return a new batch context. The SDK-generated context will contain `batch_id` (GUID string) and @@ -27,7 +29,7 @@ def _get_context(self, record: dict) -> dict: if self._pending_batch is None: new_context = { "batch_id": str(uuid.uuid4()), - "batch_start_time": datetime.datetime.now(), + "batch_start_time": datetime.datetime.now(tz=datetime.timezone.utc), } self.start_batch(new_context) self._pending_batch = new_context @@ -47,7 +49,6 @@ def start_batch(self, context: dict) -> None: Args: context: Stream partition or context dictionary. """ - pass def process_record(self, record: dict, context: dict) -> None: """Load the latest record from the stream. @@ -87,4 +88,3 @@ def process_batch(self, context: dict) -> None: Args: context: Stream partition or context dictionary. 
""" - pass diff --git a/singer_sdk/sinks/core.py b/singer_sdk/sinks/core.py index 1b5a50f0f..19dfbc31a 100644 --- a/singer_sdk/sinks/core.py +++ b/singer_sdk/sinks/core.py @@ -1,24 +1,40 @@ """Sink classes load data to a target.""" +from __future__ import annotations + import abc +import copy import datetime +import json import time -from logging import Logger +import typing as t +from gzip import GzipFile +from gzip import open as gzip_open from types import MappingProxyType -from typing import Any, Dict, List, Mapping, Optional, Union from dateutil import parser -from jsonschema import Draft4Validator, FormatChecker - +from jsonschema import Draft7Validator, FormatChecker + +from singer_sdk.exceptions import MissingKeyPropertiesError +from singer_sdk.helpers._batch import ( + BaseBatchFileEncoding, + BatchConfig, + BatchFileFormat, + StorageTarget, +) from singer_sdk.helpers._compat import final from singer_sdk.helpers._typing import ( DatetimeErrorTreatmentEnum, get_datelike_property_type, handle_invalid_timestamp_in_record, ) -from singer_sdk.plugin_base import PluginBase -JSONSchemaValidator = Draft4Validator +if t.TYPE_CHECKING: + from logging import Logger + + from singer_sdk.target_base import Target + +JSONSchemaValidator = Draft7Validator class Sink(metaclass=abc.ABCMeta): @@ -32,10 +48,10 @@ class Sink(metaclass=abc.ABCMeta): def __init__( self, - target: PluginBase, + target: Target, stream_name: str, - schema: Dict, - key_properties: Optional[List[str]], + schema: dict, + key_properties: list[str] | None, ) -> None: """Initialize target sink. @@ -46,21 +62,26 @@ def __init__( key_properties: Primary key of the stream to sink. """ self.logger = target.logger + self.sync_started_at = target.initialized_at self._config = dict(target.config) - self._pending_batch: Optional[dict] = None + self._pending_batch: dict | None = None self.stream_name = stream_name - self.logger.info(f"Initializing target sink for stream '{stream_name}'...") + self.logger.info( + "Initializing target sink for stream '%s'...", + stream_name, + ) + self.original_schema = copy.deepcopy(schema) self.schema = schema if self.include_sdc_metadata_properties: self._add_sdc_metadata_to_schema() else: self._remove_sdc_metadata_from_schema() - self.records_to_drain: Union[List[dict], Any] = [] - self._context_draining: Optional[dict] = None - self.latest_state: Optional[dict] = None - self._draining_state: Optional[dict] = None - self.drained_state: Optional[dict] = None - self.key_properties = key_properties or [] + self.records_to_drain: list[dict] | t.Any = [] + self._context_draining: dict | None = None + self.latest_state: dict | None = None + self._draining_state: dict | None = None + self.drained_state: dict | None = None + self._key_properties = key_properties or [] # Tally counters self._total_records_written: int = 0 @@ -69,9 +90,9 @@ def __init__( self._batch_records_read: int = 0 self._batch_dupe_records_merged: int = 0 - self._validator = Draft4Validator(schema, format_checker=FormatChecker()) + self._validator = Draft7Validator(schema, format_checker=FormatChecker()) - def _get_context(self, record: dict) -> dict: + def _get_context(self, record: dict) -> dict: # noqa: ARG002 """Return an empty dictionary by default. NOTE: Future versions of the SDK may expand the available context attributes. 
@@ -155,7 +176,7 @@ def tally_duplicate_merged(self, count: int = 1) -> None: # Properties @property - def config(self) -> Mapping[str, Any]: + def config(self) -> t.Mapping[str, t.Any]: """Get plugin configuration. Returns: @@ -163,6 +184,16 @@ def config(self) -> Mapping[str, Any]: """ return MappingProxyType(self._config) + @property + def batch_config(self) -> BatchConfig | None: + """Get batch configuration. + + Returns: + A frozen (read-only) config dictionary map. + """ + raw = self.config.get("batch_config") + return BatchConfig.from_dict(raw) if raw else None + @property def include_sdc_metadata_properties(self) -> bool: """Check if metadata columns should be added. @@ -181,72 +212,92 @@ def datetime_error_treatment(self) -> DatetimeErrorTreatmentEnum: """ return DatetimeErrorTreatmentEnum.ERROR + @property + def key_properties(self) -> list[str]: + """Return key properties. + + Override this method to return a list of key properties in a format that is + compatible with the target. + + Returns: + A list of stream key properties. + """ + return self._key_properties + # Record processing def _add_sdc_metadata_to_record( - self, record: dict, message: dict, context: dict + self, + record: dict, + message: dict, + context: dict, ) -> None: """Populate metadata _sdc columns from incoming record message. Record metadata specs documented at: - https://sdk.meltano.com/en/latest/implementation/record_metadata.md + https://sdk.meltano.com/en/latest/implementation/record_metadata.html Args: record: Individual record in the stream. - message: TODO + message: The record message. context: Stream partition or context dictionary. """ record["_sdc_extracted_at"] = message.get("time_extracted") - record["_sdc_received_at"] = datetime.datetime.now().isoformat() + record["_sdc_received_at"] = datetime.datetime.now( + tz=datetime.timezone.utc, + ).isoformat() record["_sdc_batched_at"] = ( - context.get("batch_start_time", None) or datetime.datetime.now() + context.get("batch_start_time", None) + or datetime.datetime.now(tz=datetime.timezone.utc) ).isoformat() record["_sdc_deleted_at"] = record.get("_sdc_deleted_at") record["_sdc_sequence"] = int(round(time.time() * 1000)) record["_sdc_table_version"] = message.get("version") + record["_sdc_sync_started_at"] = self.sync_started_at def _add_sdc_metadata_to_schema(self) -> None: """Add _sdc metadata columns. Record metadata specs documented at: - https://sdk.meltano.com/en/latest/implementation/record_metadata.md + https://sdk.meltano.com/en/latest/implementation/record_metadata.html """ properties_dict = self.schema["properties"] - for col in { + for col in ( "_sdc_extracted_at", "_sdc_received_at", "_sdc_batched_at", "_sdc_deleted_at", - }: + ): properties_dict[col] = { "type": ["null", "string"], "format": "date-time", } - for col in {"_sdc_sequence", "_sdc_table_version"}: + for col in ("_sdc_sequence", "_sdc_table_version", "_sdc_sync_started_at"): properties_dict[col] = {"type": ["null", "integer"]} def _remove_sdc_metadata_from_schema(self) -> None: """Remove _sdc metadata columns. 
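Note that the `_sdc_*` timestamps are now generated as timezone-aware UTC values rather than naive local times. A small sketch of the resulting values; the record itself is illustrative only.

```python
# Sketch: timezone-aware UTC timestamps, as now used for _sdc_* metadata columns.
import datetime

record = {"id": 1}
now_utc = datetime.datetime.now(tz=datetime.timezone.utc)

record["_sdc_received_at"] = now_utc.isoformat()  # includes a "+00:00" offset
record["_sdc_batched_at"] = now_utc.isoformat()
record["_sdc_sequence"] = int(round(now_utc.timestamp() * 1000))

print(record)
```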
Record metadata specs documented at: - https://sdk.meltano.com/en/latest/implementation/record_metadata.md + https://sdk.meltano.com/en/latest/implementation/record_metadata.html """ properties_dict = self.schema["properties"] - for col in { + for col in ( "_sdc_extracted_at", "_sdc_received_at", "_sdc_batched_at", "_sdc_deleted_at", "_sdc_sequence", "_sdc_table_version", - }: + "_sdc_sync_started_at", + ): properties_dict.pop(col, None) def _remove_sdc_metadata_from_record(self, record: dict) -> None: """Remove metadata _sdc columns from incoming record message. Record metadata specs documented at: - https://sdk.meltano.com/en/latest/implementation/record_metadata.md + https://sdk.meltano.com/en/latest/implementation/record_metadata.html Args: record: Individual record in the stream. @@ -257,10 +308,11 @@ def _remove_sdc_metadata_from_record(self, record: dict) -> None: record.pop("_sdc_deleted_at", None) record.pop("_sdc_sequence", None) record.pop("_sdc_table_version", None) + record.pop("_sdc_sync_started_at", None) # Record validation - def _validate_and_parse(self, record: Dict) -> Dict: + def _validate_and_parse(self, record: dict) -> dict: """Validate or repair the record, parsing to python-native types as needed. Args: @@ -271,12 +323,36 @@ def _validate_and_parse(self, record: Dict) -> Dict: """ self._validator.validate(record) self._parse_timestamps_in_record( - record=record, schema=self.schema, treatment=self.datetime_error_treatment + record=record, + schema=self.schema, + treatment=self.datetime_error_treatment, ) return record + def _singer_validate_message(self, record: dict) -> None: + """Ensure record conforms to Singer Spec. + + Args: + record: Record (after parsing, schema validations and transformations). + + Raises: + MissingKeyPropertiesError: If record is missing one or more key properties. + """ + if any(key_property not in record for key_property in self._key_properties): + msg = ( + f"Record is missing one or more key_properties. \n" + f"Key Properties: {self._key_properties}, " + f"Record Keys: {list(record.keys())}" + ) + raise MissingKeyPropertiesError( + msg, + ) + def _parse_timestamps_in_record( - self, record: Dict, schema: Dict, treatment: DatetimeErrorTreatmentEnum + self, + record: dict, + schema: dict, + treatment: DatetimeErrorTreatmentEnum, ) -> None: """Parse strings to datetime.datetime values, repairing or erroring on failure. @@ -289,14 +365,14 @@ def _parse_timestamps_in_record( schema: TODO treatment: TODO """ - for key in record.keys(): + for key in record: datelike_type = get_datelike_property_type(schema["properties"][key]) if datelike_type: + date_val = record[key] try: - date_val = record[key] if record[key] is not None: date_val = parser.parse(date_val) - except Exception as ex: + except parser.ParserError as ex: date_val = handle_invalid_timestamp_in_record( record, [key], @@ -314,11 +390,11 @@ def _after_process_record(self, context: dict) -> None: Args: context: Stream partition or context dictionary. """ - pass + self.logger.debug("Processed record: %s", context) # SDK developer overrides: - def preprocess_record(self, record: Dict, context: dict) -> dict: + def preprocess_record(self, record: dict, context: dict) -> dict: # noqa: ARG002 """Process incoming record and return a modified result. Args: @@ -349,7 +425,6 @@ def process_record(self, record: dict, context: dict) -> None: record: Individual record in the stream. context: Stream partition or context dictionary. 
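The new `_singer_validate_message` check simply verifies that every key property is present on the record before it is processed further. A reduced sketch of that check follows; the exception class here is a local stand-in for `singer_sdk.exceptions.MissingKeyPropertiesError`.

```python
# Reduced sketch of the key-property presence check; the exception is a stand-in.
from __future__ import annotations


class MissingKeyPropertiesError(Exception):
    """Stand-in for singer_sdk.exceptions.MissingKeyPropertiesError."""


def check_key_properties(record: dict, key_properties: list[str]) -> None:
    if any(key_property not in record for key_property in key_properties):
        msg = (
            "Record is missing one or more key_properties. "
            f"Key Properties: {key_properties}, Record Keys: {list(record)}"
        )
        raise MissingKeyPropertiesError(msg)


check_key_properties({"id": 1, "name": "a"}, ["id"])  # passes silently
# check_key_properties({"name": "a"}, ["id"])         # would raise
```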
""" - pass def start_drain(self) -> dict: """Set and return `self._context_draining`. @@ -374,7 +449,8 @@ def process_batch(self, context: dict) -> None: Raises: NotImplementedError: If derived class does not override this method. """ - raise NotImplementedError("No handling exists for process_batch().") + msg = "No handling exists for process_batch()." + raise NotImplementedError(msg) def mark_drained(self) -> None: """Reset `records_to_drain` and any other tracking.""" @@ -383,7 +459,7 @@ def mark_drained(self) -> None: self._context_draining = None if self._batch_records_read: self.tally_record_written( - self._batch_records_read - self._batch_dupe_records_merged + self._batch_records_read - self._batch_dupe_records_merged, ) self._batch_records_read = 0 @@ -399,9 +475,17 @@ def activate_version(self, new_version: int) -> None: _ = new_version self.logger.warning( "ACTIVATE_VERSION message received but not implemented by this target. " - "Ignoring." + "Ignoring.", ) + def setup(self) -> None: + """Perform any setup actions at the beginning of a Stream. + + Setup is executed once per Sink instance, after instantiation. If a Schema + change is detected, a new Sink is instantiated and this method is called again. + """ + self.logger.info("Setting up %s", self.stream_name) + def clean_up(self) -> None: """Perform any clean up actions required at end of a stream. @@ -409,4 +493,48 @@ def clean_up(self) -> None: that may be in use from other instances of the same sink. Stream name alone should not be relied on, it's recommended to use a uuid as well. """ - pass + self.logger.info("Cleaning up %s", self.stream_name) + + def process_batch_files( + self, + encoding: BaseBatchFileEncoding, + files: t.Sequence[str], + ) -> None: + """Process a batch file with the given batch context. + + Args: + encoding: The batch file encoding. + files: The batch files to process. + + Raises: + NotImplementedError: If the batch file encoding is not supported. + """ + file: GzipFile | t.IO + storage: StorageTarget | None = None + + for path in files: + head, tail = StorageTarget.split_url(path) + + if self.batch_config: + storage = self.batch_config.storage + else: + storage = StorageTarget.from_url(head) + + if encoding.format == BatchFileFormat.JSONL: + with storage.fs(create=False) as batch_fs, batch_fs.open( + tail, + mode="rb", + ) as file: + open_file = ( + gzip_open(file) if encoding.compression == "gzip" else file + ) + context = { + "records": [ + json.loads(line) + for line in open_file # type: ignore[attr-defined] + ], + } + self.process_batch(context) + else: + msg = f"Unsupported batch encoding format: {encoding.format}" + raise NotImplementedError(msg) diff --git a/singer_sdk/sinks/record.py b/singer_sdk/sinks/record.py index 27d2930bb..422753bd0 100644 --- a/singer_sdk/sinks/record.py +++ b/singer_sdk/sinks/record.py @@ -1,5 +1,7 @@ """Sink classes load data to a target.""" +from __future__ import annotations + import abc from singer_sdk.helpers._compat import final @@ -11,7 +13,7 @@ class RecordSink(Sink): current_size = 0 # Records are always written directly - def _after_process_record(self, context: dict) -> None: + def _after_process_record(self, context: dict) -> None: # noqa: ARG002 """Perform post-processing and record keeping. Internal hook. The RecordSink class uses this method to tally each record written. @@ -32,7 +34,6 @@ def process_batch(self, context: dict) -> None: Args: context: Stream partition or context dictionary. 
""" - pass @final def start_batch(self, context: dict) -> None: @@ -45,7 +46,6 @@ def start_batch(self, context: dict) -> None: Args: context: Stream partition or context dictionary. """ - pass @abc.abstractmethod def process_record(self, record: dict, context: dict) -> None: @@ -63,4 +63,3 @@ def process_record(self, record: dict, context: dict) -> None: record: Individual record in the stream. context: Stream partition or context dictionary. """ - pass diff --git a/singer_sdk/sinks/sql.py b/singer_sdk/sinks/sql.py index 5faaf2ca4..238e83dec 100644 --- a/singer_sdk/sinks/sql.py +++ b/singer_sdk/sinks/sql.py @@ -1,31 +1,42 @@ """Sink classes load data to SQL targets.""" +from __future__ import annotations + +import re +import typing as t +from collections import defaultdict +from copy import copy from textwrap import dedent -from typing import Any, Dict, Iterable, List, Optional, Type import sqlalchemy from pendulum import now from sqlalchemy.sql.expression import bindparam -from singer_sdk.plugin_base import PluginBase +from singer_sdk.connectors import SQLConnector +from singer_sdk.exceptions import ConformedNameClashException +from singer_sdk.helpers._conformers import replace_leading_digit from singer_sdk.sinks.batch import BatchSink -from singer_sdk.streams.sql import SQLConnector + +if t.TYPE_CHECKING: + from sqlalchemy.sql import Executable + + from singer_sdk.target_base import Target class SQLSink(BatchSink): """SQL-type sink type.""" - connector_class: Type[SQLConnector] + connector_class: type[SQLConnector] soft_delete_column_name = "_sdc_deleted_at" version_column_name = "_sdc_table_version" def __init__( self, - target: PluginBase, + target: Target, stream_name: str, - schema: Dict, - key_properties: Optional[List[str]], - connector: Optional[SQLConnector] = None, + schema: dict, + key_properties: list[str] | None, + connector: SQLConnector | None = None, ) -> None: """Initialize SQL Sink. @@ -37,11 +48,7 @@ def __init__( connector: Optional connector to reuse. """ self._connector: SQLConnector - if connector: - self._connector = connector - else: - self._connector = self.connector_class(dict(target.config)) - + self._connector = connector or self.connector_class(dict(target.config)) super().__init__(target, stream_name, schema, key_properties) @property @@ -64,110 +71,210 @@ def connection(self) -> sqlalchemy.engine.Connection: @property def table_name(self) -> str: - """Returns the table name, with no schema or database part. + """Return the table name, with no schema or database part. Returns: The target table name. """ parts = self.stream_name.split("-") - - if len(parts) == 1: - return self.stream_name - else: - return parts[-1] + table = self.stream_name if len(parts) == 1 else parts[-1] + return self.conform_name(table, "table") @property - def schema_name(self) -> Optional[str]: - """Returns the schema name or `None` if using names with no schema part. + def schema_name(self) -> str | None: + """Return the schema name or `None` if using names with no schema part. Returns: The target schema name. """ - return None # Assumes single-schema target context. 
+        # Look for a default_target_schema in the configuration file
+        default_target_schema: str = self.config.get("default_target_schema", None)
+        parts = self.stream_name.split("-")
+
+        # 1) When default_target_schema is in the configuration, use it
+        # 2) If the stream name is in <schema>-<table> format, use the
+        #    stream <schema>
+        # 3) Return None if you don't find anything
+        if default_target_schema:
+            return default_target_schema
+
+        if len(parts) in {2, 3}:
+            # Stream name is a two-part or three-part identifier.
+            # Use the second-to-last part as the schema name.
+            return self.conform_name(parts[-2], "schema")
+
+        # Schema name not detected.
+        return None
+
+    @property
+    def database_name(self) -> str | None:
+        """Return the DB name or `None` if using names with no database part."""
+        # Assumes single-DB target context.
 
     @property
-    def database_name(self) -> Optional[str]:
-        """Returns the DB name or `None` if using names with no database part.
+    def full_table_name(self) -> str:
+        """Return the fully qualified table name.
 
         Returns:
-            The target database name.
+            The fully qualified table name.
         """
-        return None  # Assumes single-DB target context.
+        return self.connector.get_fully_qualified_name(
+            table_name=self.table_name,
+            schema_name=self.schema_name,
+            db_name=self.database_name,
+        )
 
-    def process_batch(self, context: dict) -> None:
-        """Process a batch with the given batch context.
+    @property
+    def full_schema_name(self) -> str:
+        """Return the fully qualified schema name.
 
-        Writes a batch to the SQL target. Developers may override this method
-        in order to provide a more efficient upload/upsert process.
+        Returns:
+            The fully qualified schema name.
+        """
+        return self.connector.get_fully_qualified_name(
+            schema_name=self.schema_name,
+            db_name=self.database_name,
+        )
+
+    def conform_name(
+        self,
+        name: str,
+        object_type: str | None = None,  # noqa: ARG002
+    ) -> str:
+        """Conform a stream property name to one suitable for the target system.
+
+        Transforms names to snake case by default, which is compatible with most
+        common DBMSs. Developers may override this method to apply custom
+        transformations to database/schema/table/column names.
 
         Args:
-            context: Stream partition or context dictionary.
+            name: Property name.
+            object_type: One of ``database``, ``schema``, ``table`` or ``column``.
+
+        Returns:
+            The name transformed to snake case.
         """
-        # If duplicates are merged, these can be tracked via
-        # :meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
+        # Strip non-alphanumeric characters
+        name = re.sub(r"[^a-zA-Z0-9_\-\.\s]", "", name)
+        # Strip leading/trailing whitespace, transform to lowercase,
+        # and replace `-`, `.` and spaces with `_`
+        name = (
+            name.lower()
+            .lstrip()
+            .rstrip()
+            .replace(".", "_")
+            .replace("-", "_")
+            .replace(" ", "_")
+        )
+        # Replace leading digit
+        return replace_leading_digit(name)
+
+    @staticmethod
+    def _check_conformed_names_not_duplicated(
+        conformed_property_names: dict[str, str],
+    ) -> None:
+        """Check if conformed names produce duplicate keys.
+
+        Args:
+            conformed_property_names: A name:conformed_name dict map.
+
+        Raises:
+            ConformedNameClashException: if duplicates found.
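To make the schema/table resolution above concrete, here is a rough sketch of how a stream name and the optional `default_target_schema` setting combine; `conform_name` is reduced to lower-casing, and the stream names are invented.

```python
# Sketch of schema/table resolution from a "<schema>-<table>" stream name,
# with default_target_schema taking priority. Names are invented examples.
from __future__ import annotations


def resolve_names(stream_name: str, default_target_schema: str | None = None):
    parts = stream_name.split("-")
    table = (stream_name if len(parts) == 1 else parts[-1]).lower()
    if default_target_schema:
        return default_target_schema, table
    if len(parts) in {2, 3}:
        return parts[-2].lower(), table
    return None, table


print(resolve_names("users"))                      # (None, 'users')
print(resolve_names("public-users"))               # ('public', 'users')
print(resolve_names("public-users", "analytics"))  # ('analytics', 'users')
```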
+ """ + # group: {'_a': ['1_a'], 'abc': ['aBc', 'abC']} # noqa: ERA001 + grouped = defaultdict(list) + for k, v in conformed_property_names.items(): + grouped[v].append(k) + + # filter + duplicates = list(filter(lambda p: len(p[1]) > 1, grouped.items())) + if duplicates: + msg = ( + "Duplicate stream properties produced when conforming property names: " + f"{duplicates}" + ) + raise ConformedNameClashException(msg) + + def conform_schema(self, schema: dict) -> dict: + """Return schema dictionary with property names conformed. + + Args: + schema: JSON schema dictionary. + + Returns: + A schema dictionary with the property names conformed. + """ + conformed_schema = copy(schema) + conformed_property_names = { + key: self.conform_name(key) for key in conformed_schema["properties"] + } + self._check_conformed_names_not_duplicated(conformed_property_names) + conformed_schema["properties"] = { + conformed_property_names[key]: value + for key, value in conformed_schema["properties"].items() + } + return conformed_schema + + def conform_record(self, record: dict) -> dict: + """Return record dictionary with property names conformed. + + Args: + record: Dictionary representing a single record. + + Returns: + New record dictionary with conformed column names. + """ + conformed_property_names = {key: self.conform_name(key) for key in record} + self._check_conformed_names_not_duplicated(conformed_property_names) + return {conformed_property_names[key]: value for key, value in record.items()} + + def setup(self) -> None: + """Set up Sink. + + This method is called on Sink creation, and creates the required Schema and + Table entities in the target database. + """ + if self.schema_name: + self.connector.prepare_schema(self.schema_name) self.connector.prepare_table( full_table_name=self.full_table_name, - schema=self.schema, + schema=self.conform_schema(self.schema), primary_keys=self.key_properties, as_temp_table=False, ) - self.bulk_insert_records( - full_table_name=self.full_table_name, - schema=self.schema, - records=context["records"], - ) @property - def full_table_name(self) -> str: - """Gives the fully qualified table name. + def key_properties(self) -> list[str]: + """Return key properties, conformed to target system naming requirements. Returns: - The fully qualified table name. + A list of key properties, conformed with `self.conform_name()` """ - return self.connector.get_fully_qualified_name( - self.table_name, - self.schema_name, - self.database_name, - ) + return [self.conform_name(key, "column") for key in super().key_properties] - def create_table_with_records( - self, - full_table_name: Optional[str], - schema: dict, - records: Iterable[Dict[str, Any]], - primary_keys: Optional[List[str]] = None, - partition_keys: Optional[List[str]] = None, - as_temp_table: bool = False, - ) -> None: - """Create an empty table. + def process_batch(self, context: dict) -> None: + """Process a batch with the given batch context. + + Writes a batch to the SQL target. Developers may override this method + in order to provide a more efficient upload/upsert process. Args: - full_table_name: the target table name. - schema: the JSON schema for the new table. - records: records to load. - primary_keys: list of key properties. - partition_keys: list of partition keys. - as_temp_table: True to create a temp table. + context: Stream partition or context dictionary. 
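Because conforming names can collapse two distinct properties into the same identifier, the duplicate check above guards against silent data loss. A condensed sketch of the conform-and-check flow; a plain `ValueError` stands in for `ConformedNameClashException`, and the leading-digit handling is simplified.

```python
# Condensed sketch of name conforming plus the duplicate-name check.
# ValueError stands in for ConformedNameClashException.
from __future__ import annotations

import re
from collections import defaultdict


def conform_name(name: str) -> str:
    name = re.sub(r"[^a-zA-Z0-9_\-\.\s]", "", name)
    name = name.lower().strip().replace(".", "_").replace("-", "_").replace(" ", "_")
    return f"x{name}" if name[:1].isdigit() else name  # crude leading-digit guard


def check_no_clashes(names: list[str]) -> None:
    grouped: defaultdict[str, list[str]] = defaultdict(list)
    for name in names:
        grouped[conform_name(name)].append(name)
    duplicates = [(key, value) for key, value in grouped.items() if len(value) > 1]
    if duplicates:
        raise ValueError(f"Duplicate stream properties produced: {duplicates}")


check_no_clashes(["First Name", "last.name"])     # ok: first_name, last_name
# check_no_clashes(["First Name", "first-name"])  # would raise: both -> first_name
```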
""" - full_table_name = full_table_name or self.full_table_name - if primary_keys is None: - primary_keys = self.key_properties - partition_keys = partition_keys or None - self.connector.prepare_table( - full_table_name=full_table_name, - primary_keys=primary_keys, - schema=schema, - as_temp_table=as_temp_table, - ) + # If duplicates are merged, these can be tracked via + # :meth:`~singer_sdk.Sink.tally_duplicate_merged()`. self.bulk_insert_records( - full_table_name=full_table_name, schema=schema, records=records + full_table_name=self.full_table_name, + schema=self.schema, + records=context["records"], ) def generate_insert_statement( self, full_table_name: str, schema: dict, - ) -> str: + ) -> str | Executable: """Generate an insert statement for the given records. Args: @@ -177,23 +284,22 @@ def generate_insert_statement( Returns: An insert statement. """ - property_names = list(schema["properties"].keys()) + property_names = list(self.conform_schema(schema)["properties"].keys()) statement = dedent( f"""\ INSERT INTO {full_table_name} ({", ".join(property_names)}) VALUES ({", ".join([f":{name}" for name in property_names])}) - """ + """, # noqa: S608 ) - return statement.rstrip() def bulk_insert_records( self, full_table_name: str, schema: dict, - records: Iterable[Dict[str, Any]], - ) -> Optional[int]: + records: t.Iterable[dict[str, t.Any]], + ) -> int | None: """Bulk insert records to an existing destination table. The default implementation uses a generic SQLAlchemy bulk insert operation. @@ -213,19 +319,31 @@ def bulk_insert_records( full_table_name, schema, ) + if isinstance(insert_sql, str): + insert_sql = sqlalchemy.text(insert_sql) + + conformed_records = [self.conform_record(record) for record in records] + property_names = list(self.conform_schema(schema)["properties"].keys()) + + # Create new record dicts with missing properties filled in with None + new_records = [ + {name: record.get(name) for name in property_names} + for record in conformed_records + ] + self.logger.info("Inserting with SQL: %s", insert_sql) - self.connector.connection.execute( - sqlalchemy.text(insert_sql), - records, - ) - if isinstance(records, list): - return len(records) # If list, we can quickly return record count. - return None # Unknown record count. + with self.connector._connect() as conn, conn.begin(): + result = conn.execute(insert_sql, new_records) + + return result.rowcount def merge_upsert_from_table( - self, target_table_name: str, from_table_name: str, join_keys: List[str] - ) -> Optional[int]: + self, + target_table_name: str, + from_table_name: str, + join_keys: list[str], + ) -> int | None: """Merge upsert data from one table to another. Args: @@ -241,7 +359,7 @@ def merge_upsert_from_table( NotImplementedError: if the merge upsert capability does not exist or is undefined. """ - raise NotImplementedError() + raise NotImplementedError def activate_version(self, new_version: int) -> None: """Bump the active version of the target table. 
@@ -267,10 +385,13 @@ def activate_version(self, new_version: int) -> None: ) if self.config.get("hard_delete", True): - self.connection.execute( - f"DELETE FROM {self.full_table_name} " - f"WHERE {self.version_column_name} <= {new_version}" - ) + with self.connector._connect() as conn, conn.begin(): + conn.execute( + sqlalchemy.text( + f"DELETE FROM {self.full_table_name} " # noqa: S608 + f"WHERE {self.version_column_name} <= {new_version}", + ), + ) return if not self.connector.column_exists( @@ -287,13 +408,14 @@ def activate_version(self, new_version: int) -> None: f"UPDATE {self.full_table_name}\n" f"SET {self.soft_delete_column_name} = :deletedate \n" f"WHERE {self.version_column_name} < :version \n" - f" AND {self.soft_delete_column_name} IS NULL\n" + f" AND {self.soft_delete_column_name} IS NULL\n", ) query = query.bindparams( bindparam("deletedate", value=deleted_at, type_=sqlalchemy.types.DateTime), bindparam("version", value=new_version, type_=sqlalchemy.types.Integer), ) - self.connector.connection.execute(query) + with self.connector._connect() as conn, conn.begin(): + conn.execute(query) __all__ = ["SQLSink", "SQLConnector"] diff --git a/singer_sdk/streams/__init__.py b/singer_sdk/streams/__init__.py index d34bb67c7..dc4ad9d48 100644 --- a/singer_sdk/streams/__init__.py +++ b/singer_sdk/streams/__init__.py @@ -1,14 +1,10 @@ -"""SDK for building singer-compliant taps.""" +"""SDK for building Singer taps.""" + +from __future__ import annotations from singer_sdk.streams.core import Stream from singer_sdk.streams.graphql import GraphQLStream from singer_sdk.streams.rest import RESTStream -from singer_sdk.streams.sql import SQLConnector, SQLStream +from singer_sdk.streams.sql import SQLStream -__all__ = [ - "Stream", - "GraphQLStream", - "RESTStream", - "SQLStream", - "SQLConnector", -] +__all__ = ["Stream", "GraphQLStream", "RESTStream", "SQLStream"] diff --git a/singer_sdk/streams/core.py b/singer_sdk/streams/core.py index cbf46a360..4c3adb225 100644 --- a/singer_sdk/streams/core.py +++ b/singer_sdk/streams/core.py @@ -1,89 +1,111 @@ """Stream abstract class.""" +from __future__ import annotations + import abc import copy import datetime import json -import logging +import typing as t from os import PathLike from pathlib import Path from types import MappingProxyType -from typing import ( - Any, - Callable, - Dict, - Generator, - Iterable, - List, - Mapping, - Optional, - Tuple, - Type, - TypeVar, - Union, - cast, -) import pendulum -import requests -import singer -from singer import RecordMessage, SchemaMessage, StateMessage -from singer.schema import Schema -from singer_sdk.exceptions import InvalidStreamSortException, MaxRecordsLimitException +import singer_sdk._singerlib as singer +from singer_sdk import metrics +from singer_sdk.batch import JSONLinesBatcher +from singer_sdk.exceptions import ( + AbortedSyncFailedException, + AbortedSyncPausedException, + InvalidReplicationKeyException, + InvalidStreamSortException, + MaxRecordsLimitException, +) +from singer_sdk.helpers._batch import ( + BaseBatchFileEncoding, + BatchConfig, + SDKBatchMessage, +) from singer_sdk.helpers._catalog import pop_deselected_record_properties from singer_sdk.helpers._compat import final from singer_sdk.helpers._flattening import get_flattening_options -from singer_sdk.helpers._singer import ( - Catalog, - CatalogEntry, - MetadataMapping, - SelectionMask, -) from singer_sdk.helpers._state import ( finalize_state_progress_markers, get_starting_replication_value, get_state_partitions_list, 
get_writeable_state_dict, increment_state, + is_state_non_resumable, log_sort_error, reset_state_progress_markers, write_replication_key_signpost, write_starting_replication_value, ) -from singer_sdk.helpers._typing import conform_record_data_types, is_datetime_type +from singer_sdk.helpers._typing import ( + TypeConformanceLevel, + conform_record_data_types, + is_datetime_type, +) from singer_sdk.helpers._util import utc_now from singer_sdk.mapper import RemoveRecordTransform, SameRecordTransform, StreamMap -from singer_sdk.plugin_base import PluginBase as TapBaseClass + +if t.TYPE_CHECKING: + import logging + + from singer_sdk.tap_base import Tap # Replication methods REPLICATION_FULL_TABLE = "FULL_TABLE" REPLICATION_INCREMENTAL = "INCREMENTAL" REPLICATION_LOG_BASED = "LOG_BASED" -FactoryType = TypeVar("FactoryType", bound="Stream") - -METRICS_LOG_LEVEL_SETTING = "metrics_log_level" +FactoryType = t.TypeVar("FactoryType", bound="Stream") class Stream(metaclass=abc.ABCMeta): """Abstract base class for tap streams.""" - STATE_MSG_FREQUENCY = 10000 # Number of records between state messages - _MAX_RECORDS_LIMIT: Optional[int] = None + STATE_MSG_FREQUENCY = 10000 + """Number of records between state messages.""" + + ABORT_AT_RECORD_COUNT: int | None = None + """ + If set, raise `MaxRecordsLimitException` if the limit is exceeded. + """ + + TYPE_CONFORMANCE_LEVEL = TypeConformanceLevel.RECURSIVE + """Type conformance level for this stream. + + Field types in the schema are used to convert record field values to the correct + type. + + Available options are: + + - ``TypeConformanceLevel.NONE``: No conformance is performed. + - ``TypeConformanceLevel.RECURSIVE``: Conformance is performed recursively through + all nested levels in the record. + - ``TypeConformanceLevel.ROOT_ONLY``: Conformance is performed only on the + root level. + """ # Used for nested stream relationships - parent_stream_type: Optional[Type["Stream"]] = None + parent_stream_type: type[Stream] | None = None + """Parent stream type for this stream. If this stream is a child stream, this should + be set to the parent stream class. + """ + ignore_parent_replication_key: bool = False - # Internal API cost aggregator - _sync_costs: Dict[str, int] = {} + selected_by_default: bool = True + """Whether this stream is selected by default in the catalog.""" def __init__( self, - tap: TapBaseClass, - schema: Optional[Union[str, PathLike, Dict[str, Any], Schema]] = None, - name: Optional[str] = None, + tap: Tap, + schema: str | PathLike | dict[str, t.Any] | singer.Schema | None = None, + name: str | None = None, ) -> None: """Init tap stream. @@ -99,52 +121,56 @@ def __init__( if name: self.name: str = name if not self.name: - raise ValueError("Missing argument or class variable 'name'.") + msg = "Missing argument or class variable 'name'." 
+ raise ValueError(msg) self.logger: logging.Logger = tap.logger + self.metrics_logger = tap.metrics_logger self.tap_name: str = tap.name self._config: dict = dict(tap.config) self._tap = tap self._tap_state = tap.state - self._tap_input_catalog: Optional[Catalog] = None - self._stream_maps: Optional[List[StreamMap]] = None - self.forced_replication_method: Optional[str] = None - self._replication_key: Optional[str] = None - self._primary_keys: Optional[List[str]] = None - self._state_partitioning_keys: Optional[List[str]] = None - self._schema_filepath: Optional[Path] = None - self._metadata: Optional[MetadataMapping] = None - self._mask: Optional[SelectionMask] = None + self._tap_input_catalog: singer.Catalog | None = None + self._stream_maps: list[StreamMap] | None = None + self.forced_replication_method: str | None = None + self._replication_key: str | None = None + self._primary_keys: list[str] | None = None + self._state_partitioning_keys: list[str] | None = None + self._schema_filepath: Path | None = None + self._metadata: singer.MetadataMapping | None = None + self._mask: singer.SelectionMask | None = None self._schema: dict - self.child_streams: List[Stream] = [] + self._is_state_flushed: bool = True + self._last_emitted_state: dict | None = None + self._sync_costs: dict[str, int] = {} + self.child_streams: list[Stream] = [] if schema: if isinstance(schema, (PathLike, str)): if not Path(schema).is_file(): - raise FileNotFoundError( - f"Could not find schema file '{self.schema_filepath}'." - ) + msg = f"Could not find schema file '{self.schema_filepath}'." + raise FileNotFoundError(msg) self._schema_filepath = Path(schema) elif isinstance(schema, dict): self._schema = schema - elif isinstance(schema, Schema): + elif isinstance(schema, singer.Schema): self._schema = schema.to_dict() else: - raise ValueError( - f"Unexpected type {type(schema).__name__} for arg 'schema'." - ) + msg = f"Unexpected type {type(schema).__name__} for arg 'schema'." + raise ValueError(msg) if self.schema_filepath: self._schema = json.loads(Path(self.schema_filepath).read_text()) if not self.schema: - raise ValueError( - f"Could not initialize schema for stream '{self.name}'. " - "A valid schema object or filepath was not provided." + msg = ( + f"Could not initialize schema for stream '{self.name}'. A valid schema " + "object or filepath was not provided." ) + raise ValueError(msg) @property - def stream_maps(self) -> List[StreamMap]: + def stream_maps(self) -> list[StreamMap]: """Get stream transformation maps. The 0th item is the primary stream map. List should not be empty. @@ -158,12 +184,13 @@ def stream_maps(self) -> List[StreamMap]: if self._tap.mapper: self._stream_maps = self._tap.mapper.stream_maps[self.name] self.logger.info( - f"Tap has custom mapper. Using {len(self.stream_maps)} provided map(s)." + "Tap has custom mapper. Using %d provided map(s).", + len(self.stream_maps), ) else: self.logger.info( - f"No custom mapper provided for '{self.name}'. " - "Using SameRecordTransform." + "No custom mapper provided for '%s'. Using SameRecordTransform.", + self.name, ) self._stream_maps = [ SameRecordTransform( @@ -171,7 +198,7 @@ def stream_maps(self) -> List[StreamMap]: raw_schema=self.schema, key_properties=self.primary_keys, flattening_options=get_flattening_options(self.config), - ) + ), ] return self._stream_maps @@ -185,15 +212,23 @@ def is_timestamp_replication_key(self) -> bool: Returns: True if the stream uses a timestamp-based replication key. 
+ + Raises: + InvalidReplicationKeyException: If the schema does not contain the + replication key. """ if not self.replication_key: return False type_dict = self.schema.get("properties", {}).get(self.replication_key) + if type_dict is None: + msg = f"Field '{self.replication_key}' is not in schema for stream '{self.name}'" # noqa: E501 + raise InvalidReplicationKeyException(msg) return is_datetime_type(type_dict) def get_starting_replication_key_value( - self, context: Optional[dict] - ) -> Optional[Any]: + self, + context: dict | None, + ) -> t.Any | None: # noqa: ANN401 """Get starting replication key. Will return the value of the stream's replication key when `--state` is passed. @@ -213,9 +248,7 @@ def get_starting_replication_key_value( return get_starting_replication_value(state) - def get_starting_timestamp( - self, context: Optional[dict] - ) -> Optional[datetime.datetime]: + def get_starting_timestamp(self, context: dict | None) -> datetime.datetime | None: """Get starting replication timestamp. Will return the value of the stream's replication key when `--state` is passed. @@ -241,13 +274,11 @@ def get_starting_timestamp( return None if not self.is_timestamp_replication_key: - raise ValueError( - f"The replication key {self.replication_key} is not of timestamp type" - ) + msg = f"The replication key {self.replication_key} is not of timestamp type" + raise ValueError(msg) - return cast(datetime.datetime, pendulum.parse(value)) + return t.cast(datetime.datetime, pendulum.parse(value)) - @final @property def selected(self) -> bool: """Check if stream is selected. @@ -257,6 +288,16 @@ def selected(self) -> bool: """ return self.mask.get((), True) + @selected.setter + def selected(self, value: bool | None) -> None: + """Set stream selection. + + Args: + value: True if the stream is selected. + """ + self.metadata.root.selected = value + self._mask = self.metadata.resolve_selection() + @final @property def has_selected_descendents(self) -> bool: @@ -265,29 +306,28 @@ def has_selected_descendents(self) -> bool: Returns: True if any child streams are selected, recursively. """ - for child in self.child_streams or []: - if child.selected or child.has_selected_descendents: - return True - - return False + return any( + child.selected or child.has_selected_descendents + for child in self.child_streams or [] + ) @final @property - def descendent_streams(self) -> List["Stream"]: + def descendent_streams(self) -> list[Stream]: """Get child streams. Returns: A list of all children, recursively. """ - result: List[Stream] = list(self.child_streams) or [] + result: list[Stream] = list(self.child_streams) or [] for child in self.child_streams: result += child.descendent_streams or [] return result def _write_replication_key_signpost( self, - context: Optional[dict], - value: Union[datetime.datetime, str, int, float], + context: dict | None, + value: datetime.datetime | str | int | float, ) -> None: """Write the signpost value, if available. @@ -304,7 +344,30 @@ def _write_replication_key_signpost( state = self.get_context_state(context) write_replication_key_signpost(state, value) - def _write_starting_replication_value(self, context: Optional[dict]) -> None: + def compare_start_date(self, value: str, start_date_value: str) -> str: + """Compare a bookmark value to a start date and return the most recent value. + + If the replication key is a datetime-formatted string, this method will parse + the value and compare it to the start date. Otherwise, the bookmark value is + returned. 
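`compare_start_date` keeps whichever of the two datetime strings is more recent, using `pendulum.parse` as the comparison key. A tiny sketch with made-up values:

```python
# Sketch: pick the more recent of a state bookmark and the configured start_date.
import pendulum

bookmark = "2023-01-15T00:00:00Z"
start_date = "2023-06-01T00:00:00Z"

most_recent = max(bookmark, start_date, key=pendulum.parse)
print(most_recent)  # 2023-06-01T00:00:00Z -- the later of the two values
```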
+ + If the tap uses a non-datetime replication key (e.g. an UNIX timestamp), the + developer is encouraged to override this method to provide custom logic for + comparing the bookmark value to the start date. + + Args: + value: The replication key value. + start_date_value: The start date value from the config. + + Returns: + The most recent value between the bookmark and start date. + """ + if self.is_timestamp_replication_key: + return max(value, start_date_value, key=pendulum.parse) + + return value + + def _write_starting_replication_value(self, context: dict | None) -> None: """Write the starting replication value, if available. Args: @@ -316,18 +379,24 @@ def _write_starting_replication_value(self, context: Optional[dict]) -> None: if self.replication_key: replication_key_value = state.get("replication_key_value") if replication_key_value and self.replication_key == state.get( - "replication_key" + "replication_key", ): value = replication_key_value - elif "start_date" in self.config: - value = self.config["start_date"] + # Use start_date if it is more recent than the replication_key state + start_date_value: str | None = self.config.get("start_date") + if start_date_value: + if not value: + value = start_date_value + else: + value = self.compare_start_date(value, start_date_value) write_starting_replication_value(state, value) def get_replication_key_signpost( - self, context: Optional[dict] - ) -> Optional[Union[datetime.datetime, Any]]: + self, + context: dict | None, # noqa: ARG002 + ) -> datetime.datetime | t.Any | None: # noqa: ANN401 """Get the replication signpost. For timestamp-based replication keys, this defaults to `utc_now()`. For @@ -345,13 +414,10 @@ def get_replication_key_signpost( Returns: Max allowable bookmark value for this stream's replication key. """ - if self.is_timestamp_replication_key: - return utc_now() - - return None + return utc_now() if self.is_timestamp_replication_key else None @property - def schema_filepath(self) -> Optional[Path]: + def schema_filepath(self) -> Path | None: """Get path to schema file. Returns: @@ -369,18 +435,16 @@ def schema(self) -> dict: return self._schema @property - def primary_keys(self) -> Optional[List[str]]: + def primary_keys(self) -> list[str] | None: """Get primary keys. Returns: A list of primary key(s) for the stream. """ - if not self._primary_keys: - return [] - return self._primary_keys + return self._primary_keys or [] @primary_keys.setter - def primary_keys(self, new_value: List[str]) -> None: + def primary_keys(self, new_value: list[str] | None) -> None: """Set primary key(s) for the stream. Args: @@ -389,7 +453,7 @@ def primary_keys(self, new_value: List[str]) -> None: self._primary_keys = new_value @property - def state_partitioning_keys(self) -> Optional[List[str]]: + def state_partitioning_keys(self) -> list[str] | None: """Get state partition keys. If not set, a default partitioning will be inherited from the stream's context. @@ -401,7 +465,7 @@ def state_partitioning_keys(self) -> Optional[List[str]]: return self._state_partitioning_keys @state_partitioning_keys.setter - def state_partitioning_keys(self, new_value: Optional[List[str]]) -> None: + def state_partitioning_keys(self, new_value: list[str] | None) -> None: """Set partition keys for the stream state bookmarks. If not set, a default partitioning will be inherited from the stream's context. 
@@ -413,18 +477,16 @@ def state_partitioning_keys(self, new_value: Optional[List[str]]) -> None: self._state_partitioning_keys = new_value @property - def replication_key(self) -> Optional[str]: + def replication_key(self) -> str | None: """Get replication key. Returns: Replication key for the stream. """ - if not self._replication_key: - return None - return self._replication_key + return self._replication_key or None @replication_key.setter - def replication_key(self, new_value: str) -> None: + def replication_key(self, new_value: str | None) -> None: """Set replication key for the stream. Args: @@ -457,7 +519,7 @@ def check_sorted(self) -> bool: return True @property - def metadata(self) -> MetadataMapping: + def metadata(self) -> singer.MetadataMapping: """Get stream metadata. Metadata attributes (`inclusion`, `selected`, etc.) are part of the Singer spec. @@ -476,7 +538,7 @@ def metadata(self) -> MetadataMapping: self._metadata = catalog_entry.metadata return self._metadata - self._metadata = MetadataMapping.get_standard_metadata( + self._metadata = singer.MetadataMapping.get_standard_metadata( schema=self.schema, replication_method=self.forced_replication_method, key_properties=self.primary_keys or [], @@ -484,25 +546,27 @@ def metadata(self) -> MetadataMapping: [self.replication_key] if self.replication_key else None ), schema_name=None, + selected_by_default=self.selected_by_default, ) # If there's no input catalog, select all streams - if self._tap_input_catalog is None: - self._metadata.root.selected = True + self._metadata.root.selected = ( + self._tap_input_catalog is None and self.selected_by_default + ) return self._metadata @property - def _singer_catalog_entry(self) -> CatalogEntry: + def _singer_catalog_entry(self) -> singer.CatalogEntry: """Return catalog entry as specified by the Singer catalog spec. Returns: TODO """ - return CatalogEntry( + return singer.CatalogEntry( tap_stream_id=self.tap_stream_id, stream=self.name, - schema=Schema.from_dict(self.schema), + schema=singer.Schema.from_dict(self.schema), metadata=self.metadata, key_properties=self.primary_keys or [], replication_key=self.replication_key, @@ -515,16 +579,16 @@ def _singer_catalog_entry(self) -> CatalogEntry: ) @property - def _singer_catalog(self) -> Catalog: + def _singer_catalog(self) -> singer.Catalog: """TODO. Returns: TODO """ - return Catalog([(self.tap_stream_id, self._singer_catalog_entry)]) + return singer.Catalog([(self.tap_stream_id, self._singer_catalog_entry)]) @property - def config(self) -> Mapping[str, Any]: + def config(self) -> t.Mapping[str, t.Any]: """Get stream configuration. Returns: @@ -578,7 +642,7 @@ def tap_state(self) -> dict: """ return self._tap_state - def get_context_state(self, context: Optional[dict]) -> dict: + def get_context_state(self, context: dict | None) -> dict: """Return a writable state dict for the given context. Gives a partitioned context state if applicable; else returns stream state. @@ -633,7 +697,7 @@ def stream_state(self) -> dict: # Partitions @property - def partitions(self) -> Optional[List[dict]]: + def partitions(self) -> list[dict] | None: """Get stream partitions. Developers may override this property to provide a default partitions list. @@ -644,22 +708,29 @@ def partitions(self) -> Optional[List[dict]]: Returns: A list of partition key dicts (if applicable), otherwise `None`. 
""" - result: List[dict] = [] - for partition_state in ( - get_state_partitions_list(self.tap_state, self.name) or [] - ): - result.append(partition_state["context"]) + result: list[dict] = [ + partition_state["context"] + for partition_state in ( + get_state_partitions_list(self.tap_state, self.name) or [] + ) + ] return result or None # Private bookmarking methods def _increment_stream_state( - self, latest_record: Dict[str, Any], *, context: Optional[dict] = None + self, + latest_record: dict[str, t.Any], + *, + context: dict | None = None, ) -> None: """Update state of stream or partition with data from the provided record. - Raises InvalidStreamSortException is self.is_sorted = True and unsorted data is - detected. + Raises `InvalidStreamSortException` is `self.is_sorted = True` and unsorted data + is detected. + + Note: The default implementation does not advance any bookmarks unless + `self.replication_method == 'INCREMENTAL'. Args: latest_record: TODO @@ -668,36 +739,43 @@ def _increment_stream_state( Raises: ValueError: TODO """ + # This also creates a state entry if one does not yet exist: state_dict = self.get_context_state(context) - if latest_record: - if self.replication_method in [ - REPLICATION_INCREMENTAL, - REPLICATION_LOG_BASED, - ]: - if not self.replication_key: - raise ValueError( - f"Could not detect replication key for '{self.name}' stream" - f"(replication method={self.replication_method})" - ) - treat_as_sorted = self.is_sorted - if not treat_as_sorted and self.state_partitioning_keys is not None: - # Streams with custom state partitioning are not resumable. - treat_as_sorted = False - increment_state( - state_dict, - replication_key=self.replication_key, - latest_record=latest_record, - is_sorted=treat_as_sorted, - check_sorted=self.check_sorted, + + # Advance state bookmark values if applicable + if latest_record and self.replication_method == REPLICATION_INCREMENTAL: + if not self.replication_key: + msg = ( + f"Could not detect replication key for '{self.name}' " + f"stream(replication method={self.replication_method})" ) + raise ValueError(msg) + treat_as_sorted = self.is_sorted + if not treat_as_sorted and self.state_partitioning_keys is not None: + # Streams with custom state partitioning are not resumable. + treat_as_sorted = False + increment_state( + state_dict, + replication_key=self.replication_key, + latest_record=latest_record, + is_sorted=treat_as_sorted, + check_sorted=self.check_sorted, + ) # Private message authoring methods: def _write_state_message(self) -> None: """Write out a STATE message with the latest state.""" - singer.write_message(StateMessage(value=self.tap_state)) + if (not self._is_state_flushed) and ( + self.tap_state != self._last_emitted_state + ): + singer.write_message(singer.StateMessage(value=self.tap_state)) + self._last_emitted_state = copy.deepcopy(self.tap_state) + self._is_state_flushed = True - def _generate_schema_messages(self) -> Generator[SchemaMessage, None, None]: + def _generate_schema_messages( + self, + ) -> t.Generator[singer.SchemaMessage, None, None]: """Generate schema messages from stream maps. Yields: @@ -709,13 +787,12 @@ def _generate_schema_messages(self) -> Generator[SchemaMessage, None, None]: # Don't emit schema if the stream's records are all ignored. 
continue - schema_message = SchemaMessage( + yield singer.SchemaMessage( stream_map.stream_alias, stream_map.transformed_schema, stream_map.transformed_key_properties, bookmark_keys, ) - yield schema_message def _write_schema_message(self) -> None: """Write out a SCHEMA message with the stream schema.""" @@ -723,7 +800,7 @@ def _write_schema_message(self) -> None: singer.write_message(schema_message) @property - def mask(self) -> SelectionMask: + def mask(self) -> singer.SelectionMask: """Get a boolean mask for stream and property selection. Returns: @@ -737,7 +814,7 @@ def mask(self) -> SelectionMask: def _generate_record_messages( self, record: dict, - ) -> Generator[RecordMessage, None, None]: + ) -> t.Generator[singer.RecordMessage, None, None]: """Write out a RECORD message. Args: @@ -749,23 +826,22 @@ def _generate_record_messages( pop_deselected_record_properties(record, self.schema, self.mask, self.logger) record = conform_record_data_types( stream_name=self.name, - row=record, + record=record, schema=self.schema, + level=self.TYPE_CONFORMANCE_LEVEL, logger=self.logger, ) for stream_map in self.stream_maps: mapped_record = stream_map.transform(record) # Emit record if not filtered if mapped_record is not None: - record_message = RecordMessage( + yield singer.RecordMessage( stream=stream_map.stream_alias, record=mapped_record, version=None, time_extracted=utc_now(), ) - yield record_message - def _write_record_message(self, record: dict) -> None: """Write out a RECORD message. @@ -775,97 +851,35 @@ def _write_record_message(self, record: dict) -> None: for record_message in self._generate_record_messages(record): singer.write_message(record_message) - @property - def _metric_logging_function(self) -> Optional[Callable]: - """Return the metrics logging function. - - Returns: - The logging function for emitting metrics. - - Raises: - ValueError: If logging level setting is an unsupported value. - """ - if METRICS_LOG_LEVEL_SETTING not in self.config: - return self.logger.info + self._is_state_flushed = False - if self.config[METRICS_LOG_LEVEL_SETTING].upper() == "INFO": - return self.logger.info - - if self.config[METRICS_LOG_LEVEL_SETTING].upper() == "DEBUG": - return self.logger.debug - - if self.config[METRICS_LOG_LEVEL_SETTING].upper() == "NONE": - return None - - raise ValueError( - "Unexpected logging level for metrics: " - + self.config[METRICS_LOG_LEVEL_SETTING] - ) - - def _write_metric_log(self, metric: dict, extra_tags: Optional[dict]) -> None: - """Emit a metric log. Optionally with appended tag info. - - Args: - metric: TODO - extra_tags: TODO - - Returns: - None - """ - if not self._metric_logging_function: - return None - - if extra_tags: - metric["tags"].update(extra_tags) - self._metric_logging_function(f"INFO METRIC: {json.dumps(metric)}") - - def _write_record_count_log( - self, record_count: int, context: Optional[dict] + def _write_batch_message( + self, + encoding: BaseBatchFileEncoding, + manifest: list[str], ) -> None: - """Emit a metric log. Optionally with appended tag info. + """Write out a BATCH message. Args: - record_count: TODO - context: Stream partition or context dictionary. + encoding: The encoding to use for the batch. + manifest: A list of filenames for the batch. 
""" - extra_tags = {} if not context else {"context": context} - counter_metric: Dict[str, Any] = { - "type": "counter", - "metric": "record_count", - "value": record_count, - "tags": {"stream": self.name}, - } - self._write_metric_log(counter_metric, extra_tags=extra_tags) - - def _write_request_duration_log( - self, - endpoint: str, - response: requests.Response, - context: Optional[dict], - extra_tags: Optional[dict], - ) -> None: - """TODO. + singer.write_message( + SDKBatchMessage( + stream=self.name, + encoding=encoding, + manifest=manifest, + ), + ) + self._is_state_flushed = False + + def _log_metric(self, point: metrics.Point) -> None: + """Log a single measurement. Args: - endpoint: TODO - response: TODO - context: Stream partition or context dictionary. - extra_tags: TODO + point: A single measurement value. """ - request_duration_metric: Dict[str, Any] = { - "type": "timer", - "metric": "http_request_duration", - "value": response.elapsed.total_seconds(), - "tags": { - "endpoint": endpoint, - "http_status_code": response.status_code, - "status": "succeeded" if response.status_code < 400 else "failed", - }, - } - extra_tags = extra_tags or {} - if context: - extra_tags["context"] = context - self._write_metric_log(metric=request_duration_metric, extra_tags=extra_tags) + metrics.log(self.metrics_logger, point=point) def log_sync_costs(self) -> None: """Log a summary of Sync costs. @@ -879,27 +893,72 @@ def log_sync_costs(self) -> None: msg = f"Total Sync costs for stream {self.name}: {self._sync_costs}" self.logger.info(msg) - def _check_max_record_limit(self, record_count: int) -> None: - """TODO. + def _check_max_record_limit(self, current_record_index: int) -> None: + """Raise an exception if dry run record limit exceeded. + + Raised if we find dry run record limit exceeded, + aka `current_record_index > self.ABORT_AT_RECORD_COUNT - 1`. Args: - record_count: TODO. + current_record_index: The zero-based index of the current record. Raises: - MaxRecordsLimitException: TODO. + AbortedSyncFailedException: Raised if sync could not reach a valid state. + AbortedSyncPausedException: Raised if sync was able to be transitioned into + a valid state without data loss or corruption. """ if ( - self._MAX_RECORDS_LIMIT is not None - and record_count >= self._MAX_RECORDS_LIMIT + self.ABORT_AT_RECORD_COUNT is not None + and current_record_index > self.ABORT_AT_RECORD_COUNT - 1 ): - raise MaxRecordsLimitException( - "Stream prematurely aborted due to the stream's max record " - f"limit ({self._MAX_RECORDS_LIMIT}) being reached." - ) + try: + self._abort_sync( + abort_reason=MaxRecordsLimitException( + "Stream prematurely aborted due to the stream's max dry run " + f"record limit ({self.ABORT_AT_RECORD_COUNT}) being reached.", + ), + ) + except (AbortedSyncFailedException, AbortedSyncPausedException) as ex: + raise ex + + def _abort_sync(self, abort_reason: Exception) -> None: + """Handle a sync operation being aborted. + + This method will attempt to close out the sync operation as gracefully as + possible - for instance, if a max runtime or record count is reached. This can + also be called for `SIGTERM` and KeyboardInterrupt events. + + If a state message is pending, we will attempt to write it to STDOUT before + shutting down. + + If the stream can reach a valid resumable state, then we will raise + `AbortedSyncPausedException`. Otherwise, `AbortedSyncFailedException` will be + raised. + + Args: + abort_reason: The exception that triggered the sync to be aborted. 
+ + Raises: + AbortedSyncFailedException: Raised if sync could not reach a valid state. + AbortedSyncPausedException: Raised if sync was able to be transitioned into + a valid state without data loss or corruption. + """ + self._write_state_message() # Write out state message if pending. + + if self.replication_method == "FULL_TABLE": + msg = "Sync operation aborted for stream in 'FULL_TABLE' replication mode." + raise AbortedSyncFailedException(msg) from abort_reason + + if is_state_non_resumable(self.stream_state): + msg = "Sync operation aborted and state is not in a resumable state." + raise AbortedSyncFailedException(msg) from abort_reason + + # Else, the sync operation can be assumed to be in a valid resumable state. + raise AbortedSyncPausedException from abort_reason # Handle interim stream state - def reset_state_progress_markers(self, state: Optional[dict] = None) -> None: + def reset_state_progress_markers(self, state: dict | None = None) -> None: """Reset progress markers. If all=True, all state contexts will be set. This method is internal to the SDK and should not need to be overridden. @@ -908,21 +967,27 @@ def reset_state_progress_markers(self, state: Optional[dict] = None) -> None: state: State object to promote progress markers with. """ if state is None or state == {}: - context: Optional[dict] + context: dict | None for context in self.partitions or [{}]: - context = context or None - state = self.get_context_state(context) + state = self.get_context_state(context or None) reset_state_progress_markers(state) return reset_state_progress_markers(state) - def finalize_state_progress_markers(self, state: Optional[dict] = None) -> None: - """Reset progress markers. If all=True, all state contexts will be finalized. + def _finalize_state(self, state: dict | None = None) -> None: + """Reset progress markers and state flushed flag to ensure state is written. - This method is internal to the SDK and should not need to be overridden. + Args: + state: State object to promote progress markers with. + """ + state = finalize_state_progress_markers(state) # type: ignore[arg-type] + self._is_state_flushed = False - If all=True and the stream has children, child streams will also be finalized. + def finalize_state_progress_markers(self, state: dict | None = None) -> None: + """Reset progress markers and emit state message if necessary. + + This method is internal to the SDK and should not need to be overridden. Args: state: State object to promote progress markers with. @@ -931,107 +996,178 @@ def finalize_state_progress_markers(self, state: Optional[dict] = None) -> None: for child_stream in self.child_streams or []: child_stream.finalize_state_progress_markers() - context: Optional[dict] + context: dict | None for context in self.partitions or [{}]: - context = context or None - state = self.get_context_state(context) - finalize_state_progress_markers(state) - return + state = self.get_context_state(context or None) + self._finalize_state(state) + else: + self._finalize_state(state) - finalize_state_progress_markers(state) + self._write_state_message() # Private sync methods: - def _sync_records( # noqa C901 # too complex - self, context: Optional[dict] = None + def _process_record( + self, + record: dict, + child_context: dict | None = None, + partition_context: dict | None = None, ) -> None: + """Process a record. + + Args: + record: The record to process. + child_context: The child context. + partition_context: The partition context. 
+ """ + partition_context = partition_context or {} + child_context = copy.copy( + self.get_child_context(record=record, context=child_context), + ) + for key, val in partition_context.items(): + # Add state context to records if not already present + if key not in record: + record[key] = val + + # Sync children, except when primary mapper filters out the record + if self.stream_maps[0].get_filter_result(record): + self._sync_children(child_context) + + def _sync_records( # noqa: C901 + self, + context: dict | None = None, + *, + write_messages: bool = True, + ) -> t.Generator[dict, t.Any, t.Any]: """Sync records, emitting RECORD and STATE messages. Args: context: Stream partition or context dictionary. + write_messages: Whether to write Singer messages to stdout. Raises: - InvalidStreamSortException: TODO + InvalidStreamSortException: Raised if sorting errors are found while + syncing the records. + + Yields: + Each record from the source. """ - record_count = 0 - current_context: Optional[dict] - context_list: Optional[List[dict]] + # Initialize metrics + record_counter = metrics.record_counter(self.name) + timer = metrics.sync_timer(self.name) + + record_index = 0 + context_element: dict | None + context_list: list[dict] | None context_list = [context] if context is not None else self.partitions selected = self.selected - for current_context in context_list or [{}]: - partition_record_count = 0 - current_context = current_context or None - state = self.get_context_state(current_context) - state_partition_context = self._get_state_partition_context(current_context) - self._write_starting_replication_value(current_context) - child_context: Optional[dict] = ( - None if current_context is None else copy.copy(current_context) - ) - for record_result in self.get_records(current_context): - if isinstance(record_result, tuple): - # Tuple items should be the record and the child context - record, child_context = record_result - else: - record = record_result - child_context = copy.copy( - self.get_child_context(record=record, context=child_context) + with record_counter, timer: + for context_element in context_list or [{}]: + record_counter.context = context_element + timer.context = context_element + + partition_record_index = 0 + current_context = context_element or None + state = self.get_context_state(current_context) + state_partition_context = self._get_state_partition_context( + current_context, ) - for key, val in (state_partition_context or {}).items(): - # Add state context to records if not already present - if key not in record: - record[key] = val - - # Sync children, except when primary mapper filters out the record - if self.stream_maps[0].get_filter_result(record): - self._sync_children(child_context) - self._check_max_record_limit(record_count) - if selected: - if (record_count - 1) % self.STATE_MSG_FREQUENCY == 0: - self._write_state_message() - self._write_record_message(record) + self._write_starting_replication_value(current_context) + child_context: dict | None = ( + None if current_context is None else copy.copy(current_context) + ) + + for record_result in self.get_records(current_context): + self._check_max_record_limit(current_record_index=record_index) + + if isinstance(record_result, tuple): + # Tuple items should be the record and the child context + record, child_context = record_result + else: + record = record_result try: - self._increment_stream_state(record, context=current_context) + self._process_record( + record, + child_context=child_context, + 
partition_context=state_partition_context, + ) except InvalidStreamSortException as ex: log_sort_error( log_fn=self.logger.error, ex=ex, - record_count=record_count + 1, - partition_record_count=partition_record_count + 1, + record_count=record_index + 1, + partition_record_count=partition_record_index + 1, current_context=current_context, state_partition_context=state_partition_context, stream_name=self.name, ) raise ex - record_count += 1 - partition_record_count += 1 - if current_context == state_partition_context: - # Finalize per-partition state only if 1:1 with context - finalize_state_progress_markers(state) + if selected: + if write_messages: + self._write_record_message(record) + + self._increment_stream_state(record, context=current_context) + if ( + record_index + 1 + ) % self.STATE_MSG_FREQUENCY == 0 and write_messages: + self._write_state_message() + + record_counter.increment() + yield record + + record_index += 1 + partition_record_index += 1 + + if current_context == state_partition_context: + # Finalize per-partition state only if 1:1 with context + self._finalize_state(state) + if not context: - # Finalize total stream only if we have the full full context. + # Finalize total stream only if we have the full context. # Otherwise will be finalized by tap at end of sync. - finalize_state_progress_markers(self.stream_state) - self._write_record_count_log(record_count=record_count, context=context) - # Reset interim bookmarks before emitting final STATE message: - self._write_state_message() + self._finalize_state(self.stream_state) + + if write_messages: + # Write final state message if we haven't already + self._write_state_message() + + def _sync_batches( + self, + batch_config: BatchConfig, + context: dict | None = None, + ) -> None: + """Sync batches, emitting BATCH messages. + + Args: + batch_config: The batch configuration. + context: Stream partition or context dictionary. + """ + with metrics.batch_counter(self.name, context=context) as counter: + for encoding, manifest in self.get_batches(batch_config, context): + counter.increment() + self._write_batch_message(encoding=encoding, manifest=manifest) + self._write_state_message() # Public methods ("final", not recommended to be overridden) @final - def sync(self, context: Optional[dict] = None) -> None: + def sync(self, context: dict | None = None) -> None: """Sync this stream. This method is internal to the SDK and should not need to be overridden. Args: context: Stream partition or context dictionary. + + Raises: + Exception: Any exception raised by the sync process. 
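For context on the BATCH path used by `_sync_batches` above and by `sync()` just below: records are only synced one by one when `get_batch_config(self.config)` returns nothing. A tap config shaped roughly like this (keys assumed from the SDK's batch documentation, paths hypothetical) routes the stream through `_sync_batches` instead:

.. code-block:: python

    config = {
        "batch_config": {
            "encoding": {"format": "jsonl", "compression": "gzip"},
            "storage": {
                "root": "file:///tmp/batches",  # hypothetical local destination
                "prefix": "my-stream-",
            },
        },
    }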
""" msg = f"Beginning {self.replication_method.lower()} sync of '{self.name}'" if context: msg += f" with context: {context}" - self.logger.info(f"{msg}...") + self.logger.info("%s...", msg) # Use a replication signpost, if available signpost = self.get_replication_key_signpost(context) @@ -1041,17 +1177,38 @@ def sync(self, context: Optional[dict] = None) -> None: # Send a SCHEMA message to the downstream target: if self.selected: self._write_schema_message() - # Sync the records themselves: - self._sync_records(context) - def _sync_children(self, child_context: dict) -> None: + try: + batch_config = self.get_batch_config(self.config) + if batch_config: + self._sync_batches(batch_config, context=context) + else: + # Sync the records themselves: + for _ in self._sync_records(context=context): + pass + except Exception as ex: + self.logger.exception( + "An unhandled error occurred while syncing '%s'", + self.name, + ) + raise ex + + def _sync_children(self, child_context: dict | None) -> None: + if child_context is None: + self.logger.warning( + "Context for child streams of '%s' is null, " + "skipping sync of any child streams", + self.name, + ) + return + for child_stream in self.child_streams: if child_stream.selected or child_stream.has_selected_descendents: child_stream.sync(context=child_context) # Overridable Methods - def apply_catalog(self, catalog: Catalog) -> None: + def apply_catalog(self, catalog: singer.Catalog) -> None: """Apply a catalog dict, updating any settings overridden within the catalog. Developers may override this method in order to introduce advanced catalog @@ -1071,7 +1228,7 @@ def apply_catalog(self, catalog: Catalog) -> None: if catalog_entry.replication_method: self.forced_replication_method = catalog_entry.replication_method - def _get_state_partition_context(self, context: Optional[dict]) -> Optional[Dict]: + def _get_state_partition_context(self, context: dict | None) -> dict | None: """Override state handling if Stream.state_partitioning_keys is specified. Args: @@ -1088,7 +1245,7 @@ def _get_state_partition_context(self, context: Optional[dict]) -> Optional[Dict return {k: v for k, v in context.items() if k in self.state_partitioning_keys} - def get_child_context(self, record: dict, context: Optional[dict]) -> dict: + def get_child_context(self, record: dict, context: dict | None) -> dict | None: """Return a child context object from the record and optional provided context. By default, will return context if provided and otherwise the record dict. @@ -1096,29 +1253,34 @@ def get_child_context(self, record: dict, context: Optional[dict]) -> dict: Developers may override this behavior to send specific information to child streams for context. + Return ``None`` if no child streams should be synced, for example if the + parent record was deleted and the child records can no longer be synced. + Args: record: Individual record in the stream. context: Stream partition or context dictionary. Returns: - A dictionary with context values for a child stream. + A dictionary with context values for a child stream, or None if no child + streams should be synced. Raises: NotImplementedError: If the stream has children but this method is not - overriden. + overridden. 
""" if context is None: for child_stream in self.child_streams: if child_stream.state_partitioning_keys is None: parent_type = type(self).__name__ child_type = type(child_stream).__name__ - raise NotImplementedError( + msg = ( "No child context behavior was defined between parent stream " - f"'{self.name}' and child stream '{child_stream.name}'." - "The parent stream must define " + f"'{self.name}' and child stream '{child_stream.name}'. " + f"The parent stream must define " f"`{parent_type}.get_child_context()` and/or the child stream " f"must define `{child_type}.state_partitioning_keys`." ) + raise NotImplementedError(msg) return context or record @@ -1126,11 +1288,12 @@ def get_child_context(self, record: dict, context: Optional[dict]) -> dict: @abc.abstractmethod def get_records( - self, context: Optional[dict] - ) -> Iterable[Union[dict, Tuple[dict, dict]]]: - """Abstract row generator function. Must be overridden by the child class. + self, + context: dict | None, + ) -> t.Iterable[dict | tuple[dict, dict | None]]: + """Abstract record generator function. Must be overridden by the child class. - Each row emitted should be a dictionary of property names to their values. + Each record emitted should be a dictionary of property names to their values. Returns either a record dict or a tuple: (record_dict, child_context) A method which should retrieve data from the source and return records @@ -1146,14 +1309,60 @@ def get_records( Parent streams can optionally return a tuple, in which case the second item in the tuple being a `child_context` dictionary for the stream's `context`. + + If the child context object in the tuple is ``None``, the child streams will + be skipped. This is useful for cases where the parent record was deleted and + the child records can no longer be synced. + More info: :doc:`/parent_streams` Args: context: Stream partition or context dictionary. """ - pass - def post_process(self, row: dict, context: Optional[dict] = None) -> Optional[dict]: + def get_batch_config(self, config: t.Mapping) -> BatchConfig | None: + """Return the batch config for this stream. + + Args: + config: Tap configuration dictionary. + + Returns: + Batch config for this stream. + """ + raw = config.get("batch_config") + return BatchConfig.from_dict(raw) if raw else None + + def get_batches( + self, + batch_config: BatchConfig, + context: dict | None = None, + ) -> t.Iterable[tuple[BaseBatchFileEncoding, list[str]]]: + """Batch generator function. + + Developers are encouraged to override this method to customize batching + behavior for databases, bulk APIs, etc. + + Args: + batch_config: Batch config for this stream. + context: Stream partition or context dictionary. + + Yields: + A tuple of (encoding, manifest) for each batch. + """ + batcher = JSONLinesBatcher( + tap_name=self.tap_name, + stream_name=self.name, + batch_config=batch_config, + ) + records = self._sync_records(context, write_messages=False) + for manifest in batcher.get_batches(records=records): + yield batch_config.encoding, manifest + + def post_process( + self, + row: dict, + context: dict | None = None, # noqa: ARG002 + ) -> dict | None: """As needed, append or transform raw data to match expected structure. Optional. 
This method gives developers an opportunity to "clean up" the results diff --git a/singer_sdk/streams/graphql.py b/singer_sdk/streams/graphql.py index 4f98695b4..fde4f99b9 100644 --- a/singer_sdk/streams/graphql.py +++ b/singer_sdk/streams/graphql.py @@ -1,13 +1,17 @@ """Abstract base class for API-type streams.""" +from __future__ import annotations + import abc -from typing import Any, Optional +import typing as t from singer_sdk.helpers._classproperty import classproperty from singer_sdk.streams.rest import RESTStream +_TToken = t.TypeVar("_TToken") + -class GraphQLStream(RESTStream, metaclass=abc.ABCMeta): +class GraphQLStream(RESTStream, t.Generic[_TToken], metaclass=abc.ABCMeta): """Abstract base class for API-type streams. GraphQL streams inherit from the class `GraphQLStream`, which in turn inherits from @@ -20,7 +24,7 @@ class GraphQLStream(RESTStream, metaclass=abc.ABCMeta): rest_method = "POST" @classproperty - def records_jsonpath(cls) -> str: # type: ignore # OK: str vs @classproperty + def records_jsonpath(cls) -> str: # type: ignore[override] # noqa: N805 """Get the JSONPath expression to extract records from an API response. Returns: @@ -35,11 +39,14 @@ def query(self) -> str: Raises: NotImplementedError: If the derived class doesn't define this property. """ - raise NotImplementedError("GraphQLStream `query` is not defined.") + msg = "GraphQLStream `query` is not defined." + raise NotImplementedError(msg) def prepare_request_payload( - self, context: Optional[dict], next_page_token: Optional[Any] - ) -> Optional[dict]: + self, + context: dict | None, + next_page_token: _TToken | None, + ) -> dict | None: """Prepare the data payload for the GraphQL API request. Developers generally should generally not need to override this method. @@ -58,17 +65,20 @@ def prepare_request_payload( ValueError: If the `query` property is not set in the request body. """ params = self.get_url_params(context, next_page_token) - if self.query is None: - raise ValueError("Graphql `query` property not set.") - else: - query = self.query + query = self.query + + if query is None: + msg = "Graphql `query` property not set." 
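A minimal GraphQL stream sketch matching the wrapping logic above: the query body is wrapped in ``query { ... }`` automatically when it does not already start with ``query``. The endpoint and field names are hypothetical:

.. code-block:: python

    class UsersStream(GraphQLStream):
        name = "users"
        url_base = "https://api.example.com/graphql"
        query = """
            users(first: 100) {
                id
                name
                updatedAt
            }
        """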
+ raise ValueError(msg) + if not query.lstrip().startswith("query"): # Wrap text in "query { }" if not already wrapped query = "query { " + query + " }" + query = query.lstrip() request_data = { "query": (" ".join([line.strip() for line in query.splitlines()])), "variables": params, } - self.logger.debug(f"Attempting query:\n{query}") + self.logger.debug("Attempting query:\n%s", query) return request_data diff --git a/singer_sdk/streams/rest.py b/singer_sdk/streams/rest.py index 34c77de60..563956a5f 100644 --- a/singer_sdk/streams/rest.py +++ b/singer_sdk/streams/rest.py @@ -5,40 +5,61 @@ import abc import copy import logging -from datetime import datetime -from typing import Any, Callable, Generator, Generic, Iterable, TypeVar, Union +import typing as t +from http import HTTPStatus from urllib.parse import urlparse +from warnings import warn import backoff import requests -from singer.schema import Schema -from singer_sdk.authenticators import APIAuthenticatorBase, SimpleAuthenticator +from singer_sdk import metrics +from singer_sdk.authenticators import SimpleAuthenticator from singer_sdk.exceptions import FatalAPIError, RetriableAPIError from singer_sdk.helpers.jsonpath import extract_jsonpath -from singer_sdk.plugin_base import PluginBase as TapBaseClass +from singer_sdk.pagination import ( + BaseAPIPaginator, + JSONPathPaginator, + LegacyStreamPaginator, + SimpleHeaderPaginator, +) from singer_sdk.streams.core import Stream +if t.TYPE_CHECKING: + import sys + from datetime import datetime + + from backoff.types import Details + + from singer_sdk._singerlib import Schema + from singer_sdk.tap_base import Tap + + if sys.version_info >= (3, 10): + from typing import TypeAlias # noqa: ICN003 + else: + from typing_extensions import TypeAlias + DEFAULT_PAGE_SIZE = 1000 DEFAULT_REQUEST_TIMEOUT = 300 # 5 minutes -_TToken = TypeVar("_TToken") -_T = TypeVar("_T") -_MaybeCallable = Union[_T, Callable[[], _T]] +_TToken = t.TypeVar("_TToken") +_Auth: TypeAlias = t.Callable[[requests.PreparedRequest], requests.PreparedRequest] -class RESTStream(Stream, Generic[_TToken], metaclass=abc.ABCMeta): +class RESTStream(Stream, t.Generic[_TToken], metaclass=abc.ABCMeta): """Abstract base class for REST API streams.""" _page_size: int = DEFAULT_PAGE_SIZE _requests_session: requests.Session | None + + #: HTTP method to use for requests. Defaults to "GET". rest_method = "GET" #: JSONPath expression to extract records from the API response. records_jsonpath: str = "$[*]" #: Response code reference for rate limit retries - extra_retry_statuses: list[int] = [429] + extra_retry_statuses: t.Sequence[int] = [HTTPStatus.TOO_MANY_REQUESTS] #: Optional JSONPath expression to extract a pagination token from the API response. #: Example: `"$.next_page"` @@ -53,13 +74,12 @@ class RESTStream(Stream, Generic[_TToken], metaclass=abc.ABCMeta): @abc.abstractmethod def url_base(self) -> str: """Return the base url, e.g. ``https://api.mysite.com/v3/``.""" - pass def __init__( self, - tap: TapBaseClass, + tap: Tap, name: str | None = None, - schema: dict[str, Any] | Schema | None = None, + schema: dict[str, t.Any] | Schema | None = None, path: str | None = None, ) -> None: """Initialize the REST stream. 
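For orientation, a hypothetical stream exercising the class attributes retyped above (`records_jsonpath`, `next_page_token_jsonpath`, `extra_retry_statuses`); the endpoint and JSONPaths are assumptions:

.. code-block:: python

    from http import HTTPStatus

    from singer_sdk.streams import RESTStream


    class AccountsStream(RESTStream):
        name = "accounts"
        url_base = "https://api.example.com/v1"
        path = "/accounts"
        records_jsonpath = "$.data[*]"
        next_page_token_jsonpath = "$.meta.next_cursor"
        extra_retry_statuses = [
            HTTPStatus.TOO_MANY_REQUESTS,
            HTTPStatus.SERVICE_UNAVAILABLE,
        ]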
@@ -88,11 +108,7 @@ def _url_encode(val: str | datetime | bool | int | list[str]) -> str: Returns: TODO """ - if isinstance(val, str): - result = val.replace("/", "%2F") - else: - result = str(val) - return result + return val.replace("/", "%2F") if isinstance(val, str) else str(val) def get_url(self, context: dict | None) -> str: """Get stream entity URL. @@ -124,7 +140,7 @@ def requests_session(self) -> requests.Session: The `requests.Session`_ object for HTTP requests. .. _requests.Session: - https://docs.python-requests.org/en/latest/api/#request-sessions + https://requests.readthedocs.io/en/latest/api/#request-sessions """ if not self._requests_session: self._requests_session = requests.Session() @@ -159,15 +175,22 @@ def validate_response(self, response: requests.Response) -> None: RetriableAPIError: If the request is retriable. .. _requests.Response: - https://docs.python-requests.org/en/latest/api/#requests.Response + https://requests.readthedocs.io/en/latest/api/#requests.Response """ if ( response.status_code in self.extra_retry_statuses - or 500 <= response.status_code < 600 + or HTTPStatus.INTERNAL_SERVER_ERROR + <= response.status_code + <= max(HTTPStatus) ): msg = self.response_error_message(response) raise RetriableAPIError(msg, response) - elif 400 <= response.status_code < 500: + + if ( + HTTPStatus.BAD_REQUEST + <= response.status_code + < HTTPStatus.INTERNAL_SERVER_ERROR + ): msg = self.response_error_message(response) raise FatalAPIError(msg) @@ -183,17 +206,20 @@ def response_error_message(self, response: requests.Response) -> str: str: The error message """ full_path = urlparse(response.url).path or self.path - if 400 <= response.status_code < 500: - error_type = "Client" - else: - error_type = "Server" + error_type = ( + "Client" + if HTTPStatus.BAD_REQUEST + <= response.status_code + < HTTPStatus.INTERNAL_SERVER_ERROR + else "Server" + ) return ( f"{response.status_code} {error_type} Error: " f"{response.reason} for path: {full_path}" ) - def request_decorator(self, func: Callable) -> Callable: + def request_decorator(self, func: t.Callable) -> t.Callable: """Instantiate a decorator for handling request failures. Uses a wait generator defined in `backoff_wait_generator` to @@ -208,19 +234,26 @@ def request_decorator(self, func: Callable) -> Callable: Returns: A decorated method. """ - decorator: Callable = backoff.on_exception( + decorator: t.Callable = backoff.on_exception( self.backoff_wait_generator, ( + ConnectionResetError, RetriableAPIError, requests.exceptions.ReadTimeout, + requests.exceptions.ConnectionError, + requests.exceptions.ChunkedEncodingError, + requests.exceptions.ContentDecodingError, ), max_tries=self.backoff_max_tries, on_backoff=self.backoff_handler, + jitter=self.backoff_jitter, )(func) return decorator def _request( - self, prepared_request: requests.PreparedRequest, context: dict | None + self, + prepared_request: requests.PreparedRequest, + context: dict | None, ) -> requests.Response: """TODO. 
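A hedged example of customizing the `HTTPStatus`-based validation shown further down; treating 404 as "no data yet" is an assumption about a hypothetical API, not SDK behavior:

.. code-block:: python

    from http import HTTPStatus


    class AccountsStream(RESTStream):
        def validate_response(self, response):
            if response.status_code == HTTPStatus.NOT_FOUND:
                return  # hypothetical API: 404 means the resource has no data yet
            super().validate_response(response)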
@@ -232,41 +265,85 @@ def _request( TODO """ response = self.requests_session.send(prepared_request, timeout=self.timeout) - if self._LOG_REQUEST_METRICS: - extra_tags = {} - if self._LOG_REQUEST_METRIC_URLS: - extra_tags["url"] = prepared_request.path_url - self._write_request_duration_log( - endpoint=self.path, - response=response, - context=context, - extra_tags=extra_tags, - ) + self._write_request_duration_log( + endpoint=self.path, + response=response, + context=context, + extra_tags={"url": prepared_request.path_url} + if self._LOG_REQUEST_METRIC_URLS + else None, + ) self.validate_response(response) logging.debug("Response received successfully.") return response def get_url_params( - self, context: dict | None, next_page_token: _TToken | None - ) -> dict[str, Any]: - """Return a dictionary of values to be used in URL parameterization. + self, + context: dict | None, # noqa: ARG002 + next_page_token: _TToken | None, # noqa: ARG002 + ) -> dict[str, t.Any] | str: + """Return a dictionary or string of URL query parameters. If paging is supported, developers may override with specific paging logic. + If your source needs special handling and, for example, parentheses should not + be encoded, you can return a string constructed with + `urllib.parse.urlencode`_: + + .. code-block:: python + + from urllib.parse import urlencode + + class MyStream(RESTStream): + def get_url_params(self, context, next_page_token): + params = {"key": "(a,b,c)"} + return urlencode(params, safe="()") + Args: context: Stream partition or context dictionary. next_page_token: Token, page number or any request argument to request the next page of data. Returns: - Dictionary of URL query parameters to use in the request. + Dictionary or encoded string with URL query parameters to use in the + request. + + .. _urllib.parse.urlencode: + https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlencode """ return {} + def build_prepared_request( + self, + *args: t.Any, + **kwargs: t.Any, + ) -> requests.PreparedRequest: + """Build a generic but authenticated request. + + Uses the authenticator instance to mutate the request with authentication. + + Args: + *args: Arguments to pass to `requests.Request`_. + **kwargs: Keyword arguments to pass to `requests.Request`_. + + Returns: + A `requests.PreparedRequest`_ object. + + .. _requests.PreparedRequest: + https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest + .. _requests.Request: + https://requests.readthedocs.io/en/latest/api/#requests.Request + """ + request = requests.Request(*args, **kwargs) + self.requests_session.auth = self.authenticator + return self.requests_session.prepare_request(request) + def prepare_request( - self, context: dict | None, next_page_token: _TToken | None + self, + context: dict | None, + next_page_token: _TToken | None, ) -> requests.PreparedRequest: - """Prepare a request object. + """Prepare a request object for this stream. If partitioning is supported, the `context` object will contain the partition definitions. 
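A typical `get_url_params` override (inside a `RESTStream` subclass) under the `dict | str` return type described above, combining page size, the pagination token, and an incremental filter; the query-parameter names are assumptions about the upstream API:

.. code-block:: python

    def get_url_params(self, context, next_page_token):
        params = {"per_page": self._page_size}
        if next_page_token:
            params["page"] = next_page_token
        if self.replication_key:
            params["updated_since"] = self.get_starting_replication_key_value(context)
        return params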
Pagination information can be parsed from `next_page_token` if @@ -283,27 +360,19 @@ def prepare_request( """ http_method = self.rest_method url: str = self.get_url(context) - params: dict = self.get_url_params(context, next_page_token) + params: dict | str = self.get_url_params(context, next_page_token) request_data = self.prepare_request_payload(context, next_page_token) headers = self.http_headers - authenticator = self.authenticator - if authenticator: - headers.update(authenticator.auth_headers or {}) - params.update(authenticator.auth_params or {}) - - request = self.requests_session.prepare_request( - requests.Request( - method=http_method, - url=url, - params=params, - headers=headers, - json=request_data, - ), + return self.build_prepared_request( + method=http_method, + url=url, + params=params, + headers=headers, + json=request_data, ) - return request - def request_records(self, context: dict | None) -> Iterable[dict]: + def request_records(self, context: dict | None) -> t.Iterable[dict]: """Request records from REST endpoint(s), returning response records. If pagination is detected, pages will be recursed automatically. @@ -313,33 +382,61 @@ def request_records(self, context: dict | None) -> Iterable[dict]: Yields: An item for every record in the response. - - Raises: - RuntimeError: If a loop in pagination is detected. That is, when two - consecutive pagination tokens are identical. """ - next_page_token: _TToken | None = None - finished = False + paginator = self.get_new_paginator() decorated_request = self.request_decorator(self._request) - while not finished: - prepared_request = self.prepare_request( - context, next_page_token=next_page_token - ) - resp = decorated_request(prepared_request, context) - self.update_sync_costs(prepared_request, resp, context) - yield from self.parse_response(resp) - previous_token = copy.deepcopy(next_page_token) - next_page_token = self.get_next_page_token( - response=resp, previous_token=previous_token - ) - if next_page_token and next_page_token == previous_token: - raise RuntimeError( - f"Loop detected in pagination. " - f"Pagination token {next_page_token} is identical to prior token." + with metrics.http_request_counter(self.name, self.path) as request_counter: + request_counter.context = context + + while not paginator.finished: + prepared_request = self.prepare_request( + context, + next_page_token=paginator.current_value, ) - # Cycle until get_next_page_token() no longer returns a value - finished = not next_page_token + resp = decorated_request(prepared_request, context) + request_counter.increment() + self.update_sync_costs(prepared_request, resp, context) + yield from self.parse_response(resp) + + paginator.advance(resp) + + def _write_request_duration_log( + self, + endpoint: str, + response: requests.Response, + context: dict | None, + extra_tags: dict | None, + ) -> None: + """TODO. + + Args: + endpoint: TODO + response: TODO + context: Stream partition or context dictionary. 
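Since `request_records` above now drives pagination through a paginator object, a stream can return one of the paginators imported at the top of this module instead of handling tokens by hand; the JSONPath is hypothetical:

.. code-block:: python

    from singer_sdk.pagination import JSONPathPaginator


    class AccountsStream(RESTStream):
        def get_new_paginator(self):
            return JSONPathPaginator("$.links.next")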
+ extra_tags: TODO + """ + extra_tags = extra_tags or {} + if context: + extra_tags[metrics.Tag.CONTEXT] = context + + point = metrics.Point( + "timer", + metric=metrics.Metric.HTTP_REQUEST_DURATION, + value=response.elapsed.total_seconds(), + tags={ + metrics.Tag.STREAM: self.name, + metrics.Tag.ENDPOINT: endpoint, + metrics.Tag.HTTP_STATUS_CODE: response.status_code, + metrics.Tag.STATUS: ( + metrics.Status.SUCCEEDED + if response.status_code < HTTPStatus.BAD_REQUEST + else metrics.Status.FAILED + ), + **extra_tags, + }, + ) + self._log_metric(point) def update_sync_costs( self, @@ -360,8 +457,7 @@ def update_sync_costs( """ call_costs = self.calculate_sync_cost(request, response, context) self._sync_costs = { - k: self._sync_costs.get(k, 0) + call_costs.get(k, 0) - for k in call_costs.keys() + k: self._sync_costs.get(k, 0) + call_costs.get(k, 0) for k in call_costs } return self._sync_costs @@ -369,9 +465,9 @@ def update_sync_costs( def calculate_sync_cost( self, - request: requests.PreparedRequest, - response: requests.Response, - context: dict | None, + request: requests.PreparedRequest, # noqa: ARG002 + response: requests.Response, # noqa: ARG002 + context: dict | None, # noqa: ARG002 ) -> dict[str, int]: """Calculate the cost of the last API call made. @@ -399,7 +495,9 @@ def calculate_sync_cost( return {} def prepare_request_payload( - self, context: dict | None, next_page_token: _TToken | None + self, + context: dict | None, + next_page_token: _TToken | None, ) -> dict | None: """Prepare the data payload for the REST API request. @@ -413,39 +511,28 @@ def prepare_request_payload( context: Stream partition or context dictionary. next_page_token: Token, page number or any request argument to request the next page of data. - - Returns: - Dictionary with the body to use for the request. """ - return None - def get_next_page_token( - self, - response: requests.Response, - previous_token: _TToken | None, - ) -> _TToken | None: - """Return token identifying next page or None if all records have been read. - - Args: - response: A raw `requests.Response`_ object. - previous_token: Previous pagination reference. + def get_new_paginator(self) -> BaseAPIPaginator: + """Get a fresh paginator for this API endpoint. Returns: - Reference value to retrieve next page. - - .. _requests.Response: - https://docs.python-requests.org/en/latest/api/#requests.Response + A paginator instance. """ - if self.next_page_token_jsonpath: - all_matches = extract_jsonpath( - self.next_page_token_jsonpath, response.json() + if hasattr(self, "get_next_page_token"): + warn( + "`RESTStream.get_next_page_token` is deprecated and will not be used " + "in a future version of the Meltano Singer SDK. " + "Override `RESTStream.get_new_paginator` instead.", + DeprecationWarning, + stacklevel=2, ) - first_match = next(iter(all_matches), None) - next_page_token = first_match - else: - next_page_token = response.headers.get("X-Next-Page", None) + return LegacyStreamPaginator(self) + + if self.next_page_token_jsonpath: + return JSONPathPaginator(self.next_page_token_jsonpath) - return next_page_token + return SimpleHeaderPaginator("X-Next-Page") @property def http_headers(self) -> dict: @@ -475,10 +562,10 @@ def timeout(self) -> int: # Records iterator - def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: - """Return a generator of row-type dictionary objects. + def get_records(self, context: dict | None) -> t.Iterable[dict[str, t.Any]]: + """Return a generator of record-type dictionary objects. 
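A sketch of the cost-accounting hooks nearby: `calculate_sync_cost` returns per-request costs and `update_sync_costs` accumulates them into `_sync_costs`. The cost dimensions and the response header are arbitrary examples:

.. code-block:: python

    def calculate_sync_cost(self, request, response, context):
        # Hypothetical API reports consumed credits in a response header.
        return {
            "requests": 1,
            "credits": int(response.headers.get("X-Credits-Used", 0)),
        }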
- Each row emitted should be a dictionary of property names to their values. + Each record emitted should be a dictionary of property names to their values. Args: context: Stream partition or context dictionary. @@ -493,8 +580,8 @@ def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]: continue yield transformed_record - def parse_response(self, response: requests.Response) -> Iterable[dict]: - """Parse the response and return an iterator of result rows. + def parse_response(self, response: requests.Response) -> t.Iterable[dict]: + """Parse the response and return an iterator of result records. Args: response: A raw `requests.Response`_ object. @@ -503,14 +590,14 @@ def parse_response(self, response: requests.Response) -> Iterable[dict]: One item for every item found in the response. .. _requests.Response: - https://docs.python-requests.org/en/latest/api/#requests.Response + https://requests.readthedocs.io/en/latest/api/#requests.Response """ yield from extract_jsonpath(self.records_jsonpath, input=response.json()) # Abstract methods: @property - def authenticator(self) -> APIAuthenticatorBase | None: + def authenticator(self) -> _Auth: """Return or set the authenticator for managing HTTP auth headers. If an authenticator is not specified, REST-based taps will simply pass @@ -522,7 +609,7 @@ def authenticator(self) -> APIAuthenticatorBase | None: """ return SimpleAuthenticator(stream=self) - def backoff_wait_generator(self) -> Callable[..., Generator[int, Any, None]]: + def backoff_wait_generator(self) -> t.Generator[float, None, None]: """The wait generator used by the backoff decorator on request failure. See for options: @@ -533,21 +620,36 @@ def backoff_wait_generator(self) -> Callable[..., Generator[int, Any, None]]: Returns: The wait generator """ - return backoff.expo(factor=2) # type: ignore # ignore 'Returning Any' + return backoff.expo(factor=2) - def backoff_max_tries(self) -> _MaybeCallable[int] | None: + def backoff_max_tries(self) -> int: """The number of attempts before giving up when retrying requests. - Can be an integer, a zero-argument callable that returns an integer, - or ``None`` to retry indefinitely. - Returns: - int | Callable[[], int] | None: Number of max retries, callable or - ``None``. + Number of max retries. """ return 5 - def backoff_handler(self, details: dict) -> None: + def backoff_jitter(self, value: float) -> float: + """Amount of jitter to add. + + For more information see + https://github.com/litl/backoff/blob/master/backoff/_jitter.py + + We chose to default to ``random_jitter`` instead of ``full_jitter`` as we keep + some level of default jitter to be "nice" to downstream APIs but it's still + relatively close to the default value that's passed in to make tap developers' + life easier. + + Args: + value: Base amount to wait in seconds + + Returns: + Time in seconds to wait until the next request. + """ + return backoff.random_jitter(value) + + def backoff_handler(self, details: Details) -> None: """Adds additional behaviour prior to retry. 
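Hedged examples of the retry knobs documented here; the constant 30-second wait, 8 attempts, and full jitter are arbitrary choices, not SDK defaults:

.. code-block:: python

    import backoff


    class AccountsStream(RESTStream):
        def backoff_wait_generator(self):
            return backoff.constant(interval=30)

        def backoff_max_tries(self):
            return 8

        def backoff_jitter(self, value):
            return backoff.full_jitter(value)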
By default will log out backoff details, developers can override @@ -558,19 +660,29 @@ def backoff_handler(self, details: dict) -> None: https://github.com/litl/backoff#event-handlers """ logging.error( - "Backing off {wait:0.1f} seconds after {tries} tries " - "calling function {target} with args {args} and kwargs " - "{kwargs}".format(**details) + "Backing off %0.2f seconds after %d tries " + "calling function %s with args %s and kwargs " + "%s", + details.get("wait"), + details.get("tries"), + details.get("target"), + details.get("args"), + details.get("kwargs"), ) def backoff_runtime( - self, *, value: Callable[[Any], int] - ) -> Generator[int, None, None]: + self, + *, + value: t.Callable[[t.Any], int], + ) -> t.Generator[int, None, None]: """Optional backoff wait generator that can replace the default `backoff.expo`. It is based on parsing the thrown exception of the decorated method, making it possible for response values to be in scope. + You may want to review :meth:`~singer_sdk.RESTStream.backoff_jitter` if you're + overriding this function. + Args: value: a callable which takes as input the decorated function's thrown exception and determines how diff --git a/singer_sdk/streams/sql.py b/singer_sdk/streams/sql.py index dfafa2e3e..d5fb52219 100644 --- a/singer_sdk/streams/sql.py +++ b/singer_sdk/streams/sql.py @@ -1,865 +1,30 @@ """Base class for SQL-type streams.""" -import abc -import logging -from datetime import datetime -from functools import lru_cache -from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union, cast - -import singer -import sqlalchemy -from sqlalchemy.engine import Engine -from sqlalchemy.engine.reflection import Inspector - -from singer_sdk import typing as th -from singer_sdk.exceptions import ConfigValidationError -from singer_sdk.helpers._singer import CatalogEntry, MetadataMapping -from singer_sdk.plugin_base import PluginBase as TapBaseClass -from singer_sdk.streams.core import Stream - - -class SQLConnector: - """Base class for SQLAlchemy-based connectors. - - The connector class serves as a wrapper around the SQL connection. - - The functions of the connector are: - - - connecting to the source - - generating SQLAlchemy connection and engine objects - - discovering schema catalog entries - - performing type conversions to/from JSONSchema types - - dialect-specific functions, such as escaping and fully qualified names - """ - - allow_column_add: bool = True # Whether ADD COLUMN is supported. - allow_column_rename: bool = True # Whether RENAME COLUMN is supported. - allow_column_alter: bool = False # Whether altering column types is supported. - allow_merge_upsert: bool = False # Whether MERGE UPSERT is supported. - allow_temp_tables: bool = True # Whether temp tables are supported. - - def __init__( - self, config: Optional[dict] = None, sqlalchemy_url: Optional[str] = None - ) -> None: - """Initialize the SQL connector. - - Args: - config: The parent tap or target object's config. - sqlalchemy_url: Optional URL for the connection. - """ - self._config: Dict[str, Any] = config or {} - self._sqlalchemy_url: Optional[str] = sqlalchemy_url or None - self._connection: Optional[sqlalchemy.engine.Connection] = None - - @property - def config(self) -> dict: - """If set, provides access to the tap or target config. - - Returns: - The settings as a dict. - """ - return self._config - - @property - def logger(self) -> logging.Logger: - """Get logger. - - Returns: - Plugin logger. 
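The `backoff_runtime` helper above can honor a server-provided wait. This sketch assumes the raised `RetriableAPIError` exposes the triggering `requests.Response` as `.response` (per the two-argument constructor used in `validate_response`) and that the API sends a `Retry-After` header:

.. code-block:: python

    class AccountsStream(RESTStream):
        def backoff_wait_generator(self):
            def _retry_after(exception):
                return int(exception.response.headers.get("Retry-After", 30))

            return self.backoff_runtime(value=_retry_after)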
- """ - return logging.getLogger("sqlconnector") - - def create_sqlalchemy_connection(self) -> sqlalchemy.engine.Connection: - """Return a new SQLAlchemy connection using the provided config. - - By default this will create using the sqlalchemy `stream_results=True` option - described here: - https://docs.sqlalchemy.org/en/14/core/connections.html#using-server-side-cursors-a-k-a-stream-results - - Developers may override this method if their provider does not support - server side cursors (`stream_results`) or in order to use different - configurations options when creating the connection object. - - Returns: - A newly created SQLAlchemy engine object. - """ - return ( - self.create_sqlalchemy_engine() - .connect() - .execution_options(stream_results=True) - ) - - def create_sqlalchemy_engine(self) -> sqlalchemy.engine.Engine: - """Return a new SQLAlchemy engine using the provided config. - - Developers can generally override just one of the following: - `sqlalchemy_engine`, sqlalchemy_url`. - - Returns: - A newly created SQLAlchemy engine object. - """ - return sqlalchemy.create_engine(self.sqlalchemy_url, echo=False) - - @property - def connection(self) -> sqlalchemy.engine.Connection: - """Return or set the SQLAlchemy connection object. - - Returns: - The active SQLAlchemy connection object. - """ - if not self._connection: - self._connection = self.create_sqlalchemy_connection() - - return self._connection - - @property - def sqlalchemy_url(self) -> str: - """Return the SQLAlchemy URL string. - - Returns: - The URL as a string. - """ - if not self._sqlalchemy_url: - self._sqlalchemy_url = self.get_sqlalchemy_url(self.config) - - return self._sqlalchemy_url - - def get_sqlalchemy_url(self, config: Dict[str, Any]) -> str: - """Return the SQLAlchemy URL string. - - Developers can generally override just one of the following: - `sqlalchemy_engine`, `get_sqlalchemy_url`. - - Args: - config: A dictionary of settings from the tap or target config. - - Returns: - The URL as a string. - - Raises: - ConfigValidationError: If no valid sqlalchemy_url can be found. - """ - if "sqlalchemy_url" not in config: - raise ConfigValidationError( - "Could not find or create 'sqlalchemy_url' for connection." - ) - - return cast(str, config["sqlalchemy_url"]) - - @staticmethod - def to_jsonschema_type( - sql_type: Union[ - str, sqlalchemy.types.TypeEngine, Type[sqlalchemy.types.TypeEngine], Any - ] - ) -> dict: - """Return a JSON Schema representation of the provided type. - - By default will call `typing.to_jsonschema_type()` for strings and SQLAlchemy - types. - - Developers may override this method to accept additional input argument types, - to support non-standard types, or to provide custom typing logic. - - Args: - sql_type: The string representation of the SQL type, a SQLAlchemy - TypeEngine class or object, or a custom-specified object. - - Raises: - ValueError: If the type received could not be translated to jsonschema. - - Returns: - The JSON Schema representation of the provided type. 
- """ - if isinstance(sql_type, (str, sqlalchemy.types.TypeEngine)): - return th.to_jsonschema_type(sql_type) - - if isinstance(sql_type, type): - if issubclass(sql_type, sqlalchemy.types.TypeEngine): - return th.to_jsonschema_type(sql_type) - - raise ValueError(f"Unexpected type received: '{sql_type.__name__}'") - - raise ValueError(f"Unexpected type received: '{type(sql_type).__name__}'") - - @staticmethod - def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: - """Return a JSON Schema representation of the provided type. - - By default will call `typing.to_sql_type()`. - - Developers may override this method to accept additional input argument types, - to support non-standard types, or to provide custom typing logic. - - If overriding this method, developers should call the default implementation - from the base class for all unhandled cases. - - Args: - jsonschema_type: The JSON Schema representation of the source type. - - Returns: - The SQLAlchemy type representation of the data type. - """ - return th.to_sql_type(jsonschema_type) - - @staticmethod - def get_fully_qualified_name( - table_name: str, - schema_name: Optional[str] = None, - db_name: Optional[str] = None, - delimiter: str = ".", - ) -> str: - """Concatenates a fully qualified name from the parts. - - Args: - table_name: The name of the table. - schema_name: The name of the schema. Defaults to None. - db_name: The name of the database. Defaults to None. - delimiter: Generally: '.' for SQL names and '-' for Singer names. - - Raises: - ValueError: If table_name is not provided or if neither schema_name or - db_name are provided. - - Returns: - The fully qualified name as a string. - """ - if db_name and schema_name: - result = delimiter.join([db_name, schema_name, table_name]) - elif db_name: - result = delimiter.join([db_name, table_name]) - elif schema_name: - result = delimiter.join([schema_name, table_name]) - elif table_name: - result = table_name - else: - raise ValueError( - "Could not generate fully qualified name for stream: " - + ":".join( - [ - db_name or "(unknown-db)", - schema_name or "(unknown-schema)", - table_name or "(unknown-table-name)", - ] - ) - ) - - return result - - @property - def _dialect(self) -> sqlalchemy.engine.Dialect: - """Return the dialect object. - - Returns: - The dialect object. - """ - return cast(sqlalchemy.engine.Dialect, self.connection.engine.dialect) - - @property - def _engine(self) -> sqlalchemy.engine.Engine: - """Return the dialect object. - - Returns: - The dialect object. - """ - return cast(sqlalchemy.engine.Engine, self.connection.engine) - - def quote(self, name: str) -> str: - """Quote a name if it needs quoting, using '.' as a name-part delimiter. - - Examples: - "my_table" => "`my_table`" - "my_schema.my_table" => "`my_schema`.`my_table`" - - Args: - name: The unquoted name. - - Returns: - str: The quoted name. - """ - return ".".join( - [ - self._dialect.identifier_preparer.quote(name_part) - for name_part in name.split(".") - ] - ) - - @lru_cache() - def _warn_no_view_detection(self) -> None: - """Print a warning, but only the first time.""" - self.logger.warning( - "Provider does not support get_view_names(). " - "Streams list may be incomplete or `is_view` may be unpopulated." - ) - - def get_schema_names(self, engine: Engine, inspected: Inspector) -> List[str]: - """Return a list of schema names in DB. 
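For reference, the fully-qualified-name helper shown above joins whichever parts are provided; a quick illustration with hypothetical names, assuming the helper keeps this signature after the connector's move to `singer_sdk.connectors`:

.. code-block:: python

    from singer_sdk.connectors import SQLConnector

    SQLConnector.get_fully_qualified_name(table_name="users", schema_name="public")
    # -> "public.users"

    SQLConnector.get_fully_qualified_name(
        table_name="users",
        schema_name="public",
        db_name="analytics",
        delimiter="-",
    )
    # -> "analytics-public-users"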
- - Args: - engine: SQLAlchemy engine - inspected: SQLAlchemy inspector instance for engine - - Returns: - List of schema names - """ - return inspected.get_schema_names() - - def get_object_names( - self, engine: Engine, inspected: Inspector, schema_name: str - ) -> List[Tuple[str, bool]]: - """Return a list of syncable objects. - - Args: - engine: SQLAlchemy engine - inspected: SQLAlchemy inspector instance for engine - schema_name: Schema name to inspect - - Returns: - List of tuples (<table_or_view_name>, <is_view>) - """ - # Get list of tables and views - table_names = inspected.get_table_names(schema=schema_name) - try: - view_names = inspected.get_view_names(schema=schema_name) - except NotImplementedError: - # Some DB providers do not understand 'views' - self._warn_no_view_detection() - view_names = [] - object_names = [(t, False) for t in table_names] + [ - (v, True) for v in view_names - ] - - return object_names - - # TODO maybe should be splitted into smaller parts? - def discover_catalog_entry( - self, - engine: Engine, - inspected: Inspector, - schema_name: str, - table_name: str, - is_view: bool, - ) -> CatalogEntry: - """Create `CatalogEntry` object for the given table or a view. - - Args: - engine: SQLAlchemy engine - inspected: SQLAlchemy inspector instance for engine - schema_name: Schema name to inspect - table_name: Name of the table or a view - is_view: Flag whether this object is a view, returned by `get_object_names` - - Returns: - `CatalogEntry` object for the given table or a view - """ - # Initialize unique stream name - unique_stream_id = self.get_fully_qualified_name( - db_name=None, - schema_name=schema_name, - table_name=table_name, - delimiter="-", - ) - - # Detect key properties - possible_primary_keys: List[List[str]] = [] - pk_def = inspected.get_pk_constraint(table_name, schema=schema_name) - if pk_def and "constrained_columns" in pk_def: - possible_primary_keys.append(pk_def["constrained_columns"]) - for index_def in inspected.get_indexes(table_name, schema=schema_name): - if index_def.get("unique", False): - possible_primary_keys.append(index_def["column_names"]) - key_properties = next(iter(possible_primary_keys), None) - - # Initialize columns list - table_schema = th.PropertiesList() - for column_def in inspected.get_columns(table_name, schema=schema_name): - column_name = column_def["name"] - is_nullable = column_def.get("nullable", False) - jsonschema_type: dict = self.to_jsonschema_type( - cast(sqlalchemy.types.TypeEngine, column_def["type"]) - ) - table_schema.append( - th.Property( - name=column_name, - wrapped=th.CustomType(jsonschema_type), - required=not is_nullable, - ) - ) - schema = table_schema.to_dict() - - # Initialize available replication methods - addl_replication_methods: List[str] = [""] # By default an empty list. - # Notes regarding replication methods: - # - 'INCREMENTAL' replication must be enabled by the user by specifying - # a replication_key value. - # - 'LOG_BASED' replication must be enabled by the developer, according - # to source-specific implementation capabilities. 
- replication_method = next(reversed(["FULL_TABLE"] + addl_replication_methods)) - - # Create the catalog entry object - catalog_entry = CatalogEntry( - tap_stream_id=unique_stream_id, - stream=unique_stream_id, - table=table_name, - key_properties=key_properties, - schema=singer.Schema.from_dict(schema), - is_view=is_view, - replication_method=replication_method, - metadata=MetadataMapping.get_standard_metadata( - schema_name=schema_name, - schema=schema, - replication_method=replication_method, - key_properties=key_properties, - valid_replication_keys=None, # Must be defined by user - ), - database=None, # Expects single-database context - row_count=None, - stream_alias=None, - replication_key=None, # Must be defined by user - ) - - return catalog_entry - - def discover_catalog_entries(self) -> List[dict]: - """Return a list of catalog entries from discovery. - - Returns: - The discovered catalog entries as a list. - """ - result: List[dict] = [] - engine = self.create_sqlalchemy_engine() - inspected = sqlalchemy.inspect(engine) - for schema_name in self.get_schema_names(engine, inspected): - # Iterate through each table and view - for table_name, is_view in self.get_object_names( - engine, inspected, schema_name - ): - catalog_entry = self.discover_catalog_entry( - engine, inspected, schema_name, table_name, is_view - ) - result.append(catalog_entry.to_dict()) - - return result - - def parse_full_table_name( - self, full_table_name: str - ) -> Tuple[Optional[str], Optional[str], str]: - """Parse a fully qualified table name into its parts. - - Developers may override this method if their platform does not support the - traditional 3-part convention: `db_name.schema_name.table_name` - - Args: - full_table_name: A table name or a fully qualified table name. Depending on - SQL the platform, this could take the following forms: - - `<db>.<schema>.<table>` (three part names) - - `<db>.<table>` (platforms which do not use schema groupings) - - `<schema>.<name>` (if DB name is already in context) - - `<table>` (if DB name and schema name are already in context) - - Returns: - A three part tuple (db_name, schema_name, table_name) with any unspecified - or unused parts returned as None. - """ - db_name: Optional[str] = None - schema_name: Optional[str] = None - - parts = full_table_name.split(".") - if len(parts) == 1: - table_name = full_table_name - if len(parts) == 2: - schema_name, table_name = parts - if len(parts) == 3: - db_name, schema_name, table_name = parts - - return db_name, schema_name, table_name - - def table_exists(self, full_table_name: str) -> bool: - """Determine if the target table already exists. - - Args: - full_table_name: the target table name. - - Returns: - True if table exists, False if not, None if unsure or undetectable. - """ - return cast( - bool, - sqlalchemy.inspect(self._engine).has_table(full_table_name), - ) - - def get_table_columns(self, full_table_name: str) -> Dict[str, sqlalchemy.Column]: - """Return a list of table columns. - - Args: - full_table_name: Fully qualified table name. - - Returns: - An ordered list of column objects. 
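A quick illustration of the three-part name parsing removed above (the same behavior is presumably preserved in the relocated connector class); `connector` is a hypothetical `SQLConnector` instance:

.. code-block:: python

    connector.parse_full_table_name("analytics.public.users")
    # -> ("analytics", "public", "users")
    connector.parse_full_table_name("public.users")
    # -> (None, "public", "users")
    connector.parse_full_table_name("users")
    # -> (None, None, "users")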
- """ - _, schema_name, table_name = self.parse_full_table_name(full_table_name) - inspector = sqlalchemy.inspect(self._engine) - columns = inspector.get_columns(table_name, schema_name) - - result: Dict[str, sqlalchemy.Column] = {} - for col_meta in columns: - result[col_meta["name"]] = sqlalchemy.Column( - col_meta["name"], - col_meta["type"], - nullable=col_meta.get("nullable", False), - ) - - return result - - def get_table(self, full_table_name: str) -> sqlalchemy.Table: - """Return a table object. - - Args: - full_table_name: Fully qualified table name. - - Returns: - A table object with column list. - """ - columns = self.get_table_columns(full_table_name).values() - _, schema_name, table_name = self.parse_full_table_name(full_table_name) - meta = sqlalchemy.MetaData() - return sqlalchemy.schema.Table( - table_name, meta, *list(columns), schema=schema_name - ) - - def column_exists(self, full_table_name: str, column_name: str) -> bool: - """Determine if the target table already exists. - - Args: - full_table_name: the target table name. - column_name: the target column name. - - Returns: - True if table exists, False if not. - """ - return column_name in self.get_table_columns(full_table_name) - - def create_empty_table( - self, - full_table_name: str, - schema: dict, - primary_keys: Optional[List[str]] = None, - partition_keys: Optional[List[str]] = None, - as_temp_table: bool = False, - ) -> None: - """Create an empty target table. - - Args: - full_table_name: the target table name. - schema: the JSON schema for the new table. - primary_keys: list of key properties. - partition_keys: list of partition keys. - as_temp_table: True to create a temp table. +from __future__ import annotations - Raises: - NotImplementedError: if temp tables are unsupported and as_temp_table=True. - RuntimeError: if a variant schema is passed with no properties defined. - """ - if as_temp_table: - raise NotImplementedError("Temporary tables are not supported.") - - _ = partition_keys # Not supported in generic implementation. - - meta = sqlalchemy.MetaData() - columns: List[sqlalchemy.Column] = [] - primary_keys = primary_keys or [] - try: - properties: dict = schema["properties"] - except KeyError: - raise RuntimeError( - f"Schema for '{full_table_name}' does not define properties: {schema}" - ) - for property_name, property_jsonschema in properties.items(): - is_primary_key = property_name in primary_keys - columns.append( - sqlalchemy.Column( - property_name, - self.to_sql_type(property_jsonschema), - primary_key=is_primary_key, - ) - ) - - _ = sqlalchemy.Table(full_table_name, meta, *columns) - meta.create_all(self._engine) - - def _create_empty_column( - self, - full_table_name: str, - column_name: str, - sql_type: sqlalchemy.types.TypeEngine, - ) -> None: - """Create a new column. - - Args: - full_table_name: The target table name. - column_name: The name of the new column. - sql_type: SQLAlchemy type engine to be used in creating the new column. - - Raises: - NotImplementedError: if adding columns is not supported. 
- """ - if not self.allow_column_add: - raise NotImplementedError("Adding columns is not supported.") - - create_column_clause = sqlalchemy.schema.CreateColumn( - sqlalchemy.Column( - column_name, - sql_type, - ) - ) - self.connection.execute( - sqlalchemy.DDL( - "ALTER TABLE %(table)s ADD COLUMN %(create_column)s", - { - "table": full_table_name, - "create_column": create_column_clause, - }, - ) - ) - - def prepare_table( - self, - full_table_name: str, - schema: dict, - primary_keys: List[str], - partition_keys: Optional[List[str]] = None, - as_temp_table: bool = False, - ) -> None: - """Adapt target table to provided schema if possible. - - Args: - full_table_name: the target table name. - schema: the JSON Schema for the table. - primary_keys: list of key properties. - partition_keys: list of partition keys. - as_temp_table: True to create a temp table. - """ - if not self.table_exists(full_table_name=full_table_name): - self.create_empty_table( - full_table_name=full_table_name, - schema=schema, - primary_keys=primary_keys, - partition_keys=partition_keys, - as_temp_table=as_temp_table, - ) - return - - for property_name, property_def in schema["properties"].items(): - self.prepare_column( - full_table_name, property_name, self.to_sql_type(property_def) - ) - - def prepare_column( - self, - full_table_name: str, - column_name: str, - sql_type: sqlalchemy.types.TypeEngine, - ) -> None: - """Adapt target table to provided schema if possible. - - Args: - full_table_name: the target table name. - column_name: the target column name. - sql_type: the SQLAlchemy type. - """ - if not self.column_exists(full_table_name, column_name): - self._create_empty_column( - full_table_name=full_table_name, - column_name=column_name, - sql_type=sql_type, - ) - return - - self._adapt_column_type( - full_table_name, - column_name=column_name, - sql_type=sql_type, - ) - - def rename_column(self, full_table_name: str, old_name: str, new_name: str) -> None: - """Rename the provided columns. - - Args: - full_table_name: The fully qualified table name. - old_name: The old column to be renamed. - new_name: The new name for the column. - - Raises: - NotImplementedError: If `self.allow_column_rename` is false. - """ - if not self.allow_column_rename: - raise NotImplementedError("Renaming columns is not supported.") - - self.connection.execute( - f"ALTER TABLE {full_table_name} " - f'RENAME COLUMN "{old_name}" to "{new_name}"' - ) - - def merge_sql_types( - self, sql_types: List[sqlalchemy.types.TypeEngine] - ) -> sqlalchemy.types.TypeEngine: - """Return a compatible SQL type for the selected type list. - - Args: - sql_types: List of SQL types. - - Returns: - A SQL type that is compatible with the input types. - - Raises: - ValueError: If sql_types argument has zero members. 
- """ - if not sql_types: - raise ValueError("Expected at least one member in `sql_types` argument.") - - if len(sql_types) == 1: - return sql_types[0] - - sql_types = self._sort_types(sql_types) - - if len(sql_types) > 2: - return self.merge_sql_types( - [self.merge_sql_types([sql_types[0], sql_types[1]])] + sql_types[2:] - ) - - assert len(sql_types) == 2 - generic_type = type(sql_types[0].as_generic()) - if isinstance(generic_type, type): - if issubclass( - generic_type, - (sqlalchemy.types.String, sqlalchemy.types.Unicode), - ): - return sql_types[0] - - elif isinstance( - generic_type, - (sqlalchemy.types.String, sqlalchemy.types.Unicode), - ): - return sql_types[0] - - raise ValueError( - f"Unable to merge sql types: {', '.join([str(t) for t in sql_types])}" - ) - - def _sort_types( - self, - sql_types: Iterable[sqlalchemy.types.TypeEngine], - ) -> List[sqlalchemy.types.TypeEngine]: - """Return the input types sorted from most to least compatible. - - For example, [Smallint, Integer, Datetime, String, Double] would become - [Unicode, String, Double, Integer, Smallint, Datetime]. - String types will be listed first, then decimal types, then integer types, - then bool types, and finally datetime and date. Higher precision, scale, and - length will be sorted earlier. - - Args: - sql_types (List[sqlalchemy.types.TypeEngine]): [description] - - Returns: - The sorted list. - """ - - def _get_type_sort_key( - sql_type: sqlalchemy.types.TypeEngine, - ) -> Tuple[int, int]: - # return rank, with higher numbers ranking first - - _len = int(getattr(sql_type, "length", 0) or 0) - - _pytype = cast(type, sql_type.python_type) - if issubclass(_pytype, (str, bytes)): - return 900, _len - elif issubclass(_pytype, datetime): - return 600, _len - elif issubclass(_pytype, float): - return 400, _len - elif issubclass(_pytype, int): - return 300, _len - - return 0, _len - - return sorted(sql_types, key=_get_type_sort_key, reverse=True) - - def _get_column_type( - self, full_table_name: str, column_name: str - ) -> sqlalchemy.types.TypeEngine: - """Gets the SQL type of the declared column. - - Args: - full_table_name: The name of the table. - column_name: The name of the column. - - Returns: - The type of the column. - - Raises: - KeyError: If the provided column name does not exist. - """ - try: - column = self.get_table_columns(full_table_name)[column_name] - except KeyError as ex: - raise KeyError( - f"Column `{column_name}` does not exist in table `{full_table_name}`." - ) from ex - - return cast(sqlalchemy.types.TypeEngine, column.type) +import abc +import typing as t - def _adapt_column_type( - self, - full_table_name: str, - column_name: str, - sql_type: sqlalchemy.types.TypeEngine, - ) -> None: - """Adapt table column type to support the new JSON schema type. +import singer_sdk.helpers._catalog as catalog +from singer_sdk._singerlib import CatalogEntry, MetadataMapping +from singer_sdk.connectors import SQLConnector +from singer_sdk.streams.core import Stream - Args: - full_table_name: The target table name. - column_name: The target column name. - sql_type: The new SQLAlchemy type. - - Raises: - NotImplementedError: if altering columns is not supported. - """ - current_type = self._get_column_type(full_table_name, column_name) - compatible_sql_type = self.merge_sql_types([current_type, sql_type]) - if current_type == compatible_sql_type: - # Nothing to do - return - - if not self.allow_column_alter: - raise NotImplementedError( - "Altering columns is not supported. 
" - f"Could not convert column '{full_table_name}.column_name' " - f"from '{current_type}' to '{compatible_sql_type}'." - ) - - self.connection.execute( - sqlalchemy.DDL( - "ALTER TABLE %(table)s ALTER COLUMN %(col_name)s (%(col_type)s)", - { - "table": full_table_name, - "col_name": column_name, - "col_type": compatible_sql_type, - }, - ) - ) +if t.TYPE_CHECKING: + from singer_sdk.tap_base import Tap class SQLStream(Stream, metaclass=abc.ABCMeta): """Base class for SQLAlchemy-based streams.""" connector_class = SQLConnector + _cached_schema: dict | None = None def __init__( self, - tap: TapBaseClass, + tap: Tap, catalog_entry: dict, - connector: Optional[SQLConnector] = None, + connector: SQLConnector | None = None, ) -> None: """Initialize the database stream. @@ -871,11 +36,7 @@ def __init__( connector: Optional connector to reuse. """ self._connector: SQLConnector - if connector: - self._connector = connector - else: - self._connector = self.connector_class(dict(tap.config)) - + self._connector = connector or self.connector_class(dict(tap.config)) self.catalog_entry = catalog_entry super().__init__( tap=tap, @@ -890,11 +51,11 @@ def _singer_catalog_entry(self) -> CatalogEntry: Returns: A CatalogEntry object. """ - return cast(CatalogEntry, CatalogEntry.from_dict(self.catalog_entry)) + return t.cast(CatalogEntry, CatalogEntry.from_dict(self.catalog_entry)) @property def connector(self) -> SQLConnector: - """The connector object. + """Return a connector object. Returns: The connector object. @@ -903,7 +64,7 @@ def connector(self) -> SQLConnector: @property def metadata(self) -> MetadataMapping: - """The Singer metadata. + """Return the Singer metadata. Metadata from an input catalog will override standard metadata. @@ -912,7 +73,7 @@ def metadata(self) -> MetadataMapping: """ return self._singer_catalog_entry.metadata - @property + @property # TODO: Investigate @cached_property after py > 3.7 def schema(self) -> dict: """Return metadata object (dict) as specified in the Singer spec. @@ -921,7 +82,13 @@ def schema(self) -> dict: Returns: The schema object. """ - return cast(dict, self._singer_catalog_entry.schema.to_dict()) + if not self._cached_schema: + self._cached_schema = t.cast( + dict, + self._singer_catalog_entry.schema.to_dict(), + ) + + return self._cached_schema @property def tap_stream_id(self) -> str: @@ -938,7 +105,7 @@ def tap_stream_id(self) -> str: return self._singer_catalog_entry.tap_stream_id @property - def primary_keys(self) -> Optional[List[str]]: + def primary_keys(self) -> list[str] | None: """Get primary keys from the catalog entry definition. Returns: @@ -947,7 +114,7 @@ def primary_keys(self) -> Optional[List[str]]: return self._singer_catalog_entry.metadata.root.table_key_properties or [] @primary_keys.setter - def primary_keys(self, new_value: List[str]) -> None: + def primary_keys(self, new_value: list[str]) -> None: """Set or reset the primary key(s) in the stream's catalog entry. 
Args: @@ -967,9 +134,8 @@ def fully_qualified_name(self) -> str: """ catalog_entry = self._singer_catalog_entry if not catalog_entry.table: - raise ValueError( - f"Missing table name in catalog entry: {catalog_entry.to_dict()}" - ) + msg = f"Missing table name in catalog entry: {catalog_entry.to_dict()}" + raise ValueError(msg) return self.connector.get_fully_qualified_name( table_name=catalog_entry.table, @@ -977,10 +143,23 @@ def fully_qualified_name(self) -> str: db_name=catalog_entry.database, ) - # Get records from stream + def get_selected_schema(self) -> dict: + """Return a copy of the Stream JSON schema, dropping any fields not selected. - def get_records(self, context: Optional[dict]) -> Iterable[Dict[str, Any]]: - """Return a generator of row-type dictionary objects. + Returns: + A dictionary containing a copy of the Stream JSON schema, filtered + to any selection criteria. + """ + return catalog.get_selected_schema( + stream_name=self.name, + schema=self.schema, + mask=self.mask, + logger=self.logger, + ) + + # Get records from stream + def get_records(self, context: dict | None) -> t.Iterable[dict[str, t.Any]]: + """Return a generator of record-type dictionary objects. If the stream has a replication_key value defined, records will be sorted by the incremental key. If the stream also has an available starting bookmark, the @@ -998,26 +177,38 @@ def get_records(self, context: Optional[dict]) -> Iterable[Dict[str, Any]]: not support partitioning. """ if context: - raise NotImplementedError( - f"Stream '{self.name}' does not support partitioning." - ) + msg = f"Stream '{self.name}' does not support partitioning." + raise NotImplementedError(msg) - table = self.connector.get_table(self.fully_qualified_name) + selected_column_names = self.get_selected_schema()["properties"].keys() + table = self.connector.get_table( + full_table_name=self.fully_qualified_name, + column_names=selected_column_names, + ) query = table.select() + if self.replication_key: replication_key_col = table.columns[self.replication_key] query = query.order_by(replication_key_col) start_val = self.get_starting_replication_key_value(context) if start_val: - query = query.where( - sqlalchemy.text(":replication_key >= :start_val").bindparams( - replication_key=replication_key_col, start_val=start_val - ) - ) - - for row in self.connector.connection.execute(query): - yield dict(row) + query = query.where(replication_key_col >= start_val) + + if self.ABORT_AT_RECORD_COUNT is not None: + # Limit record count to one greater than the abort threshold. This ensures + # `MaxRecordsLimitException` exception is properly raised by caller + # `Stream._sync_records()` if more records are available than can be + # processed. 
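# Illustrative sketch (not part of this changeset) of the query shape built by
# get_records(): order by the replication key, filter on the starting value,
# and cap the row count. Table and column names are made up; assumes SQLAlchemy.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
metadata = sa.MetaData()
users = sa.Table(
    "users",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("updated_at", sa.String),
)
metadata.create_all(engine)

query = (
    users.select()
    .order_by(users.c.updated_at)
    .where(users.c.updated_at >= "2023-01-01")
    .limit(10 + 1)  # one more than the abort threshold, as described above
)
with engine.connect() as conn:
    records = [dict(row._mapping) for row in conn.execute(query)]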
+ query = query.limit(self.ABORT_AT_RECORD_COUNT + 1) + + with self.connector._connect() as conn: + for record in conn.execute(query): + transformed_record = self.post_process(dict(record._mapping)) + if transformed_record is None: + # Record filtered out during post_process() + continue + yield transformed_record __all__ = ["SQLStream", "SQLConnector"] diff --git a/singer_sdk/tap_base.py b/singer_sdk/tap_base.py index 75f1b481e..d4b5a8dc3 100644 --- a/singer_sdk/tap_base.py +++ b/singer_sdk/tap_base.py @@ -1,29 +1,38 @@ """Tap abstract class.""" + +from __future__ import annotations + import abc +import contextlib import json +import typing as t from enum import Enum -from pathlib import Path, PurePath -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast import click -from singer_sdk.cli import common_options -from singer_sdk.exceptions import MaxRecordsLimitException +from singer_sdk._singerlib import Catalog, StateMessage, write_message +from singer_sdk.configuration._dict_config import merge_missing_config_jsonschema +from singer_sdk.exceptions import AbortedSyncFailedException, AbortedSyncPausedException from singer_sdk.helpers import _state from singer_sdk.helpers._classproperty import classproperty from singer_sdk.helpers._compat import final -from singer_sdk.helpers._singer import Catalog from singer_sdk.helpers._state import write_stream_state from singer_sdk.helpers._util import read_json_file from singer_sdk.helpers.capabilities import ( + BATCH_CONFIG, CapabilitiesEnum, PluginCapabilities, TapCapabilities, ) -from singer_sdk.mapper import PluginMapper from singer_sdk.plugin_base import PluginBase -from singer_sdk.streams import SQLStream, Stream + +if t.TYPE_CHECKING: + from pathlib import PurePath + + from singer_sdk.connectors import SQLConnector + from singer_sdk.mapper import PluginMapper + from singer_sdk.streams import SQLStream, Stream STREAM_MAPS_CONFIG = "stream_maps" @@ -33,7 +42,7 @@ class CliTestOptionValue(Enum): All = "all" Schema = "schema" - Disabled = False + Disabled = "disabled" class Tap(PluginBase, metaclass=abc.ABCMeta): @@ -47,11 +56,13 @@ class Tap(PluginBase, metaclass=abc.ABCMeta): def __init__( self, - config: Optional[Union[dict, PurePath, str, List[Union[PurePath, str]]]] = None, - catalog: Union[PurePath, str, dict, Catalog, None] = None, - state: Union[PurePath, str, dict, None] = None, + *, + config: dict | PurePath | str | list[PurePath | str] | None = None, + catalog: PurePath | str | dict | Catalog | None = None, + state: PurePath | str | dict | None = None, parse_env_config: bool = False, validate_config: bool = True, + setup_mapper: bool = True, ) -> None: """Initialize the tap. @@ -64,6 +75,7 @@ def __init__( parse_env_config: Whether to look for configuration values in environment variables. validate_config: True to require validation of config settings. + setup_mapper: True to initialize the plugin mapper. 
""" super().__init__( config=config, @@ -72,27 +84,23 @@ def __init__( ) # Declare private members - self._streams: Optional[Dict[str, Stream]] = None - self._input_catalog: Optional[Catalog] = None - self._state: Dict[str, Stream] = {} - self._catalog: Optional[Catalog] = None # Tap's working catalog + self._streams: dict[str, Stream] | None = None + self._input_catalog: Catalog | None = None + self._state: dict[str, Stream] = {} + self._catalog: Catalog | None = None # Tap's working catalog # Process input catalog if isinstance(catalog, Catalog): self._input_catalog = catalog elif isinstance(catalog, dict): - self._input_catalog = Catalog.from_dict(catalog) + self._input_catalog = Catalog.from_dict(catalog) # type: ignore[arg-type] elif catalog is not None: self._input_catalog = Catalog.from_dict(read_json_file(catalog)) - # Initialize mapper - self.mapper: PluginMapper - self.mapper = PluginMapper( - plugin_config=dict(self.config), - logger=self.logger, - ) + self._mapper: PluginMapper | None = None - self.mapper.register_raw_streams_from_catalog(self.catalog) + if setup_mapper: + self.setup_mapper() # Process state state_dict: dict = {} @@ -105,7 +113,7 @@ def __init__( # Class properties @property - def streams(self) -> Dict[str, Stream]: + def streams(self) -> dict[str, Stream]: """Get streams discovered or catalogued for this tap. Results will be cached after first execution. @@ -134,11 +142,12 @@ def state(self) -> dict: RuntimeError: If state has not been initialized. """ if self._state is None: - raise RuntimeError("Could not read from uninitialized state.") + msg = "Could not read from uninitialized state." + raise RuntimeError(msg) return self._state @property - def input_catalog(self) -> Optional[Catalog]: + def input_catalog(self) -> Catalog | None: """Get the catalog passed to the tap. Returns: @@ -158,8 +167,13 @@ def catalog(self) -> Catalog: return self._catalog + def setup_mapper(self) -> None: + """Initialize the plugin mapper for this tap.""" + super().setup_mapper() + self.mapper.register_raw_streams_from_catalog(self.catalog) + @classproperty - def capabilities(self) -> List[CapabilitiesEnum]: + def capabilities(self) -> list[CapabilitiesEnum]: """Get tap capabilities. Returns: @@ -172,39 +186,95 @@ def capabilities(self) -> List[CapabilitiesEnum]: PluginCapabilities.ABOUT, PluginCapabilities.STREAM_MAPS, PluginCapabilities.FLATTENING, + PluginCapabilities.BATCH, ] - # Connection test: + @classmethod + def append_builtin_config(cls: type[PluginBase], config_jsonschema: dict) -> None: + """Appends built-in config to `config_jsonschema` if not already set. + + To customize or disable this behavior, developers may either override this class + method or override the `capabilities` property to disabled any unwanted + built-in capabilities. + + For all except very advanced use cases, we recommend leaving these + implementations "as-is", since this provides the most choice to users and is + the most "future proof" in terms of taking advantage of built-in capabilities + which may be added in the future. + + Args: + config_jsonschema: [description] + """ + PluginBase.append_builtin_config(config_jsonschema) + + capabilities = cls.capabilities + if PluginCapabilities.BATCH in capabilities: + merge_missing_config_jsonschema(BATCH_CONFIG, config_jsonschema) + + # Connection and sync tests: @final def run_connection_test(self) -> bool: + """Run connection test, aborting each stream after 1 record. + + Returns: + True if the test succeeded. 
+ """ + return self.run_sync_dry_run( + dry_run_record_limit=1, + streams=self.streams.values(), + ) + + @final + def run_sync_dry_run( + self, + dry_run_record_limit: int | None = 1, + streams: t.Iterable[Stream] | None = None, + ) -> bool: """Run connection test. + Exceptions of type `MaxRecordsLimitException` and + `PartialSyncSuccessException` will be ignored. + + Args: + dry_run_record_limit: The max number of records to sync per stream object. + streams: The streams to test. If omitted, all streams will be tested. + Returns: True if the test succeeded. """ - for stream in self.streams.values(): + if streams is None: + streams = self.streams.values() + + for stream in streams: # Initialize streams' record limits before beginning the sync test. - stream._MAX_RECORDS_LIMIT = 1 + stream.ABORT_AT_RECORD_COUNT = dry_run_record_limit - for stream in self.streams.values(): + # Force selection of streams. + stream.selected = True + + for stream in streams: if stream.parent_stream_type: self.logger.debug( - f"Child stream '{type(stream).__name__}' should be called by " - f"parent stream '{stream.parent_stream_type.__name__}'. " - "Skipping direct invocation." + "Child stream '%s' should be called by " + "parent stream '%s'. " + "Skipping direct invocation.", + type(stream).__name__, + stream.parent_stream_type.__name__, ) continue - try: + with contextlib.suppress( + AbortedSyncFailedException, + AbortedSyncPausedException, + ): stream.sync() - except MaxRecordsLimitException: - pass return True @final def write_schemas(self) -> None: """Write a SCHEMA message for all known streams to STDOUT.""" for stream in self.streams.values(): + stream.selected = True stream._write_schema_message() # Stream detection: @@ -216,7 +286,7 @@ def run_discovery(self) -> str: The catalog as a string of JSON. """ catalog_text = self.catalog_json_text - print(catalog_text) + print(catalog_text) # noqa: T201 return catalog_text @property @@ -226,7 +296,7 @@ def catalog_dict(self) -> dict: Returns: The tap's catalog as a dict """ - return cast(dict, self._singer_catalog.to_dict()) + return t.cast(dict, self._singer_catalog.to_dict()) @property def catalog_json_text(self) -> str: @@ -242,14 +312,14 @@ def _singer_catalog(self) -> Catalog: """Return a Catalog object. Returns: - :class:`singer_sdk.helpers._singer.Catalog`. + :class:`singer_sdk._singerlib.Catalog`. """ return Catalog( (stream.tap_stream_id, stream._singer_catalog_entry) for stream in self.streams.values() ) - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> t.Sequence[Stream]: """Initialize all available streams and return them as a list. Return: @@ -259,13 +329,14 @@ def discover_streams(self) -> List[Stream]: NotImplementedError: If the tap implementation does not override this method. """ - raise NotImplementedError( - f"Tap '{self.name}' does not support discovery. " - "Please set the '--catalog' command line argument and try again." + msg = ( + f"Tap '{self.name}' does not support discovery. Please set the '--catalog' " + "command line argument and try again." ) + raise NotImplementedError(msg) @final - def load_streams(self) -> List[Stream]: + def load_streams(self) -> list[Stream]: """Load streams from discovery and initialize DAG. 
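# Illustrative sketch (not part of this changeset) of the contextlib.suppress
# pattern run_sync_dry_run uses above: exceptions that only signal "record
# limit reached" are swallowed so the dry run can still report success.
# The exception name is a stand-in for the SDK's abort exceptions.
import contextlib

class AbortedSync(Exception):
    pass

def sync(limit: int) -> None:
    for count in range(100):
        if count >= limit:
            raise AbortedSync

with contextlib.suppress(AbortedSync):
    sync(limit=1)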
Return the output of `self.discover_streams()` to enumerate @@ -277,7 +348,7 @@ def load_streams(self) -> List[Stream]: # Build the parent-child dependency DAG # Index streams by type - streams_by_type: Dict[Type[Stream], List[Stream]] = {} + streams_by_type: dict[type[Stream], list[Stream]] = {} for stream in self.discover_streams(): stream_type = type(stream) if stream_type not in streams_by_type: @@ -292,7 +363,9 @@ def load_streams(self) -> List[Stream]: for stream in streams: parent.child_streams.append(stream) self.logger.info( - f"Added '{stream.name}' as child stream to '{parent.name}'" + "Added '%s' as child stream to '%s'", + stream.name, + parent.name, ) streams = [stream for streams in streams_by_type.values() for stream in streams] @@ -304,7 +377,7 @@ def load_streams(self) -> List[Stream]: # Bookmarks and state management - def load_state(self, state: Dict[str, Any]) -> None: + def load_state(self, state: dict[str, t.Any]) -> None: """Merge or initialize stream state with the provided state dictionary input. Override this method to perform validation and backwards-compatibility patches @@ -319,7 +392,8 @@ def load_state(self, state: Dict[str, Any]) -> None: initialized. """ if self.state is None: - raise ValueError("Cannot write to uninitialized state dictionary.") + msg = "Cannot write to uninitialized state dictionary." + raise ValueError(msg) for stream_name, stream_state in state.get("bookmarks", {}).items(): for key, val in stream_state.items(): @@ -334,7 +408,7 @@ def load_state(self, state: Dict[str, Any]) -> None: def _reset_state_progress_markers(self) -> None: """Clear prior jobs' progress markers at beginning of sync.""" - for _, state in self.state.get("bookmarks", {}).items(): + for state in self.state.get("bookmarks", {}).values(): _state.reset_state_progress_markers(state) for partition_state in state.get("partitions", []): _state.reset_state_progress_markers(partition_state) @@ -347,10 +421,13 @@ def _set_compatible_replication_methods(self) -> None: for descendent in stream.descendent_streams: if descendent.selected and descendent.ignore_parent_replication_key: self.logger.warning( - f"Stream descendent '{descendent.name}' is selected and " - f"its parent '{stream.name}' does not use inclusive " - f"replication keys. " - f"Forcing full table replication for '{stream.name}'." + "Stream descendent '%s' is selected and " + "its parent '%s' does not use inclusive " + "replication keys. " + "Forcing full table replication for '%s'.", + descendent.name, + stream.name, + stream.name, ) stream.replication_key = None stream.forced_replication_method = "FULL_TABLE" @@ -362,17 +439,21 @@ def sync_all(self) -> None: """Sync all streams.""" self._reset_state_progress_markers() self._set_compatible_replication_methods() - stream: "Stream" + write_message(StateMessage(value=self.state)) + + stream: Stream for stream in self.streams.values(): if not stream.selected and not stream.has_selected_descendents: - self.logger.info(f"Skipping deselected stream '{stream.name}'.") + self.logger.info("Skipping deselected stream '%s'.", stream.name) continue if stream.parent_stream_type: self.logger.debug( - f"Child stream '{type(stream).__name__}' is expected to be called " - f"by parent stream '{stream.parent_stream_type.__name__}'. " - "Skipping direct invocation." + "Child stream '%s' is expected to be called " + "by parent stream '%s'. 
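# Illustrative sketch (not part of this changeset): the f-string log calls are
# rewritten throughout this diff to printf-style arguments so interpolation is
# deferred until the logger decides the record will actually be emitted.
import logging

logger = logging.getLogger("example")
stream_name = "users"
logger.info("Skipping deselected stream '%s'.", stream_name)  # lazy interpolation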
" + "Skipping direct invocation.", + type(stream).__name__, + stream.parent_stream_type.__name__, ) continue @@ -386,164 +467,178 @@ def sync_all(self) -> None: # Command Line Execution - @classproperty - def cli(cls) -> Callable: - """Execute standard CLI handler for taps. + @classmethod + def invoke( # type: ignore[override] + cls: type[Tap], + *, + about: bool = False, + about_format: str | None = None, + config: tuple[str, ...] = (), + state: str | None = None, + catalog: str | None = None, + ) -> None: + """Invoke the tap's command line interface. - Returns: - A callable CLI object. + Args: + about: Display package metadata and settings. + about_format: Specify output style for `--about`. + config: Configuration file location or 'ENV' to use environment + variables. Accepts multiple inputs as a tuple. + catalog: Use a Singer catalog file with the tap.", + state: Use a bookmarks file for incremental replication. """ + super().invoke(about=about, about_format=about_format) + cls.print_version(print_fn=cls.logger.info) + config_files, parse_env_config = cls.config_from_cli_args(*config) - @common_options.PLUGIN_VERSION - @common_options.PLUGIN_ABOUT - @common_options.PLUGIN_ABOUT_FORMAT - @common_options.PLUGIN_CONFIG - @click.option( - "--discover", - is_flag=True, - help="Run the tap in discovery mode.", - ) - @click.option( - "--test", - is_flag=False, - flag_value=CliTestOptionValue.All.value, - default=CliTestOptionValue.Disabled, - help=( - "Use --test to sync a single record for each stream. " - + "Use --test=schema to test schema output without syncing " - + "records." - ), - ) - @click.option( - "--catalog", - help="Use a Singer catalog file with the tap.", - type=click.Path(), + tap = cls( + config=config_files, # type: ignore[arg-type] + state=state, + catalog=catalog, + parse_env_config=parse_env_config, + validate_config=True, ) - @click.option( - "--state", - help="Use a bookmarks file for incremental replication.", - type=click.Path(), + tap.sync_all() + + @classmethod + def cb_discover( + cls: type[Tap], + ctx: click.Context, + param: click.Option, # noqa: ARG003 + value: bool, # noqa: FBT001 + ) -> None: + """CLI callback to run the tap in discovery mode. + + Args: + ctx: Click context. + param: Click option. + value: Whether to run in discovery mode. + """ + if not value: + return + + config_args = ctx.params.get("config", ()) + config_files, parse_env_config = cls.config_from_cli_args(*config_args) + tap = cls( + config=config_files, # type: ignore[arg-type] + parse_env_config=parse_env_config, + validate_config=False, + setup_mapper=False, ) - @click.command( - help="Execute the Singer tap.", - context_settings={"help_option_names": ["--help"]}, + tap.run_discovery() + ctx.exit() + + @classmethod + def cb_test( + cls: type[Tap], + ctx: click.Context, + param: click.Option, # noqa: ARG003 + value: bool, # noqa: FBT001 + ) -> None: + """CLI callback to run the tap in test mode. + + Args: + ctx: Click context. + param: Click option. + value: Whether to run in test mode. + """ + if value == CliTestOptionValue.Disabled.value: + return + + config_args = ctx.params.get("config", ()) + config_files, parse_env_config = cls.config_from_cli_args(*config_args) + tap = cls( + config=config_files, # type: ignore[arg-type] + parse_env_config=parse_env_config, + validate_config=True, ) - def cli( - version: bool = False, - about: bool = False, - discover: bool = False, - test: CliTestOptionValue = CliTestOptionValue.Disabled, - config: Tuple[str, ...] 
= (), - state: str = None, - catalog: str = None, - format: str = None, - ) -> None: - """Handle command line execution. - - Args: - version: Display the package version. - about: Display package metadata and settings. - discover: Run the tap in discovery mode. - test: Test connectivity by syncing a single record and exiting. - format: Specify output style for `--about`. - config: Configuration file location or 'ENV' to use environment - variables. Accepts multiple inputs as a tuple. - catalog: Use a Singer catalog file with the tap.", - state: Use a bookmarks file for incremental replication. - - Raises: - FileNotFoundError: If the config file does not exist. - """ - if version: - cls.print_version() - return - - if not about: - cls.print_version(print_fn=cls.logger.info) - else: - cls.print_about(format=format) - return - - validate_config: bool = True - if discover: - # Don't abort on validation failures - validate_config = False - - parse_env_config = False - config_files: List[PurePath] = [] - for config_path in config: - if config_path == "ENV": - # Allow parse from env vars: - parse_env_config = True - continue - - # Validate config file paths before adding to list - if not Path(config_path).is_file(): - raise FileNotFoundError( - f"Could not locate config file at '{config_path}'." - "Please check that the file exists." - ) - config_files.append(Path(config_path)) + if value == CliTestOptionValue.Schema.value: + tap.write_schemas() + else: + tap.run_connection_test() - tap = cls( # type: ignore # Ignore 'type not callable' - config=config_files or None, - state=state, - catalog=catalog, - parse_env_config=parse_env_config, - validate_config=validate_config, - ) + ctx.exit() - if discover: - tap.run_discovery() - if test == CliTestOptionValue.All.value: - tap.run_connection_test() - elif test == CliTestOptionValue.All.value: - tap.run_connection_test() - elif test == CliTestOptionValue.Schema.value: - tap.write_schemas() - else: - tap.sync_all() + @classmethod + def get_singer_command(cls: type[Tap]) -> click.Command: + """Execute standard CLI handler for taps. - return cli + Returns: + A click.Command object. + """ + command = super().get_singer_command() + command.help = "Execute the Singer tap." + command.params.extend( + [ + click.Option( + ["--discover"], + is_flag=True, + help="Run the tap in discovery mode.", + callback=cls.cb_discover, + expose_value=False, + ), + click.Option( + ["--test"], + is_flag=False, + flag_value=CliTestOptionValue.All.value, + default=CliTestOptionValue.Disabled.value, + help=( + "Use --test to sync a single record for each stream. " + "Use --test=schema to test schema output without syncing " + "records." + ), + callback=cls.cb_test, + expose_value=False, + ), + click.Option( + ["--catalog"], + help="Use a Singer catalog file with the tap.", + type=click.Path(), + ), + click.Option( + ["--state"], + help="Use a bookmarks file for incremental replication.", + type=click.Path(), + ), + ], + ) + + return command class SQLTap(Tap): """A specialized Tap for extracting from SQL streams.""" # Stream class used to initialize new SQL streams from their catalog declarations. 
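# Illustrative sketch (not part of this changeset) of composing a click.Command
# from Option objects with eager callbacks, as get_singer_command() does above.
# Assumes click is installed; the option name and messages are made up.
import click

def cb_check(ctx: click.Context, param: click.Option, value: bool) -> None:
    if not value:
        return
    click.echo("check mode")
    ctx.exit()

command = click.Command(
    "demo",
    params=[
        click.Option(["--check"], is_flag=True, callback=cb_check, expose_value=False),
    ],
    callback=lambda: click.echo("normal run"),
)

if __name__ == "__main__":
    command()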
- default_stream_class: Type[SQLStream] + default_stream_class: type[SQLStream] - def __init__( - self, - config: Optional[Union[dict, PurePath, str, List[Union[PurePath, str]]]] = None, - catalog: Union[PurePath, str, dict, None] = None, - state: Union[PurePath, str, dict, None] = None, - parse_env_config: bool = False, - validate_config: bool = True, - ) -> None: + _tap_connector: SQLConnector | None = None + + def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: """Initialize the SQL tap. The SQLTap initializer additionally creates a cache variable for _catalog_dict. Args: - config: Tap configuration. Can be a dictionary, a single path to a - configuration file, or a list of paths to multiple configuration - files. - catalog: Tap catalog. Can be a dictionary or a path to the catalog file. - state: Tap state. Can be dictionary or a path to the state file. - parse_env_config: Whether to look for configuration values in environment - variables. - validate_config: True to require validation of config settings. + *args: Positional arguments for the Tap initializer. + **kwargs: Keyword arguments for the Tap initializer. """ - self._catalog_dict: Optional[dict] = None - super().__init__( - config=config, - catalog=catalog, - state=state, - parse_env_config=parse_env_config, - validate_config=validate_config, - ) + self._catalog_dict: dict | None = None + super().__init__(*args, **kwargs) + + @property + def tap_connector(self) -> SQLConnector: + """The connector object. + + Returns: + The connector object. + """ + if self._tap_connector is None: + self._tap_connector = self.default_stream_class.connector_class( + dict(self.config), + ) + return self._tap_connector @property def catalog_dict(self) -> dict: @@ -558,22 +653,25 @@ def catalog_dict(self) -> dict: if self.input_catalog: return self.input_catalog.to_dict() - connector = self.default_stream_class.connector_class(dict(self.config)) + connector = self.tap_connector - result: Dict[str, List[dict]] = {"streams": []} + result: dict[str, list[dict]] = {"streams": []} result["streams"].extend(connector.discover_catalog_entries()) self._catalog_dict = result return self._catalog_dict - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> list[Stream]: """Initialize all available streams and return them as a list. Returns: List of discovered Stream objects. 
""" - result: List[Stream] = [] - for catalog_entry in self.catalog_dict["streams"]: - result.append(self.default_stream_class(self, catalog_entry)) - - return result + return [ + self.default_stream_class( + tap=self, + catalog_entry=catalog_entry, + connector=self.tap_connector, + ) + for catalog_entry in self.catalog_dict["streams"] + ] diff --git a/singer_sdk/target_base.py b/singer_sdk/target_base.py index dad17f09c..a5386199f 100644 --- a/singer_sdk/target_base.py +++ b/singer_sdk/target_base.py @@ -1,26 +1,38 @@ """Target abstract class.""" +from __future__ import annotations + import abc import copy import json import sys import time -from io import FileIO -from pathlib import Path, PurePath -from typing import IO, Callable, Counter, Dict, List, Optional, Tuple, Type, Union +import typing as t import click from joblib import Parallel, delayed, parallel_backend -from singer_sdk.cli import common_options -from singer_sdk.exceptions import RecordsWitoutSchemaException +from singer_sdk.exceptions import RecordsWithoutSchemaException +from singer_sdk.helpers._batch import BaseBatchFileEncoding from singer_sdk.helpers._classproperty import classproperty from singer_sdk.helpers._compat import final -from singer_sdk.helpers.capabilities import CapabilitiesEnum, PluginCapabilities +from singer_sdk.helpers.capabilities import ( + ADD_RECORD_METADATA_CONFIG, + BATCH_CONFIG, + TARGET_SCHEMA_CONFIG, + CapabilitiesEnum, + PluginCapabilities, + TargetCapabilities, +) from singer_sdk.io_base import SingerMessageType, SingerReader -from singer_sdk.mapper import PluginMapper from singer_sdk.plugin_base import PluginBase -from singer_sdk.sinks import Sink + +if t.TYPE_CHECKING: + from pathlib import PurePath + + from singer_sdk.connectors import SQLConnector + from singer_sdk.mapper import PluginMapper + from singer_sdk.sinks import Sink, SQLSink _MAX_PARALLELISM = 8 @@ -35,17 +47,19 @@ class Target(PluginBase, SingerReader, metaclass=abc.ABCMeta): object for that record. """ - _MAX_RECORD_AGE_IN_MINUTES: float = 30.0 + _MAX_RECORD_AGE_IN_MINUTES: float = 5.0 # Default class to use for creating new sink objects. # Required if `Target.get_sink_class()` is not defined. - default_sink_class: Optional[Type[Sink]] = None + default_sink_class: type[Sink] def __init__( self, - config: Optional[Union[dict, PurePath, str, List[Union[PurePath, str]]]] = None, + *, + config: dict | PurePath | str | list[PurePath | str] | None = None, parse_env_config: bool = False, validate_config: bool = True, + setup_mapper: bool = True, ) -> None: """Initialize the target. @@ -56,6 +70,7 @@ def __init__( parse_env_config: Whether to look for configuration values in environment variables. validate_config: True to require validation of config settings. + setup_mapper: True to setup the mapper. 
Set to False if you want to """ super().__init__( config=config, @@ -63,24 +78,22 @@ def __init__( validate_config=validate_config, ) - self._latest_state: Dict[str, dict] = {} - self._drained_state: Dict[str, dict] = {} - self._sinks_active: Dict[str, Sink] = {} - self._sinks_to_clear: List[Sink] = [] - self._max_parallelism: Optional[int] = _MAX_PARALLELISM + self._latest_state: dict[str, dict] = {} + self._drained_state: dict[str, dict] = {} + self._sinks_active: dict[str, Sink] = {} + self._sinks_to_clear: list[Sink] = [] + self._max_parallelism: int | None = _MAX_PARALLELISM # Approximated for max record age enforcement self._last_full_drain_at: float = time.time() - # Initialize mapper - self.mapper: PluginMapper - self.mapper = PluginMapper( - plugin_config=dict(self.config), - logger=self.logger, - ) + self._mapper: PluginMapper | None = None + + if setup_mapper: + self.setup_mapper() @classproperty - def capabilities(self) -> List[CapabilitiesEnum]: + def capabilities(self) -> list[CapabilitiesEnum]: """Get target capabilities. Returns: @@ -121,9 +134,9 @@ def get_sink( self, stream_name: str, *, - record: Optional[dict] = None, - schema: Optional[dict] = None, - key_properties: Optional[List[str]] = None, + record: dict | None = None, + schema: dict | None = None, + key_properties: list[str] | None = None, ) -> Sink: """Return a sink for the given stream name. @@ -135,7 +148,7 @@ def get_sink( sink depending on the values within the `record` object. Otherwise, please see `default_sink_class` property and/or the `get_sink_class()` method. - Raises :class:`singer_sdk.exceptions.RecordsWitoutSchemaException` if sink does + Raises :class:`singer_sdk.exceptions.RecordsWithoutSchemaException` if sink does not exist and schema is not sent. Args: @@ -149,7 +162,6 @@ def get_sink( """ _ = record # Custom implementations may use record in sink selection. if schema is None: - self._assert_sink_exists(stream_name) return self._sinks_active[stream_name] existing_sink = self._sinks_active.get(stream_name, None) @@ -157,19 +169,21 @@ def get_sink( return self.add_sink(stream_name, schema, key_properties) if ( - existing_sink.schema != schema + existing_sink.original_schema != schema or existing_sink.key_properties != key_properties ): self.logger.info( - f"Schema or key properties for '{stream_name}' stream have changed. " - f"Initializing a new '{stream_name}' sink..." + "Schema or key properties for '%s' stream have changed. " + "Initializing a new '%s' sink...", + stream_name, + stream_name, ) self._sinks_to_clear.append(self._sinks_active.pop(stream_name)) return self.add_sink(stream_name, schema, key_properties) return existing_sink - def get_sink_class(self, stream_name: str) -> Type[Sink]: + def get_sink_class(self, stream_name: str) -> type[Sink]: """Get sink for a stream. Developers can override this method to return a custom Sink type depending @@ -187,10 +201,11 @@ def get_sink_class(self, stream_name: str) -> Type[Sink]: if self.default_sink_class: return self.default_sink_class - raise ValueError( - f"No sink class defined for '{stream_name}' " - "and no default sink class available." + msg = ( + f"No sink class defined for '{stream_name}' and no default sink class " + "available." ) + raise ValueError(msg) def sink_exists(self, stream_name: str) -> bool: """Check sink for a stream. 
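# Illustrative sketch (not part of this changeset) of the sink-rotation rule
# described in get_sink() above: a schema or key-properties change archives the
# old sink for draining and creates a fresh one. DemoSink is a stand-in.
from __future__ import annotations
from dataclasses import dataclass, field

@dataclass
class DemoSink:
    schema: dict
    key_properties: list[str] = field(default_factory=list)

active: dict[str, DemoSink] = {}
to_clear: list[DemoSink] = []

def get_demo_sink(stream: str, schema: dict, key_properties: list[str]) -> DemoSink:
    existing = active.get(stream)
    if existing and (existing.schema != schema or existing.key_properties != key_properties):
        to_clear.append(active.pop(stream))
        existing = None
    if existing is None:
        existing = DemoSink(schema, key_properties)
        active[stream] = existing
    return existing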
@@ -207,7 +222,10 @@ def sink_exists(self, stream_name: str) -> bool: @final def add_sink( - self, stream_name: str, schema: dict, key_properties: Optional[List[str]] = None + self, + stream_name: str, + schema: dict, + key_properties: list[str] | None = None, ) -> Sink: """Create a sink and register it. @@ -221,35 +239,49 @@ def add_sink( Returns: A new sink for the stream. """ - self.logger.info(f"Initializing '{self.name}' target sink...") + self.logger.info("Initializing '%s' target sink...", self.name) sink_class = self.get_sink_class(stream_name=stream_name) - result = sink_class( + sink = sink_class( target=self, stream_name=stream_name, schema=schema, key_properties=key_properties, ) - self._sinks_active[stream_name] = result - return result + sink.setup() + self._sinks_active[stream_name] = sink + return sink def _assert_sink_exists(self, stream_name: str) -> None: - """Raise a RecordsWitoutSchemaException exception if stream doesn't exist. + """Raise a RecordsWithoutSchemaException exception if stream doesn't exist. Args: stream_name: TODO Raises: - RecordsWitoutSchemaException: If sink does not exist and schema is not sent. + RecordsWithoutSchemaException: If sink does not exist and schema + is not sent. """ if not self.sink_exists(stream_name): - raise RecordsWitoutSchemaException( + msg = ( f"A record for stream '{stream_name}' was encountered before a " - "corresponding schema." + "corresponding schema. Check that the Tap correctly implements " + "the Singer spec." ) + raise RecordsWithoutSchemaException(msg) # Message handling - def _process_lines(self, file_input: IO[str]) -> Counter[str]: + def _handle_max_record_age(self) -> None: + """Check if _MAX_RECORD_AGE_IN_MINUTES reached, and if so trigger drain.""" + if self._max_record_age_in_minutes > self._MAX_RECORD_AGE_IN_MINUTES: + self.logger.info( + "One or more records have exceeded the max age of %d minutes. " + "Draining all sinks.", + self._MAX_RECORD_AGE_IN_MINUTES, + ) + self.drain_all() + + def _process_lines(self, file_input: t.IO[str]) -> t.Counter[str]: """Internal method to process jsonl lines from a Singer tap. Args: @@ -258,15 +290,20 @@ def _process_lines(self, file_input: IO[str]) -> Counter[str]: Returns: A counter object for the processed lines. """ - self.logger.info(f"Target '{self.name}' is listening for input from tap.") + self.logger.info("Target '%s' is listening for input from tap.", self.name) counter = super()._process_lines(file_input) line_count = sum(counter.values()) self.logger.info( - f"Target '{self.name}' completed reading {line_count} lines of input " - f"({counter[SingerMessageType.RECORD]} records, " - f"{counter[SingerMessageType.STATE]} state messages)." 
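# Illustrative sketch (not part of this changeset) of the time-based drain
# trigger factored into _handle_max_record_age above; the threshold and names
# are placeholders.
import time

MAX_RECORD_AGE_IN_MINUTES = 5.0
last_full_drain_at = time.time()

def max_record_age_in_minutes() -> float:
    return (time.time() - last_full_drain_at) / 60

if max_record_age_in_minutes() > MAX_RECORD_AGE_IN_MINUTES:
    print("One or more records have exceeded the max age. Draining all sinks.")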
+ "Target '%s' completed reading %d lines of input " + "(%d schemas, %d records, %d batch manifests, %d state messages).", + self.name, + line_count, + counter[SingerMessageType.SCHEMA], + counter[SingerMessageType.RECORD], + counter[SingerMessageType.BATCH], + counter[SingerMessageType.STATE], ) return counter @@ -284,8 +321,9 @@ def _process_record_message(self, message_dict: dict) -> None: self._assert_line_requires(message_dict, requires={"stream", "record"}) stream_name = message_dict["stream"] + self._assert_sink_exists(stream_name) + for stream_map in self.mapper.stream_maps[stream_name]: - # new_schema = helpers._float_to_decimal(new_schema) raw_record = copy.copy(message_dict["record"]) transformed_record = stream_map.transform(raw_record) if transformed_record is None: @@ -296,24 +334,30 @@ def _process_record_message(self, message_dict: dict) -> None: context = sink._get_context(transformed_record) if sink.include_sdc_metadata_properties: sink._add_sdc_metadata_to_record( - transformed_record, message_dict, context + transformed_record, + message_dict, + context, ) else: sink._remove_sdc_metadata_from_record(transformed_record) sink._validate_and_parse(transformed_record) + transformed_record = sink.preprocess_record(transformed_record, context) + sink._singer_validate_message(transformed_record) sink.tally_record_read() - transformed_record = sink.preprocess_record(transformed_record, context) sink.process_record(transformed_record, context) sink._after_process_record(context) if sink.is_full: self.logger.info( - f"Target sink for '{sink.stream_name}' is full. Draining..." + "Target sink for '%s' is full. Draining...", + sink.stream_name, ) self.drain_one(sink) + self._handle_max_record_age() + def _process_schema_message(self, message_dict: dict) -> None: """Process a SCHEMA messages. @@ -321,6 +365,7 @@ def _process_schema_message(self, message_dict: dict) -> None: message_dict: The newly received schema message. """ self._assert_line_requires(message_dict, requires={"stream", "schema"}) + self._assert_line_requires(message_dict["schema"], requires={"properties"}) stream_name = message_dict["stream"] schema = message_dict["schema"] @@ -330,23 +375,25 @@ def _process_schema_message(self, message_dict: dict) -> None: do_registration = True elif self.mapper.stream_maps[stream_name][0].raw_schema != schema: self.logger.info( - f"Schema has changed for stream '{stream_name}'. " - "Mapping definitions will be reset." + "Schema has changed for stream '%s'. " + "Mapping definitions will be reset.", + stream_name, ) do_registration = True elif ( self.mapper.stream_maps[stream_name][0].raw_key_properties != key_properties ): self.logger.info( - f"Key properties have changed for stream '{stream_name}'. " - "Mapping definitions will be reset." + "Key properties have changed for stream '%s'. " + "Mapping definitions will be reset.", + stream_name, ) do_registration = True if not do_registration: self.logger.debug( - f"No changes detected in SCHEMA message for stream '{stream_name}'. " - "Ignoring." + "No changes detected in SCHEMA message for stream '%s'. 
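# Illustrative sketch (not part of this changeset) of the "None means drop the
# record" convention applied by stream maps in _process_record_message above;
# transform here is a stand-in for a stream map.
from __future__ import annotations

def transform(record: dict) -> dict | None:
    return None if record.get("status") == "deleted" else record

records = [{"id": 1}, {"id": 2, "status": "deleted"}]
kept = [r for r in (transform(rec) for rec in records) if r is not None]
assert kept == [{"id": 1}]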
Ignoring.", + stream_name, ) return @@ -356,7 +403,6 @@ def _process_schema_message(self, message_dict: dict) -> None: key_properties, ) for stream_map in self.mapper.stream_maps[stream_name]: - # new_schema = helpers._float_to_decimal(new_schema) _ = self.get_sink( stream_map.stream_alias, schema=stream_map.transformed_schema, @@ -383,12 +429,6 @@ def _process_state_message(self, message_dict: dict) -> None: if self._latest_state == state: return self._latest_state = state - if self._max_record_age_in_minutes > self._MAX_RECORD_AGE_IN_MINUTES: - self.logger.info( - "One or more records have exceeded the max age of " - f"{self._MAX_RECORD_AGE_IN_MINUTES} minutes. Draining all sinks." - ) - self.drain_all() def _process_activate_version_message(self, message_dict: dict) -> None: """Handle the optional ACTIVATE_VERSION message extension. @@ -400,10 +440,25 @@ def _process_activate_version_message(self, message_dict: dict) -> None: sink = self.get_sink(stream_name) sink.activate_version(message_dict["version"]) + def _process_batch_message(self, message_dict: dict) -> None: + """Handle the optional BATCH message extension. + + Args: + message_dict: TODO + """ + sink = self.get_sink(message_dict["stream"]) + + encoding = BaseBatchFileEncoding.from_dict(message_dict["encoding"]) + sink.process_batch_files( + encoding, + message_dict["manifest"], + ) + self._handle_max_record_age() + # Sink drain methods @final - def drain_all(self, is_endofpipe: bool = False) -> None: + def drain_all(self, *, is_endofpipe: bool = False) -> None: """Drains all sinks, starting with those cleared due to changed schema. This method is internal to the SDK and should not need to be overridden. @@ -443,7 +498,7 @@ def drain_one(self, sink: Sink) -> None: sink.process_batch(draining_status) sink.mark_drained() - def _drain_all(self, sink_list: List[Sink], parallelism: int) -> None: + def _drain_all(self, sink_list: list[Sink], parallelism: int) -> None: if parallelism == 1: for sink in sink_list: self.drain_one(sink) @@ -462,93 +517,268 @@ def _write_state_message(self, state: dict) -> None: state: TODO """ state_json = json.dumps(state) - self.logger.info(f"Emitting completed target state {state_json}") + self.logger.info("Emitting completed target state %s", state_json) sys.stdout.write(f"{state_json}\n") sys.stdout.flush() # CLI handler - @classproperty - def cli(cls) -> Callable: + @classmethod + def invoke( # type: ignore[override] + cls: type[Target], + *, + about: bool = False, + about_format: str | None = None, + config: tuple[str, ...] = (), + file_input: t.IO[str] | None = None, + ) -> None: + """Invoke the target. + + Args: + about: Display package metadata and settings. + about_format: Specify output style for `--about`. + config: Configuration file location or 'ENV' to use environment + variables. Accepts multiple inputs as a tuple. + file_input: Optional file to read input from. + """ + super().invoke(about=about, about_format=about_format) + cls.print_version(print_fn=cls.logger.info) + config_files, parse_env_config = cls.config_from_cli_args(*config) + + target = cls( + config=config_files, # type: ignore[arg-type] + validate_config=True, + parse_env_config=parse_env_config, + ) + target.listen(file_input) + + @classmethod + def get_singer_command(cls: type[Target]) -> click.Command: """Execute standard CLI handler for taps. Returns: - A callable CLI object. + A click.Command object. 
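# Illustrative sketch (not part of this changeset) of the STATE handling above:
# identical consecutive states are ignored, and completed state is emitted to
# stdout as a single JSON line.
import json
import sys

latest_state: dict = {}

def process_state(state: dict) -> None:
    global latest_state
    if latest_state == state:
        return  # no change; nothing to emit downstream
    latest_state = state

def write_state(state: dict) -> None:
    sys.stdout.write(f"{json.dumps(state)}\n")
    sys.stdout.flush()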
""" - - @common_options.PLUGIN_VERSION - @common_options.PLUGIN_ABOUT - @common_options.PLUGIN_ABOUT_FORMAT - @common_options.PLUGIN_CONFIG - @common_options.PLUGIN_FILE_INPUT - @click.command( - help="Execute the Singer target.", - context_settings={"help_option_names": ["--help"]}, + command = super().get_singer_command() + command.help = "Execute the Singer target." + command.params.extend( + [ + click.Option( + ["--input", "file_input"], + help="A path to read messages from instead of from standard in.", + type=click.File("r"), + ), + ], ) - def cli( - version: bool = False, - about: bool = False, - config: Tuple[str, ...] = (), - format: str = None, - file_input: FileIO = None, - ) -> None: - """Handle command line execution. - - Args: - version: Display the package version. - about: Display package metadata and settings. - format: Specify output style for `--about`. - config: Configuration file location or 'ENV' to use environment - variables. Accepts multiple inputs as a tuple. - file_input: Specify a path to an input file to read messages from. - Defaults to standard in if unspecified. - - Raises: - FileNotFoundError: If the config file does not exist. - """ - if version: - cls.print_version() - return - - if not about: - cls.print_version(print_fn=cls.logger.info) - else: - cls.print_about(format=format) - return - - validate_config: bool = True - - cls.print_version(print_fn=cls.logger.info) - - parse_env_config = False - config_files: List[PurePath] = [] - for config_path in config: - if config_path == "ENV": - # Allow parse from env vars: - parse_env_config = True - continue - - # Validate config file paths before adding to list - if not Path(config_path).is_file(): - raise FileNotFoundError( - f"Could not locate config file at '{config_path}'." - "Please check that the file exists." - ) - - config_files.append(Path(config_path)) - - target = cls( # type: ignore # Ignore 'type not callable' - config=config_files or None, - parse_env_config=parse_env_config, - validate_config=validate_config, - ) - target.listen(file_input) + return command + + @classmethod + def append_builtin_config(cls: type[Target], config_jsonschema: dict) -> None: + """Appends built-in config to `config_jsonschema` if not already set. + + To customize or disable this behavior, developers may either override this class + method or override the `capabilities` property to disabled any unwanted + built-in capabilities. - return cli + For all except very advanced use cases, we recommend leaving these + implementations "as-is", since this provides the most choice to users and is + the most "future proof" in terms of taking advantage of built-in capabilities + which may be added in the future. + + Args: + config_jsonschema: [description] + """ + + def _merge_missing(source_jsonschema: dict, target_jsonschema: dict) -> None: + # Append any missing properties in the target with those from source. 
+ for k, v in source_jsonschema["properties"].items(): + if k not in target_jsonschema["properties"]: + target_jsonschema["properties"][k] = v + + _merge_missing(ADD_RECORD_METADATA_CONFIG, config_jsonschema) + + capabilities = cls.capabilities + + if PluginCapabilities.BATCH in capabilities: + _merge_missing(BATCH_CONFIG, config_jsonschema) + + super().append_builtin_config(config_jsonschema) + + pass class SQLTarget(Target): """Target implementation for SQL destinations.""" + _target_connector: SQLConnector | None = None + + default_sink_class: type[SQLSink] + + @property + def target_connector(self) -> SQLConnector: + """The connector object. + + Returns: + The connector object. + """ + if self._target_connector is None: + self._target_connector = self.default_sink_class.connector_class( + dict(self.config), + ) + return self._target_connector + + @classproperty + def capabilities(self) -> list[CapabilitiesEnum]: + """Get target capabilities. + + Returns: + A list of capabilities supported by this target. + """ + sql_target_capabilities: list[CapabilitiesEnum] = super().capabilities + sql_target_capabilities.extend([TargetCapabilities.TARGET_SCHEMA]) + + return sql_target_capabilities + + @classmethod + def append_builtin_config(cls: type[SQLTarget], config_jsonschema: dict) -> None: + """Appends built-in config to `config_jsonschema` if not already set. + + To customize or disable this behavior, developers may either override this class + method or override the `capabilities` property to disabled any unwanted + built-in capabilities. + + For all except very advanced use cases, we recommend leaving these + implementations "as-is", since this provides the most choice to users and is + the most "future proof" in terms of taking advantage of built-in capabilities + which may be added in the future. + + Args: + config_jsonschema: [description] + """ + + def _merge_missing(source_jsonschema: dict, target_jsonschema: dict) -> None: + # Append any missing properties in the target with those from source. + for k, v in source_jsonschema["properties"].items(): + if k not in target_jsonschema["properties"]: + target_jsonschema["properties"][k] = v + + capabilities = cls.capabilities + + if TargetCapabilities.TARGET_SCHEMA in capabilities: + _merge_missing(TARGET_SCHEMA_CONFIG, config_jsonschema) + + super().append_builtin_config(config_jsonschema) + pass + + @final + def add_sqlsink( + self, + stream_name: str, + schema: dict, + key_properties: list[str] | None = None, + ) -> Sink: + """Create a sink and register it. + + This method is internal to the SDK and should not need to be overridden. + + Args: + stream_name: Name of the stream. + schema: Schema of the stream. + key_properties: Primary key of the stream. + + Returns: + A new sink for the stream. + """ + self.logger.info("Initializing '%s' target sink...", self.name) + sink_class = self.get_sink_class(stream_name=stream_name) + sink = sink_class( + target=self, + stream_name=stream_name, + schema=schema, + key_properties=key_properties, + connector=self.target_connector, + ) + sink.setup() + self._sinks_active[stream_name] = sink + + return sink + + def get_sink_class(self, stream_name: str) -> type[SQLSink]: + """Get sink for a stream. + + Developers can override this method to return a custom Sink type depending + on the value of `stream_name`. Optional when `default_sink_class` is set. + + Args: + stream_name: Name of the stream. + + Raises: + ValueError: If no :class:`singer_sdk.sinks.Sink` class is defined. 
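# Illustrative sketch (not part of this changeset) of the "append if not
# already set" merge performed by the nested _merge_missing helpers above;
# the schemas shown are made up.
def merge_missing(source: dict, target: dict) -> None:
    for key, prop in source["properties"].items():
        target["properties"].setdefault(key, prop)

builtin = {"properties": {"add_record_metadata": {"type": "boolean"}}}
config_jsonschema = {"properties": {"default_target_schema": {"type": "string"}}}
merge_missing(builtin, config_jsonschema)
assert "add_record_metadata" in config_jsonschema["properties"]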
+ + Returns: + The sink class to be used with the stream. + """ + if self.default_sink_class: + return self.default_sink_class + + msg = ( + f"No sink class defined for '{stream_name}' and no default sink class " + "available." + ) + raise ValueError(msg) + + def get_sink( + self, + stream_name: str, + *, + record: dict | None = None, + schema: dict | None = None, + key_properties: list[str] | None = None, + ) -> Sink: + """Return a sink for the given stream name. + + A new sink will be created if `schema` is provided and if either `schema` or + `key_properties` has changed. If so, the old sink becomes archived and held + until the next drain_all() operation. + + Developers only need to override this method if they want to provide a different + sink depending on the values within the `record` object. Otherwise, please see + `default_sink_class` property and/or the `get_sink_class()` method. + + Raises :class:`singer_sdk.exceptions.RecordsWithoutSchemaException` if sink does + not exist and schema is not sent. + + Args: + stream_name: Name of the stream. + record: Record being processed. + schema: Stream schema. + key_properties: Primary key of the stream. + + Returns: + The sink used for this target. + """ + _ = record # Custom implementations may use record in sink selection. + if schema is None: + self._assert_sink_exists(stream_name) + return self._sinks_active[stream_name] + + existing_sink = self._sinks_active.get(stream_name, None) + if not existing_sink: + return self.add_sqlsink(stream_name, schema, key_properties) + + if ( + existing_sink.schema != schema + or existing_sink.key_properties != key_properties + ): + self.logger.info( + "Schema or key properties for '%s' stream have changed. " + "Initializing a new '%s' sink...", + stream_name, + stream_name, + ) + self._sinks_to_clear.append(self._sinks_active.pop(stream_name)) + return self.add_sqlsink(stream_name, schema, key_properties) + + return existing_sink diff --git a/singer_sdk/testing/__init__.py b/singer_sdk/testing/__init__.py new file mode 100644 index 000000000..83ca9aacc --- /dev/null +++ b/singer_sdk/testing/__init__.py @@ -0,0 +1,64 @@ +"""Tools and standard tests for Tap/Target implementations.""" + +from __future__ import annotations + +import typing as t +import warnings + +from .config import SuiteConfig +from .factory import get_tap_test_class, get_target_test_class +from .legacy import ( + _get_tap_catalog, + _select_all, + sync_end_to_end, + tap_sync_test, + tap_to_target_sync_test, + target_sync_test, +) +from .runners import SingerTestRunner, TapTestRunner, TargetTestRunner + + +def __getattr__(name: str) -> t.Any: # noqa: ANN401 + if name == "get_standard_tap_tests": + warnings.warn( + "The function singer_sdk.testing.get_standard_tap_tests is deprecated " + "and will be removed in a future release. Use get_tap_test_class instead.", + DeprecationWarning, + stacklevel=2, + ) + + from .legacy import get_standard_tap_tests + + return get_standard_tap_tests + + if name == "get_standard_target_tests": + warnings.warn( + "The function singer_sdk.testing.get_standard_target_tests is deprecated " + "and will be removed in a future release. 
Use get_target_test_class " + "instead.", + DeprecationWarning, + stacklevel=2, + ) + + from .legacy import get_standard_target_tests + + return get_standard_target_tests + + msg = f"module {__name__} has no attribute {name}" + raise AttributeError(msg) + + +__all__ = [ + "get_tap_test_class", + "get_target_test_class", + "_get_tap_catalog", + "_select_all", + "sync_end_to_end", + "tap_sync_test", + "tap_to_target_sync_test", + "target_sync_test", + "SingerTestRunner", + "TapTestRunner", + "TargetTestRunner", + "SuiteConfig", +] diff --git a/singer_sdk/testing/config.py b/singer_sdk/testing/config.py new file mode 100644 index 000000000..ebdc81ff7 --- /dev/null +++ b/singer_sdk/testing/config.py @@ -0,0 +1,22 @@ +"""Test config classes.""" + +from __future__ import annotations + +from dataclasses import dataclass, field + + +@dataclass +class SuiteConfig: + """Test Suite Config, passed to each test. + + Args: + max_records_limit: Max records to fetch during tap testing. + ignore_no_records: Ignore stream test failures if stream returns no records, + for all streams. + ignore_no_records_for_streams: Ignore stream test failures if stream returns + no records, for named streams. + """ + + max_records_limit: int | None = None + ignore_no_records: bool = False + ignore_no_records_for_streams: list[str] = field(default_factory=list) diff --git a/singer_sdk/testing/factory.py b/singer_sdk/testing/factory.py new file mode 100644 index 000000000..1c15f4844 --- /dev/null +++ b/singer_sdk/testing/factory.py @@ -0,0 +1,431 @@ +"""Test Class Factory.""" +from __future__ import annotations + +import typing as t + +import pytest + +from .config import SuiteConfig +from .runners import TapTestRunner, TargetTestRunner +from .suites import ( + tap_stream_attribute_tests, + tap_stream_tests, + tap_tests, + target_tests, +) + +if t.TYPE_CHECKING: + from singer_sdk import Tap, Target + + +class BaseTestClass: + """Base test class.""" + + params: dict[str, t.Any] + param_ids: dict[str, list[str]] + + def __init_subclass__(cls, **kwargs: t.Any) -> None: + """Initialize a subclass. + + Args: + **kwargs: Keyword arguments. + """ + # Add empty params and param_ids attributes to a direct subclass but not to + # subclasses of subclasses + if cls.__base__ == BaseTestClass: + cls.params = {} + cls.param_ids = {} + + +class TapTestClassFactory: + """Factory for Tap Test Classes.""" + + def __init__( + self, + tap_class: type[Tap], + *, + config: dict | None = None, + ): + """Initialize TapTestClassFactory. + + Args: + tap_class: Tap class to be tested. + config: Tap configuration for testing. + """ + self.tap_class = tap_class + self.config = config + + def new_test_class( + self, + *, + include_tap_tests: bool = True, + include_stream_tests: bool = True, + include_stream_attribute_tests: bool = True, + custom_suites: list | None = None, + suite_config: SuiteConfig | None = None, + **kwargs: t.Any, + ) -> type[BaseTestClass]: + """Get a new test class. + + Args: + include_tap_tests: Include tap tests in the test class. + include_stream_tests: Include stream tests in the test class. + include_stream_attribute_tests: + Include stream attribute tests in the test class. + custom_suites: List of custom test suites to include in the test class. + suite_config: SuiteConfig instance to be used when instantiating tests. + kwargs: Default arguments to be passed to tap on create. + + Returns: + A new test class. 
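# Illustrative sketch (not part of this changeset) of the module-level
# __getattr__ deprecation hook (PEP 562) used in singer_sdk/testing/__init__.py
# above; the attribute name and the replacement object are made up.
import typing as t
import warnings

def __getattr__(name: str) -> t.Any:
    if name == "old_helper":
        warnings.warn(
            "old_helper is deprecated and will be removed; use new_helper instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        from math import sqrt as old_helper  # stand-in replacement object
        return old_helper
    msg = f"module {__name__} has no attribute {name}"
    raise AttributeError(msg)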
+ """ + # compile test suites + suites = custom_suites or [] + if include_tap_tests: + suites.append(tap_tests) + if include_stream_tests: + suites.append(tap_stream_tests) + if include_stream_attribute_tests: + suites.append(tap_stream_attribute_tests) + + # set default values + if "parse_env_config" not in kwargs: + kwargs["parse_env_config"] = True + + # create singleton test runner + test_runner = TapTestRunner( + tap_class=self.tap_class, + config=self.config, + suite_config=suite_config, + **kwargs, + ) + + empty_test_class = self._get_empty_test_class( + test_runner=test_runner, + suite_config=suite_config, + ) + return self._annotate_test_class( + empty_test_class=empty_test_class, + test_suites=suites, + test_runner=test_runner, + ) + + def _get_empty_test_class( + self, + test_runner: TapTestRunner, + suite_config: SuiteConfig | None, + ) -> type[BaseTestClass]: + """Get an empty test class. + + Args: + test_runner: Test runner to be used in the test class. + suite_config: SuiteConfig instance to be used when instantiating tests. + + Returns: + An empty test class. + """ + + class TapTestClass(BaseTestClass): + """Tap Test Class.""" + + @pytest.fixture + def config(self) -> SuiteConfig: + return suite_config or SuiteConfig() + + @pytest.fixture + def resource(self) -> t.Any: # noqa: ANN401, PT004 + yield # noqa: PT022 + + @pytest.fixture(scope="class") + def runner(self) -> TapTestRunner | TargetTestRunner: + # Populate runner class with cached records for use in tests + test_runner.sync_all() + return test_runner + + return TapTestClass + + def _annotate_test_class( # noqa: C901 + self, + empty_test_class: type[BaseTestClass], + test_suites: list, + test_runner: TapTestRunner, + ) -> type[BaseTestClass]: + """Annotate test class with test methods. + + Args: + empty_test_class: Empty test class to be annotated. + test_suites: List of test suites to include in the test class. + test_runner: Test runner to be used in the test class. + + Returns: + An annotated test class. 
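# Illustrative sketch (not part of this changeset) of the fixture-carrying test
# class that _get_empty_test_class builds above; assumes pytest is installed and
# the fixture value is made up.
import pytest

class DemoTestClass:
    @pytest.fixture
    def config(self) -> dict:
        return {"max_records_limit": 1}

    def test_limit_is_set(self, config: dict) -> None:
        assert config["max_records_limit"] == 1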
+ """ + for suite in test_suites: + if suite.kind == "tap": + for test_class in suite.tests: + test = test_class() + test_name = f"test_{suite.kind}_{test.name}" + setattr(empty_test_class, test_name, test.run) + + if suite.kind in {"tap_stream", "tap_stream_attribute"}: + streams = list(test_runner.new_tap().streams.values()) + + if suite.kind == "tap_stream": + params = [ + { + "stream": stream, + } + for stream in streams + ] + param_ids = [stream.name for stream in streams] + + for test_class in suite.tests: + test = test_class() + test_name = f"test_{suite.kind}_{test.name}" + setattr( + empty_test_class, + test_name, + test.run, + ) + empty_test_class.params[test_name] = params + empty_test_class.param_ids[test_name] = param_ids + + if suite.kind == "tap_stream_attribute": + for test_class in suite.tests: + test = test_class() + test_name = f"test_{suite.kind}_{test.name}" + test_params = [] + test_ids: list[str] = [] + for stream in streams: + test_params.extend( + [ + { + "stream": stream, + "attribute_name": property_name, + } + for property_name, property_schema in stream.schema[ + "properties" + ].items() + if test_class.evaluate( + stream=stream, + property_name=property_name, + property_schema=property_schema, + ) + ], + ) + test_ids.extend( + [ + f"{stream.name}.{property_name}" + for property_name, property_schema in stream.schema[ + "properties" + ].items() + if test_class.evaluate( + stream=stream, + property_name=property_name, + property_schema=property_schema, + ) + ], + ) + + if test_params: + setattr( + empty_test_class, + test_name, + test.run, + ) + empty_test_class.params[test_name] = test_params + empty_test_class.param_ids[test_name] = test_ids + + return empty_test_class + + +class TargetTestClassFactory: + """Factory for Target Test Classes.""" + + def __init__(self, target_class: type[Target], *, config: dict | None = None): + """Initialize TargetTestClassFactory. + + Args: + target_class: Target class to be tested. + config: Config to be used when instantiating tests. + """ + self.target_class = target_class + self.config = config + + def new_test_class( + self, + *, + custom_suites: list | None = None, + suite_config: SuiteConfig | None = None, + include_target_tests: bool = True, + **kwargs: t.Any, + ) -> type[BaseTestClass]: + """Get a new Target test class. + + Args: + custom_suites: List of custom test suites to include in the test class. + suite_config: SuiteConfig instance to be used when instantiating tests. + include_target_tests: Whether to include target tests in the test class. + kwargs: Keyword arguments to be passed to the Target on run. + + Returns: + A new Target test class. + """ + # compile test suites + suites = custom_suites or [] + if include_target_tests: + suites.append(target_tests) + + # set default values + if "parse_env_config" not in kwargs: + kwargs["parse_env_config"] = True + + empty_test_class = self._get_empty_test_class( + target_class=self.target_class, + config=self.config, + suite_config=suite_config, + **kwargs, + ) + return self._annotate_test_class( + empty_test_class=empty_test_class, + test_suites=suites, + ) + + def _get_empty_test_class( + self, + target_class: type[Target], + suite_config: SuiteConfig | None, + config: dict | None = None, + **kwargs: t.Any, + ) -> type[BaseTestClass]: + """Get an empty test class. + + Args: + target_class: Target class to be tested. + suite_config: SuiteConfig instance to be used when instantiating tests. + config: Config to be used when instantiating tests. 
+ kwargs: Keyword arguments to be passed to the Target on run. + + Returns: + An empty test class. + """ + + class TargetTestClass(BaseTestClass): + """Target Test Class.""" + + @pytest.fixture + def config(self) -> SuiteConfig: + return suite_config or SuiteConfig() + + @pytest.fixture + def resource(self) -> t.Any: # noqa: ANN401, PT004 + yield # noqa: PT022 + + @pytest.fixture + def runner(self) -> TargetTestRunner: + # Instantiate new runner class and populate records for use in tests + return TargetTestRunner( + target_class=target_class, + config=config, + suite_config=suite_config, + **kwargs, + ) + + return TargetTestClass + + def _annotate_test_class( + self, + empty_test_class: type[BaseTestClass], + test_suites: list, + ) -> type[BaseTestClass]: + """Annotate test class with test methods. + + Args: + empty_test_class: Empty test class to be annotated. + test_suites: List of test suites to be included in the test class. + + Returns: + Annotated test class. + """ + for suite in test_suites: + if suite.kind == "target": + for test_class in suite.tests: + test = test_class() + test_name = f"test_{suite.kind}_{test.name}" + setattr(empty_test_class, test_name, test.run) + + return empty_test_class + + +def get_tap_test_class( + tap_class: type[Tap], + *, + config: dict | None = None, + include_tap_tests: bool = True, + include_stream_tests: bool = True, + include_stream_attribute_tests: bool = True, + custom_suites: list | None = None, + suite_config: SuiteConfig | None = None, + **kwargs: t.Any, +) -> type[BaseTestClass]: + """Get Tap Test Class. + + Args: + tap_class: Meltano Singer SDK Tap class to test. + config: Config dict to use for testing. + include_tap_tests: Include tap tests. + include_stream_tests: Include Tap stream tests. + include_stream_attribute_tests: Include Tap stream attribute tests. + custom_suites: Custom test suites to add to standard tests. + suite_config: SuiteConfig instance to pass to tests. + kwargs: Keyword arguments to pass to the TapRunner. + + Returns: + A test class usable by pytest. + """ + factory = TapTestClassFactory( + tap_class=tap_class, + config=config, + ) + return factory.new_test_class( + custom_suites=custom_suites, + suite_config=suite_config, + include_tap_tests=include_tap_tests, + include_stream_tests=include_stream_tests, + include_stream_attribute_tests=include_stream_attribute_tests, + **kwargs, + ) + + +def get_target_test_class( + target_class: type[Target], + *, + config: dict | None = None, + custom_suites: list | None = None, + suite_config: SuiteConfig | None = None, + include_target_tests: bool = True, + **kwargs: t.Any, +) -> type[BaseTestClass]: + """Get Target Test Class. + + Args: + target_class: Meltano Singer SDK Target class to test. + config: Config dict to use for testing. + custom_suites: Custom test suites to add to standard tests. + suite_config: SuiteConfig instance to pass to tests. + include_target_tests: Include standard target tests. + kwargs: Keyword arguments to pass to the TapRunner. + + Returns: + A test class usable by pytest. 
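# --- Illustrative sketch, not part of this patch ----------------------------
# Counterpart usage for targets via get_target_test_class. "target_mysink",
# "TargetMySink", and the config value are hypothetical placeholders; real
# settings depend on the target being tested.
from singer_sdk.testing import get_target_test_class

from target_mysink.target import TargetMySink  # hypothetical target under test

TestTargetMySink = get_target_test_class(
    target_class=TargetMySink,
    config={"output_path": "/tmp/output"},  # hypothetical target setting
)
# -----------------------------------------------------------------------------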
+ """ + factory = TargetTestClassFactory( + target_class=target_class, + config=config, + ) + return factory.new_test_class( + custom_suites=custom_suites, + suite_config=suite_config, + include_target_tests=include_target_tests, + **kwargs, + ) diff --git a/singer_sdk/testing.py b/singer_sdk/testing/legacy.py similarity index 61% rename from singer_sdk/testing.py rename to singer_sdk/testing/legacy.py index d2fa94d91..5baa94034 100644 --- a/singer_sdk/testing.py +++ b/singer_sdk/testing/legacy.py @@ -1,16 +1,23 @@ """Pre-built test functions which can be applied to multiple taps.""" +from __future__ import annotations + import io +import typing as t from contextlib import redirect_stderr, redirect_stdout -from typing import Callable, List, Optional, Tuple, Type, cast -from singer_sdk.helpers import _singer -from singer_sdk.mapper_base import InlineMapper -from singer_sdk.tap_base import Tap -from singer_sdk.target_base import Target +import singer_sdk._singerlib as singer + +if t.TYPE_CHECKING: + from singer_sdk.mapper_base import InlineMapper + from singer_sdk.tap_base import Tap + from singer_sdk.target_base import Target -def get_standard_tap_tests(tap_class: Type[Tap], config: dict = None) -> List[Callable]: +def get_standard_tap_tests( # noqa: C901 + tap_class: type[Tap], + config: dict | None = None, +) -> list[t.Callable]: """Return callable pytest which executes simple discovery and connection tests. Args: @@ -27,7 +34,7 @@ def _test_cli_prints() -> None: # Test CLI prints tap1.print_version() tap1.print_about() - tap1.print_about(format="json") + tap1.print_about(output_format="json") def _test_discovery() -> None: catalog1 = _get_tap_catalog(tap_class, config or {}) @@ -40,13 +47,58 @@ def _test_stream_connections() -> None: tap1: Tap = tap_class(config=config, parse_env_config=True) tap1.run_connection_test() - return [_test_cli_prints, _test_discovery, _test_stream_connections] + def _test_pkeys_in_schema() -> None: + """Verify that primary keys are actually in the stream's schema.""" + tap = tap_class(config=config, parse_env_config=True) + for name, stream in tap.streams.items(): + pkeys = stream.primary_keys or [] + schema_props = set(stream.schema["properties"].keys()) + for pkey in pkeys: + error_message = ( + f"Coding error in stream '{name}': " + f"primary_key '{pkey}' is missing in schema" + ) + assert pkey in schema_props, error_message + + def _test_state_partitioning_keys_in_schema() -> None: + """Verify that state partitioning keys are actually in the stream's schema.""" + tap = tap_class(config=config, parse_env_config=True) + for name, stream in tap.streams.items(): + sp_keys = stream.state_partitioning_keys or [] + schema_props = set(stream.schema["properties"].keys()) + for sp_key in sp_keys: + assert sp_key in schema_props, ( + f"Coding error in stream '{name}': state_partitioning_key " + f"'{sp_key}' is missing in schema" + ) + + def _test_replication_keys_in_schema() -> None: + """Verify that the replication key is actually in the stream's schema.""" + tap = tap_class(config=config, parse_env_config=True) + for name, stream in tap.streams.items(): + rep_key = stream.replication_key + if rep_key is None: + continue + schema_props = set(stream.schema["properties"].keys()) + assert rep_key in schema_props, ( + f"Coding error in stream '{name}': replication_key " + f"'{rep_key}' is missing in schema" + ) + + return [ + _test_cli_prints, + _test_discovery, + _test_stream_connections, + _test_pkeys_in_schema, + _test_state_partitioning_keys_in_schema, + 
_test_replication_keys_in_schema, + ] def get_standard_target_tests( - target_class: Type[Target], - config: dict = None, -) -> List[Callable]: + target_class: type[Target], # noqa: ARG001 + config: dict | None = None, # noqa: ARG001 +) -> list[t.Callable]: """Return callable pytest which executes simple discovery and connection tests. Args: @@ -59,7 +111,7 @@ def get_standard_target_tests( return [] -def tap_sync_test(tap: Tap) -> Tuple[io.StringIO, io.StringIO]: +def tap_sync_test(tap: Tap) -> tuple[io.StringIO, io.StringIO]: """Invokes a Tap object and return STDOUT and STDERR results in StringIO buffers. Args: @@ -78,7 +130,10 @@ def tap_sync_test(tap: Tap) -> Tuple[io.StringIO, io.StringIO]: def _get_tap_catalog( - tap_class: Type[Tap], config: dict, select_all: bool = False + tap_class: type[Tap], + config: dict, + *, + select_all: bool = False, ) -> dict: """Return a catalog dict by running discovery. @@ -110,16 +165,19 @@ def _select_all(catalog_dict: dict) -> dict: Returns: dict: [description] """ - catalog = _singer.Catalog.from_dict(catalog_dict) + catalog = singer.Catalog.from_dict(catalog_dict) for catalog_entry in catalog.streams: catalog_entry.metadata.root.selected = True - return cast(dict, catalog.to_dict()) + return t.cast(dict, catalog.to_dict()) def target_sync_test( - target: Target, input: Optional[io.StringIO], finalize: bool = True -) -> Tuple[io.StringIO, io.StringIO]: + target: Target, + input: io.StringIO | None, # noqa: A002 + *, + finalize: bool = True, +) -> tuple[io.StringIO, io.StringIO]: """Invoke the target with the provided input. Args: @@ -146,8 +204,9 @@ def target_sync_test( def tap_to_target_sync_test( - tap: Tap, target: Target -) -> Tuple[io.StringIO, io.StringIO, io.StringIO, io.StringIO]: + tap: Tap, + target: Target, +) -> tuple[io.StringIO, io.StringIO, io.StringIO, io.StringIO]: """Test and end-to-end sink from the tap to the target. Note: This method buffers all output from the tap in memory and should not be diff --git a/singer_sdk/testing/pytest_plugin.py b/singer_sdk/testing/pytest_plugin.py new file mode 100644 index 000000000..aaaa83703 --- /dev/null +++ b/singer_sdk/testing/pytest_plugin.py @@ -0,0 +1,35 @@ +"""Pytest Plugin.""" + +from __future__ import annotations + +import pytest + + +def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: + """Pytest Hook, responsible for parameterizing tests. + + Called once per each test function, this hook will check if the function name is + registered in the parent classes 'params' dict, and if so will parameterize + the given test function with the values therein. + + Args: + metafunc: Pytest MetaFunc instance, representing a test function or method. 
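# --- Illustrative sketch, not part of this patch ----------------------------
# Shape of the class-level "params" / "param_ids" mappings this hook consumes:
# one entry per generated test method name. The values below are invented for
# illustration; the factory normally fills them with Stream objects after the
# test class is built.
from singer_sdk.testing.factory import BaseTestClass


class ExampleTests(BaseTestClass):
    def test_tap_stream_returns_record(self, stream) -> None:
        assert stream is not None


# Populated after class creation (mirroring what the factory does); the hook
# then expands each entry into one parametrized test case per stream.
ExampleTests.params["test_tap_stream_returns_record"] = [
    {"stream": "users"},
    {"stream": "orders"},
]
ExampleTests.param_ids["test_tap_stream_returns_record"] = ["users", "orders"]
# -----------------------------------------------------------------------------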
+ """ + if metafunc.cls and hasattr(metafunc.cls, "params"): + func_arg_list = metafunc.cls.params.get(metafunc.definition.name) + func_arg_ids = ( + metafunc.cls.param_ids.get(metafunc.definition.name) + if hasattr(metafunc.cls, "param_ids") + else None + ) + if func_arg_list: + arg_names = list(func_arg_list[0].keys()) + parameters = [ + pytest.param(*tuple(func_args[name] for name in arg_names)) + for func_args in func_arg_list + ] + metafunc.parametrize( + ",".join(arg_names), + parameters, + ids=func_arg_ids, + ) diff --git a/singer_sdk/testing/runners.py b/singer_sdk/testing/runners.py new file mode 100644 index 000000000..71f294335 --- /dev/null +++ b/singer_sdk/testing/runners.py @@ -0,0 +1,306 @@ +"""Utility object for running taps/targets, capturing sync output during testing.""" + +from __future__ import annotations + +import abc +import io +import json +import typing as t +from collections import defaultdict +from contextlib import redirect_stderr, redirect_stdout +from pathlib import Path + +from singer_sdk import Tap, Target +from singer_sdk.testing.config import SuiteConfig + + +class SingerTestRunner(metaclass=abc.ABCMeta): + """Base Singer Test Runner.""" + + def __init__( + self, + singer_class: type[Tap] | type[Target], + config: dict | None = None, + suite_config: SuiteConfig | None = None, + **kwargs: t.Any, + ) -> None: + """Initialize the test runner object. + + Args: + singer_class (type[PluginBase]): Singer class to be tested. + config (dict): Tap/Target configuration for testing. + suite_config (SuiteConfig): SuiteConfig instance to be used when + instantiating tests. + kwargs (dict): Default arguments to be passed to tap/target on create. + """ + self.singer_class = singer_class + self.config = config or {} + self.default_kwargs = kwargs + self.suite_config = suite_config or SuiteConfig() + + self.raw_messages: list[dict] = [] + self.schema_messages: list[dict] = [] + self.record_messages: list[dict] = [] + self.state_messages: list[dict] = [] + self.records: defaultdict = defaultdict(list) + + @staticmethod + def _clean_sync_output(raw_records: str) -> list[dict]: + """Clean sync output. + + Args: + raw_records: String containing raw messages. + + Returns: + A list of raw messages in dict form. + """ + lines = raw_records.strip().split("\n") + return [json.loads(ii) for ii in lines if ii] + + def create(self, kwargs: dict | None = None) -> Tap | Target: + """Create a new tap/target from the runner defaults. + + Args: + kwargs (dict, optional): [description]. Defaults to None. + + Returns: + An instantiated Tap or Target. + """ + if not kwargs: + kwargs = self.default_kwargs + return self.singer_class(config=self.config, **kwargs) + + @abc.abstractmethod + def sync_all(self, **kwargs: t.Any) -> None: + """Sync all records. + + Args: + kwargs: Keyword arguments. + """ + + +class TapTestRunner(SingerTestRunner): + """Utility class to simplify tap testing.""" + + def __init__( + self, + tap_class: type[Tap], + config: dict | None = None, + suite_config: SuiteConfig | None = None, + **kwargs: t.Any, + ) -> None: + """Initialize Tap instance. + + Args: + tap_class: Tap class to run. + config: Config dict to pass to Tap class. + suite_config (SuiteConfig): SuiteConfig instance to be used when + instantiating tests. + kwargs: Default arguments to be passed to tap on create. + """ + super().__init__( + singer_class=tap_class, + config=config or {}, + suite_config=suite_config, + **kwargs, + ) + + def new_tap(self) -> Tap: + """Get new Tap instance. 
+ + Returns: + A configured Tap instance. + """ + return t.cast(Tap, self.create()) + + def run_discovery(self) -> str: + """Run tap discovery. + + Returns: + The catalog as a string. + """ + return self.new_tap().run_discovery() + + def run_connection_test(self) -> bool: + """Run tap connection test. + + Returns: + True if connection test passes, else False. + """ + new_tap = self.new_tap() + return new_tap.run_connection_test() + + def run_sync_dry_run(self) -> bool: + """Run tap sync dry run. + + Returns: + True if dry run test passes, else False. + """ + new_tap = self.new_tap() + dry_run_record_limit = None + if self.suite_config.max_records_limit is not None: + dry_run_record_limit = self.suite_config.max_records_limit + + return new_tap.run_sync_dry_run(dry_run_record_limit=dry_run_record_limit) + + def sync_all(self, **kwargs: t.Any) -> None: # noqa: ARG002 + """Run a full tap sync, assigning output to the runner object. + + Args: + kwargs: Unused keyword arguments. + """ + stdout, stderr = self._execute_sync() + messages = self._clean_sync_output(stdout) + self._parse_records(messages) + + def _parse_records(self, messages: list[dict]) -> None: + """Save raw and parsed messages onto the runner object. + + Args: + messages: A list of messages in dict form. + """ + self.raw_messages = messages + for message in messages: + if message: + if message["type"] == "STATE": + self.state_messages.append(message) + continue + if message["type"] == "SCHEMA": + self.schema_messages.append(message) + continue + if message["type"] == "RECORD": + stream_name = message["stream"] + self.record_messages.append(message) + self.records[stream_name].append(message["record"]) + continue + + def _execute_sync(self) -> tuple[str, str]: + """Invoke a Tap object and return STDOUT and STDERR results in StringIO buffers. + + Returns: + A 2-item tuple with StringIO buffers from the Tap's output: (stdout, stderr) + """ + stdout_buf = io.StringIO() + stderr_buf = io.StringIO() + with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf): + self.run_sync_dry_run() + stdout_buf.seek(0) + stderr_buf.seek(0) + return stdout_buf.read(), stderr_buf.read() + + +class TargetTestRunner(SingerTestRunner): + """Utility class to simplify target testing.""" + + def __init__( + self, + target_class: type[Target], + config: dict | None = None, + suite_config: SuiteConfig | None = None, + input_filepath: Path | None = None, + input_io: io.StringIO | None = None, + **kwargs: t.Any, + ) -> None: + """Initialize TargetTestRunner. + + Args: + target_class: Target Class to instantiate. + config: Config to pass to instantiated Target. + suite_config: Config to pass to tests. + input_filepath: (optional) Path to a singer file containing records, to pass + to the Target during testing. + input_io: (optional) StringIO containing raw records to pass to the Target + during testing. + kwargs: Default arguments to be passed to tap/target on create. + """ + super().__init__( + singer_class=target_class, + config=config or {}, + suite_config=suite_config, + **kwargs, + ) + self.input_filepath = input_filepath + self.input_io = input_io + self._input: t.IO[str] | None = None + + def new_target(self) -> Target: + """Get new Target instance. + + Returns: + A configured Target instance. + """ + return t.cast(Target, self.create()) + + @property + def target_input(self) -> t.IO[str]: + """Input messages to pass to Target. + + Returns: + A list of raw input messages in string form. 
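# --- Illustrative sketch, not part of this patch ----------------------------
# Driving a TargetTestRunner directly with an in-memory Singer message stream,
# rather than through a generated test class. "TargetMySink" is the
# hypothetical target from the earlier sketch.
import io

from singer_sdk.testing import TargetTestRunner

runner = TargetTestRunner(
    target_class=TargetMySink,
    config={},
    input_io=io.StringIO(
        '{"type": "SCHEMA", "stream": "users", "key_properties": ["id"], '
        '"schema": {"type": "object", "properties": {"id": {"type": "integer"}}}}\n'
        '{"type": "RECORD", "stream": "users", "record": {"id": 1}}\n'
    ),
)
runner.sync_all(finalize=True)
print(runner.stdout)  # captured target output, e.g. emitted STATE messages
# -----------------------------------------------------------------------------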
+ """ + if self._input is None: + if self.input_io: + self._input = self.input_io + elif self.input_filepath: + self._input = Path(self.input_filepath).open( # noqa: SIM115 + encoding="utf8", + ) + return t.cast(t.IO[str], self._input) + + @target_input.setter + def target_input(self, value: t.IO[str]) -> None: + self._input = value + + def sync_all( + self, + *, + finalize: bool = True, + **kwargs: t.Any, # noqa: ARG002 + ) -> None: + """Run a full tap sync, assigning output to the runner object. + + Args: + finalize: True to process as the end of stream as a completion signal; + False to keep the sink operation open for further records. + kwargs: Unused keyword arguments. + """ + target = self.new_target() + stdout, stderr = self._execute_sync( + target=target, + target_input=self.target_input, + finalize=finalize, + ) + self.stdout, self.stderr = (stdout.read(), stderr.read()) + self.state_messages.extend(self._clean_sync_output(self.stdout)) + + def _execute_sync( + self, + target: Target, + target_input: t.IO[str], + *, + finalize: bool = True, + ) -> tuple[io.StringIO, io.StringIO]: + """Invoke the target with the provided input. + + Args: + target: Target to sync. + target_input: The input to process as if from STDIN. + finalize: True to process as the end of stream as a completion signal; + False to keep the sink operation open for further records. + + Returns: + A 2-item tuple with StringIO buffers from the Target's output: + (stdout, stderr) + """ + stdout_buf = io.StringIO() + stderr_buf = io.StringIO() + + with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf): + if target_input is not None: + target._process_lines(target_input) + if finalize: + target._process_endofpipe() + + stdout_buf.seek(0) + stderr_buf.seek(0) + return stdout_buf, stderr_buf diff --git a/singer_sdk/testing/suites.py b/singer_sdk/testing/suites.py new file mode 100644 index 000000000..d795cf153 --- /dev/null +++ b/singer_sdk/testing/suites.py @@ -0,0 +1,112 @@ +"""Standard Tap and Target test suites.""" + +from __future__ import annotations + +import typing as t +from dataclasses import dataclass + +from .tap_tests import ( + AttributeIsBooleanTest, + AttributeIsDateTimeTest, + AttributeIsIntegerTest, + AttributeIsNumberTest, + AttributeIsObjectTest, + AttributeNotNullTest, + StreamCatalogSchemaMatchesRecordTest, + StreamPrimaryKeysTest, + StreamRecordMatchesStreamSchema, + StreamRecordSchemaMatchesCatalogTest, + StreamReturnsRecordTest, + TapCLIPrintsTest, + TapDiscoveryTest, + TapStreamConnectionTest, + TapValidFinalStateTest, +) + +# TODO: add TargetMultipleStateMessages +# TODO: fix behavior in SDK to make this pass +from .target_tests import ( + TargetArrayData, + TargetCamelcaseComplexSchema, + TargetCamelcaseTest, + TargetCliPrintsTest, + TargetDuplicateRecords, + TargetEncodedStringData, + TargetInvalidSchemaTest, + TargetNoPrimaryKeys, + TargetOptionalAttributes, + TargetRecordBeforeSchemaTest, + TargetRecordMissingKeyProperty, + TargetRecordMissingOptionalFields, + TargetSchemaNoProperties, + TargetSchemaUpdates, + TargetSpecialCharsInAttributes, +) + +if t.TYPE_CHECKING: + from .templates import TapTestTemplate, TargetTestTemplate, TestTemplate + + +@dataclass +class TestSuite: + """Test Suite container class.""" + + kind: str + tests: list[type[TestTemplate] | type[TapTestTemplate] | type[TargetTestTemplate]] + + +# Tap Test Suites +tap_tests = TestSuite( + kind="tap", + tests=[ + TapCLIPrintsTest, + TapDiscoveryTest, + TapStreamConnectionTest, + TapValidFinalStateTest, + ], +) 
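# --- Illustrative sketch, not part of this patch ----------------------------
# A downstream project could compose its own suite with the same TestSuite
# container (defined above in this module) and pass it to the factory via
# "custom_suites". The test body and the import path of the template base
# class are assumptions for illustration.
from singer_sdk.testing.templates import TapTestTemplate


class TapHasStreamsTest(TapTestTemplate):
    name = "has_streams"

    def test(self) -> None:
        assert len(self.tap.streams) > 0


my_tap_suite = TestSuite(kind="tap", tests=[TapHasStreamsTest])
# -----------------------------------------------------------------------------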
+tap_stream_tests = TestSuite( + kind="tap_stream", + tests=[ + StreamCatalogSchemaMatchesRecordTest, + StreamRecordMatchesStreamSchema, + StreamRecordSchemaMatchesCatalogTest, + StreamReturnsRecordTest, + StreamPrimaryKeysTest, + ], +) +tap_stream_attribute_tests = TestSuite( + kind="tap_stream_attribute", + tests=[ + AttributeIsBooleanTest, + AttributeIsDateTimeTest, + AttributeIsIntegerTest, + AttributeIsNumberTest, + AttributeIsObjectTest, + AttributeNotNullTest, + ], +) + + +# Target Test Suites +target_tests = TestSuite( + kind="target", + tests=[ + TargetArrayData, + TargetCamelcaseComplexSchema, + TargetCamelcaseTest, + TargetCliPrintsTest, + TargetDuplicateRecords, + TargetEncodedStringData, + TargetInvalidSchemaTest, + # TargetMultipleStateMessages, + TargetNoPrimaryKeys, + TargetOptionalAttributes, + TargetRecordBeforeSchemaTest, + TargetRecordMissingKeyProperty, + TargetRecordMissingOptionalFields, + TargetSchemaNoProperties, + TargetSchemaUpdates, + TargetSpecialCharsInAttributes, + ], +) diff --git a/singer_sdk/testing/tap_tests.py b/singer_sdk/testing/tap_tests.py new file mode 100644 index 000000000..ecb7eb811 --- /dev/null +++ b/singer_sdk/testing/tap_tests.py @@ -0,0 +1,376 @@ +"""Standard Tap Tests.""" + +from __future__ import annotations + +import typing as t +import warnings + +from dateutil import parser +from jsonschema import Draft7Validator + +import singer_sdk.helpers._typing as th +from singer_sdk import Tap + +from .templates import AttributeTestTemplate, StreamTestTemplate, TapTestTemplate + +if t.TYPE_CHECKING: + from singer_sdk.streams.core import Stream + + +class TapCLIPrintsTest(TapTestTemplate): + """Test that the tap is able to print standard metadata.""" + + name = "cli_prints" + + def test(self) -> None: + """Run test.""" + self.tap.print_version() + self.tap.print_about() + self.tap.print_about(output_format="json") + + +class TapDiscoveryTest(TapTestTemplate): + """Test that discovery mode generates a valid tap catalog.""" + + name = "discovery" + + def test(self) -> None: + """Run test.""" + tap1 = self.tap + tap1.run_discovery() + catalog = tap1.catalog_dict + # Reset and re-initialize with discovered catalog + kwargs = {k: v for k, v in self.runner.default_kwargs.items() if k != "catalog"} + tap2: Tap = t.cast(t.Type[Tap], self.runner.singer_class)( + config=self.runner.config, + catalog=catalog, + **kwargs, + ) + assert tap2 + + +class TapStreamConnectionTest(TapTestTemplate): + """Test that the tap can connect to each stream.""" + + name = "stream_connections" + + def test(self) -> None: + """Run test.""" + self.tap.run_connection_test() + + +class TapValidFinalStateTest(TapTestTemplate): + """Test that the final state is a valid catalog.""" + + name = "valid_final_state" + message = "Final state has in-progress markers." + + def test(self) -> None: + """Run test.""" + final_state = self.runner.state_messages[-1] + assert "progress_markers" not in final_state, self.message + + +class StreamReturnsRecordTest(StreamTestTemplate): + """Test that a stream sync returns at least 1 record.""" + + name = "returns_record" + + def test(self) -> None: + """Run test.""" + no_records_message = f"No records returned in stream '{self.stream.name}'." 
+ if ( + self.config.ignore_no_records + or self.stream.name in self.config.ignore_no_records_for_streams + ): + # only warn if this or all streams are set to ignore no records + warnings.warn(UserWarning(no_records_message), stacklevel=2) + else: + record_count = len(self.stream_records) + assert record_count > 0, no_records_message + + +class StreamCatalogSchemaMatchesRecordTest(StreamTestTemplate): + """Test all attributes in the catalog schema are present in the record schema.""" + + name = "catalog_schema_matches_record" + + def test(self) -> None: + """Run test.""" + stream_catalog_keys = set(self.stream.schema["properties"].keys()) + stream_record_keys = set().union(*(d.keys() for d in self.stream_records)) + diff = stream_catalog_keys - stream_record_keys + if diff: + warnings.warn( + UserWarning(f"Fields in catalog but not in records: ({diff})"), + stacklevel=2, + ) + + +class StreamRecordSchemaMatchesCatalogTest(StreamTestTemplate): + """Test all attributes in the record schema are present in the catalog schema.""" + + name = "record_schema_matches_catalog" + + def test(self) -> None: + """Run test.""" + stream_catalog_keys = set(self.stream.schema["properties"].keys()) + stream_record_keys = set().union(*(d.keys() for d in self.stream_records)) + diff = stream_record_keys - stream_catalog_keys + assert not diff, f"Fields in records but not in catalog: ({diff})" + + +class StreamRecordMatchesStreamSchema(StreamTestTemplate): + """Test all attributes in the record schema are present in the catalog schema.""" + + name = "record_matches_stream_schema" + + def test(self) -> None: + """Run test.""" + schema = self.stream.schema + validator = Draft7Validator( + schema, + format_checker=Draft7Validator.FORMAT_CHECKER, + ) + for record in self.stream_records: + errors = list(validator.iter_errors(record)) + error_messages = "\n".join( + [ + f"{e.message} (path: {'.'.join(str(p) for p in e.path)})" + for e in errors + if e.path + ], + ) + assert not errors, f"Record does not match stream schema: {error_messages}" + + +class StreamPrimaryKeysTest(StreamTestTemplate): + """Test all records for a stream's primary key are unique and non-null.""" + + name = "primary_keys" + + def test(self) -> None: + """Run test. + + Raises: + AssertionError: if record is missing primary key. + """ + primary_keys = self.stream.primary_keys + try: + record_ids = [ + (r[k] for k in primary_keys or []) for r in self.stream_records + ] + except KeyError as e: + msg = f"Record missing primary key: {e!s}" + raise AssertionError(msg) from e + count_unique_records = len(set(record_ids)) + count_records = len(self.stream_records) + assert count_unique_records == count_records, ( + f"Length of set of records IDs ({count_unique_records})" + f" is not equal to number of records ({count_records})." + ) + assert all( + all(k is not None for k in pk) for pk in record_ids + ), "Primary keys contain some key values that are null." + + +class AttributeIsDateTimeTest(AttributeTestTemplate): + """Test a given attribute contains unique values (ignores null values).""" + + name = "is_datetime" + + def test(self) -> None: + """Run test. + + Raises: + AssertionError: if value cannot be parsed as a datetime. + """ + try: + for v in self.non_null_attribute_values: + error_message = f"Unable to parse value ('{v}') with datetime parser." 
+ assert parser.parse(v), error_message + except parser.ParserError as e: + raise AssertionError(error_message) from e + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return bool(th.is_date_or_datetime_type(property_schema)) + + +class AttributeIsBooleanTest(AttributeTestTemplate): + """Test an attribute is of boolean datatype (or can be cast to it).""" + + name = "is_boolean" + + def test(self) -> None: + """Run test.""" + for v in self.non_null_attribute_values: + assert isinstance(v, bool) or str(v).lower() in { + "true", + "false", + }, f"Unable to cast value ('{v}') to boolean type." + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return bool(th.is_boolean_type(property_schema)) + + +class AttributeIsObjectTest(AttributeTestTemplate): + """Test that a given attribute is an object type.""" + + name = "is_object" + + def test(self) -> None: + """Run test.""" + for v in self.non_null_attribute_values: + assert isinstance(v, dict), f"Unable to cast value ('{v}') to dict type." + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return bool(th.is_object_type(property_schema)) + + +class AttributeIsIntegerTest(AttributeTestTemplate): + """Test that a given attribute can be converted to an integer type.""" + + name = "is_integer" + + def test(self) -> None: + """Run test.""" + for v in self.non_null_attribute_values: + assert isinstance(v, int) or isinstance( + int(v), + int, + ), f"Unable to cast value ('{v}') to int type." + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return bool(th.is_integer_type(property_schema)) + + +class AttributeIsNumberTest(AttributeTestTemplate): + """Test that a given attribute can be converted to a floating point number type.""" + + name = "is_numeric" + + def test(self) -> None: + """Run test. + + Raises: + AssertionError: if value cannot be cast to float type. + """ + for v in self.non_null_attribute_values: + error_message = f"Unable to cast value ('{v}') to float type." 
+ if not isinstance(v, (float, int)): + raise AssertionError(error_message) + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return bool(th.is_number_type(property_schema)) + + +class AttributeNotNullTest(AttributeTestTemplate): + """Test that a given attribute does not contain any null values.""" + + name = "not_null" + + def test(self) -> None: + """Run test.""" + for r in self.stream_records: + assert ( + r.get(self.attribute_name) is not None + ), f"Detected null values for attribute ('{self.attribute_name}')." + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Returns: + True if this test is applicable, False if not. + """ + return not bool(th.is_null_type(property_schema)) diff --git a/singer_sdk/testing/target_test_streams/__init__.py b/singer_sdk/testing/target_test_streams/__init__.py new file mode 100644 index 000000000..14d313288 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/__init__.py @@ -0,0 +1 @@ +"""Singer output samples, used for testing target behavior.""" diff --git a/singer_sdk/testing/target_test_streams/array_data.singer b/singer_sdk/testing/target_test_streams/array_data.singer new file mode 100644 index 000000000..dab02cb9a --- /dev/null +++ b/singer_sdk/testing/target_test_streams/array_data.singer @@ -0,0 +1,6 @@ +{"type": "SCHEMA", "stream": "test_array_data", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "fruits": {"type": "array","items": {"type": "string"}}}}} +{"type": "RECORD", "stream": "test_array_data", "record": {"id": 1, "fruits": [ "apple", "orange", "pear" ]}} +{"type": "RECORD", "stream": "test_array_data", "record": {"id": 2, "fruits": [ "banana", "apple" ]}} +{"type": "RECORD", "stream": "test_array_data", "record": {"id": 3, "fruits": [ "pear" ]}} +{"type": "RECORD", "stream": "test_array_data", "record": {"id": 4, "fruits": [ "orange", "banana", "apple", "pear" ]}} +{"type": "STATE", "value": {"test_array_data": 4}} diff --git a/singer_sdk/testing/target_test_streams/camelcase.singer b/singer_sdk/testing/target_test_streams/camelcase.singer new file mode 100644 index 000000000..356526973 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/camelcase.singer @@ -0,0 +1,3 @@ +{"type": "SCHEMA", "stream": "TestCamelcase", "schema": {"type": "object", "properties": { "Id": {"type": "string"}, "clientName": {"type": "string"} }}, "key_properties": ["Id"]} +{"type": "RECORD", "stream": "TestCamelcase", "record": {"Id": "1", "clientName": "Gitter Windows Desktop App"}} +{"type": "RECORD", "stream": "TestCamelcase", "record": {"Id": "2", "clientName": "Gitter iOS App"}} diff --git a/singer_sdk/testing/target_test_streams/camelcase_complex_schema.singer b/singer_sdk/testing/target_test_streams/camelcase_complex_schema.singer new file mode 
100644 index 000000000..03330c724 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/camelcase_complex_schema.singer @@ -0,0 +1,2 @@ +{"type": "SCHEMA", "stream": "ForecastingTypeToCategory", "schema": {"properties": {"Id": {"type": "string"}, "IsDeleted": {"type": ["null", "boolean"]}, "CreatedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "CreatedById": {"type": ["null", "string"]}, "LastModifiedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "LastModifiedById": {"type": ["null", "string"]}, "SystemModstamp": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "ForecastingTypeId": {"type": ["null", "string"]}, "ForecastingItemCategory": {"type": ["null", "string"]}, "DisplayPosition": {"type": ["null", "integer"]}, "IsAdjustable": {"type": ["null", "boolean"]}, "IsOwnerAdjustable": {"type": ["null", "boolean"]}}, "type": "object", "additionalProperties": false}, "key_properties": ["Id"]} +{"type": "SCHEMA", "stream": "ForecastingTypeToCategory", "schema": {"properties": {"Id": {"type": "string"}, "IsDeleted": {"type": ["null", "boolean"]}, "CreatedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "CreatedById": {"type": ["null", "string"]}, "LastModifiedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "LastModifiedById": {"type": ["null", "string"]}, "SystemModstamp": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "ForecastingTypeId": {"type": ["null", "string"]}, "ForecastingItemCategory": {"type": ["null", "string"]}, "DisplayPosition": {"type": ["null", "integer"]}, "IsAdjustable": {"type": ["null", "boolean"]}, "IsOwnerAdjustable": {"type": ["null", "boolean"]}, "age": {"type": "integer"}, "NewCamelCasedAttribute": {"type": "string"}}, "type": "object", "additionalProperties": false}, "key_properties": ["Id"]} diff --git a/singer_sdk/testing/target_test_streams/duplicate_records.singer b/singer_sdk/testing/target_test_streams/duplicate_records.singer new file mode 100644 index 000000000..b5b9480d5 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/duplicate_records.singer @@ -0,0 +1,7 @@ +{"type": "SCHEMA", "stream": "test_duplicate_records", "key_properties": ["id"], "schema": {"required": ["id", "metric"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_duplicate_records", "record": {"id": 1, "metric": 1}} +{"type": "RECORD", "stream": "test_duplicate_records", "record": {"id": 2, "metric": 2}} +{"type": "RECORD", "stream": "test_duplicate_records", "record": {"id": 1, "metric": 10}} +{"type": "RECORD", "stream": "test_duplicate_records", "record": {"id": 2, "metric": 20}} +{"type": "RECORD", "stream": "test_duplicate_records", "record": {"id": 1, "metric": 100}} +{"type": "STATE", "value": {"test_duplicate_records": 2}} diff --git a/singer_sdk/testing/target_test_streams/encoded_string_data.singer b/singer_sdk/testing/target_test_streams/encoded_string_data.singer new file mode 100644 index 000000000..80c9063a4 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/encoded_string_data.singer @@ -0,0 +1,32 @@ +{"type": "SCHEMA", "stream": "test_strings", "key_properties": ["id"], "schema": {"required": ["id", "info"], "type": "object", "properties": {"id": {"type": "integer"}, "info": {"type": "string"}}}} +{"type": "RECORD", "stream": 
"test_strings", "record": {"id": 1, "info": "simple string 2837"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 2, "info": "απλή συμβολοσειρά"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 3, "info": "简单的字串"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 4, "info": "chaîne simple"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 5, "info": "quoted \"string\""}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 6, "info": "various \" \\ \/ \n escape sequences"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 7, "info": "\u006D"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 8, "info": "\u0101"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 9, "info": "\u0199"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 10, "info": "Double quoting: \\u0000 \\u0041 \\u0001"}} +{"type": "RECORD", "stream": "test_strings", "record": {"id": 11, "info": "Control Characters in string: \u0000 \u0041 \u0001"}} +{"type": "SCHEMA", "stream": "test_strings_in_objects", "key_properties": ["id"], "schema": {"required": ["id", "info"], "type": "object", "properties": {"id": {"type": "integer"}, "info": {"required": ["name"], "type": "object", "properties": {"name": {"type": "string"}, "value": {"type": "string"}}}}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 1, "info": {"name": "simple", "value": "simple string 2837"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 2, "info": {"name": "greek", "value": "απλή συμβολοσειρά"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 3, "info": {"name": "chinese", "value": "简单的字串"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 4, "info": {"name": "french", "value": "chaîne simple"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 5, "info": {"name": "quoted string", "value": "quoted \"string\""}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 6, "info": {"name": "escape sequences", "value": "various \" \\ \/ \n escape sequences"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 7, "info": {"name": "unicode", "value": "\u006D"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 8, "info": {"name": "unicode", "value": "\u0101"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 9, "info": {"name": "unicode", "value": "\u0199"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 10, "info": {"name": "Double quoting", "value": " \\u0000 \\u0041 \\u0001"}}} +{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 11, "info": {"name": "Control Characters in string", "value": "\u0000 \u0041 \u0001"}}} +{"type": "SCHEMA", "stream": "test_strings_in_arrays", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "strings": {"type": "array", "items": {"type": "string"}}}}} +{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 1, "strings": ["simple string", "απλή συμβολοσειρά", "简单的字串"]}} +{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 2, "strings": ["chaîne simple", "quoted \"string\""]}} +{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 3, "strings": ["various \" \\ \/ \n escape sequences"]}} +{"type": "RECORD", "stream": 
"test_strings_in_arrays", "record": {"id": 4, "strings": ["\u006D", "\u0101", "\u0199"]}} +{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 5, "strings": ["aaa", "Double quoting: \\u0000 \\u0041 \\u0001"]}} +{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 6, "strings": ["bbb", "Control Characters in string: \u0000 \u0041 \u0001"]}} +{"type": "STATE", "value": {"test_strings": 11, "test_strings_in_objects": 11, "test_strings_in_arrays": 6}} diff --git a/singer_sdk/testing/target_test_streams/invalid_schema.singer b/singer_sdk/testing/target_test_streams/invalid_schema.singer new file mode 100644 index 000000000..861f2ec09 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/invalid_schema.singer @@ -0,0 +1 @@ +{"type": "SCHEMA", "stream": "test_invalid_schema", "schema": {"type": "object"}, "key_properties": []} diff --git a/singer_sdk/testing/target_test_streams/multiple_state_messages.singer b/singer_sdk/testing/target_test_streams/multiple_state_messages.singer new file mode 100644 index 000000000..bb7eee1ba --- /dev/null +++ b/singer_sdk/testing/target_test_streams/multiple_state_messages.singer @@ -0,0 +1,19 @@ +{"type": "SCHEMA", "stream": "test_multiple_state_messages_a", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "SCHEMA", "stream": "test_multiple_state_messages_b", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 1, "metric": 100}} +{"type": "STATE", "value": {"test_multiple_state_messages_a": 1, "test_multiple_state_messages_b": 0}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 2, "metric": 200}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 1, "metric": 110}} +{"type": "STATE", "value": {"test_multiple_state_messages_a": 2, "test_multiple_state_messages_b": 1}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 3, "metric": 300}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 2, "metric": 220}} +{"type": "STATE", "value": {"test_multiple_state_messages_a": 3, "test_multiple_state_messages_b": 2}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 4, "metric": 400}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 5, "metric": 500}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 3, "metric": 330}} +{"type": "STATE", "value": {"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 3}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 4, "metric": 440}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 5, "metric": 550}} +{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 6, "metric": 660}} +{"type": "STATE", "value": {"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 6}} +{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 6, "metric": 600}} diff --git a/singer_sdk/testing/target_test_streams/no_primary_keys.singer b/singer_sdk/testing/target_test_streams/no_primary_keys.singer new file mode 100644 index 000000000..0be2953de --- /dev/null +++ 
b/singer_sdk/testing/target_test_streams/no_primary_keys.singer @@ -0,0 +1,5 @@ +{"type": "SCHEMA", "stream": "test_no_pk", "key_properties": [], "schema": { "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 1, "metric": 11}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 2, "metric": 22}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 3, "metric": 33}} +{"type": "STATE", "value": {"test_no_pk": 3}} diff --git a/singer_sdk/testing/target_test_streams/no_primary_keys_append.singer b/singer_sdk/testing/target_test_streams/no_primary_keys_append.singer new file mode 100644 index 000000000..45ec1d9ea --- /dev/null +++ b/singer_sdk/testing/target_test_streams/no_primary_keys_append.singer @@ -0,0 +1,7 @@ +{"type": "SCHEMA", "stream": "test_no_pk", "key_properties": [], "schema": { "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 1, "metric": 101}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 2, "metric": 202}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 3, "metric": 303}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 4, "metric": 404}} +{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 5, "metric": 505}} +{"type": "STATE", "value": {"test_no_pk": 5}} diff --git a/singer_sdk/testing/target_test_streams/optional_attributes.singer b/singer_sdk/testing/target_test_streams/optional_attributes.singer new file mode 100644 index 000000000..777491a24 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/optional_attributes.singer @@ -0,0 +1,6 @@ +{"type": "SCHEMA", "stream": "test_optional_attributes", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "optional": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 1, "optional": "This is optional"}} +{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 2}} +{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 3, "optional": "Also optional"}} +{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 4}} +{"type": "STATE", "value": {"test_optional_attributes": 4}} diff --git a/singer_sdk/testing/target_test_streams/record_before_schema.singer b/singer_sdk/testing/target_test_streams/record_before_schema.singer new file mode 100644 index 000000000..b27ab501c --- /dev/null +++ b/singer_sdk/testing/target_test_streams/record_before_schema.singer @@ -0,0 +1,3 @@ +{"type": "RECORD", "stream": "test_record_before_schema", "record": {"id": 1, "metric": 6719}} +{"type": "SCHEMA", "stream": "test_record_before_schema", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_record_before_schema", "record": {"id": 2, "metric": 3728}} diff --git a/singer_sdk/testing/target_test_streams/record_missing_fields.singer b/singer_sdk/testing/target_test_streams/record_missing_fields.singer new file mode 100644 index 000000000..a398f6bd6 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/record_missing_fields.singer @@ -0,0 +1,4 @@ +{"type": "SCHEMA", "stream": "record_missing_fields", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "optional": 
{"type": "string"}}, "required": ["id"]}} +{"type": "RECORD", "stream": "record_missing_fields", "record": {"id": 1, "optional": "now you see me"}} +{"type": "RECORD", "stream": "record_missing_fields", "record": {"id": 2}} +{"type": "STATE", "value": {}} diff --git a/singer_sdk/testing/target_test_streams/record_missing_key_property.singer b/singer_sdk/testing/target_test_streams/record_missing_key_property.singer new file mode 100644 index 000000000..7aba5b8c5 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/record_missing_key_property.singer @@ -0,0 +1,2 @@ +{"type": "SCHEMA", "stream": "test_record_missing_key_property", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_record_missing_key_property", "record": {"metric": 8214}} diff --git a/singer_sdk/testing/target_test_streams/record_missing_required_property.singer b/singer_sdk/testing/target_test_streams/record_missing_required_property.singer new file mode 100644 index 000000000..e28b6e193 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/record_missing_required_property.singer @@ -0,0 +1,2 @@ +{"type": "SCHEMA", "stream": "test_record_missing_required_property", "key_properties": [], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_record_missing_required_property", "record": {"metric": 3215}} diff --git a/singer_sdk/testing/target_test_streams/schema_no_properties.singer b/singer_sdk/testing/target_test_streams/schema_no_properties.singer new file mode 100644 index 000000000..99db9e944 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/schema_no_properties.singer @@ -0,0 +1,6 @@ +{"type": "SCHEMA", "stream": "test_object_schema_with_properties", "key_properties": [], "schema": {"type": "object", "properties": { "object_store": {"type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}}}} +{"type": "RECORD", "stream": "test_object_schema_with_properties", "record": {"object_store": {"id": 1, "metric": 187}}} +{"type": "RECORD", "stream": "test_object_schema_with_properties", "record": {"object_store": {"id": 2, "metric": 203}}} +{"type": "SCHEMA", "stream": "test_object_schema_no_properties", "key_properties": [], "schema": {"type": "object", "properties": { "object_store": {"type": "object"}}}} +{"type": "RECORD", "stream": "test_object_schema_no_properties", "record": {"object_store": {"id": 1, "metric": 1}}} +{"type": "RECORD", "stream": "test_object_schema_no_properties", "record": {"object_store": {"id": 2, "metric": 2}}} diff --git a/singer_sdk/testing/target_test_streams/schema_updates.singer b/singer_sdk/testing/target_test_streams/schema_updates.singer new file mode 100644 index 000000000..9adfed93c --- /dev/null +++ b/singer_sdk/testing/target_test_streams/schema_updates.singer @@ -0,0 +1,11 @@ +{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 1, "a1": 101, "a2": "string1"}} +{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}}}} +{"type": 
"RECORD", "stream": "test_schema_updates", "record": {"id": 2, "a1": 102, "a2": "string2", "a3": true}} +{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}, "a4": {"type": "object", "properties": {"id": {"type": "integer"}, "value": {"type": "integer"}}}, "a5": {"type": "array", "items": {"type": "string"}}}}} +{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 3, "a1": 103, "a2": "string3", "a3": false, "a4": {"id": 1, "value": 1}, "a5": [ "banana", "apple" ]}} +{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 4, "a1": 104, "a2": "string4", "a3": true, "a4": {"id": 2, "value": 22}, "a5": [ "orange", "pear" ]}} +{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}, "a4": {"type": "object", "properties": {"id": {"type": "integer"}, "value": {"type": "integer"}}}, "a5": {"type": "array", "items": {"type": "string"}}, "a6": {"type": "integer"}}}} +{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 5, "a1": 105, "a2": "string5", "a3": false, "a4": {"id": 3, "value": 33}, "a5": [ "apple" ], "a6": 985}} +{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 6, "a1": 106, "a2": "string6", "a3": true, "a4": {"id": 4, "value": 444}, "a5": [ "banana", "orange" ], "a6": 341}} +{"type": "STATE", "value": {"test_schema_updates": 6}} diff --git a/singer_sdk/testing/target_test_streams/special_chars_in_attributes.singer b/singer_sdk/testing/target_test_streams/special_chars_in_attributes.singer new file mode 100644 index 000000000..5ec49ecfd --- /dev/null +++ b/singer_sdk/testing/target_test_streams/special_chars_in_attributes.singer @@ -0,0 +1,2 @@ +{"type": "SCHEMA", "stream": "test:SpecialChars!in?attributes", "schema": {"type": "object", "properties": {"_id": {"type": "string"}, "d": {"type": "object", "properties": {"env": {"type": "string"}, "agent:type": {"type": "string"}, "agent:os:version": {"type": "string"}}}}}, "key_properties": ["_id"]} +{"type": "RECORD", "stream": "test:SpecialChars!in?attributes", "record": {"_id": "a2e98886", "d": {"env": "prod", "agent:type": "desktop", "agent:os:version": "10.13.1"}}, "version": 1541199424491, "time_extracted": "2018-11-02T22:57:04.841020Z"} diff --git a/singer_sdk/testing/target_test_streams/user_location_data.singer b/singer_sdk/testing/target_test_streams/user_location_data.singer new file mode 100644 index 000000000..9813fb379 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/user_location_data.singer @@ -0,0 +1,15 @@ +{"type": "SCHEMA", "stream": "test_users", "key_properties": ["id"], "schema": {"required": ["id", "name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 1, "name": "Yannis"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 2, "name": "Micael"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 3, "name": "Jacob"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 4, "name": "Josh"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 5, "name": "Thomas"}} +{"type": "SCHEMA", "stream": "test_locations", "key_properties": ["id"], "schema": {"required": ["id", 
"name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 1, "name": "Philadelphia"}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 2, "name": "NY"}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 3, "name": "SF"}} +{"type": "SCHEMA", "stream": "test_user_in_location", "key_properties": ["id"], "schema": {"required": ["id", "user_id", "location_id"], "type": "object", "properties": {"id": {"type": "integer"}, "user_id": {"type": "integer"}, "location_id": {"type": "integer"}, "info": {"type": "object", "properties": {"weather": {"type": "string"}, "mood": {"type": "string"}}}}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 1, "user_id": 1, "location_id": 1, "info": {"weather": "rainy", "mood": "sad"}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 2, "user_id": 1, "location_id": 2, "info": {"weather": "sunny", "mood": "satisfied"}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 3, "user_id": 1, "location_id": 3, "info": {"weather": "sunny", "mood": "happy"}}} +{"type": "STATE", "value": {"test_users": 5, "test_locations": 3, "test_user_in_location": 3}} diff --git a/singer_sdk/testing/target_test_streams/user_location_upsert_data.singer b/singer_sdk/testing/target_test_streams/user_location_upsert_data.singer new file mode 100644 index 000000000..348e44493 --- /dev/null +++ b/singer_sdk/testing/target_test_streams/user_location_upsert_data.singer @@ -0,0 +1,18 @@ +{"type": "SCHEMA", "stream": "test_users", "key_properties": ["id"], "schema": {"required": ["id", "name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 1, "name": "Johny"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 2, "name": "George"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 5, "name": "Jim"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 8, "name": "Thomas"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 12, "name": "Paul"}} +{"type": "RECORD", "stream": "test_users", "record": {"id": 13, "name": "Mary"}} +{"type": "SCHEMA", "stream": "test_locations", "key_properties": ["id"], "schema": {"required": ["id", "name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 1, "name": "Philly"}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 3, "name": "San Francisco"}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 6, "name": "Colorado"}} +{"type": "RECORD", "stream": "test_locations", "record": {"id": 8, "name": "Boston"}} +{"type": "SCHEMA", "stream": "test_user_in_location", "key_properties": ["id"], "schema": {"required": ["id", "user_id", "location_id"], "type": "object", "properties": {"id": {"type": "integer"}, "user_id": {"type": "integer"}, "location_id": {"type": "integer"}, "info": {"type": "object", "properties": {"weather": {"type": "string"}, "mood": {"type": "string"}}}}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 1, "user_id": 1, "location_id": 4, "info": {"weather": "rainy", "mood": "sad"}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 2, "user_id": 2, "location_id": 3, "info": {"weather": "sunny", "mood": "satisfied"}}} +{"type": 
"RECORD", "stream": "test_user_in_location", "record": {"id": 6, "user_id": 3, "location_id": 2, "info": {"weather": "sunny", "mood": "happy"}}} +{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 14, "user_id": 4, "location_id": 1, "info": {"weather": "cloudy", "mood": "ok"}}} +{"type": "STATE", "value": {"test_users": 13, "test_locations": 8, "test_user_in_location": 14}} diff --git a/singer_sdk/testing/target_tests.py b/singer_sdk/testing/target_tests.py new file mode 100644 index 000000000..96e0b0d59 --- /dev/null +++ b/singer_sdk/testing/target_tests.py @@ -0,0 +1,147 @@ +"""Standard Target tests.""" + +from __future__ import annotations + +import pytest + +from singer_sdk.exceptions import ( + MissingKeyPropertiesError, + RecordsWithoutSchemaException, +) + +from .templates import TargetFileTestTemplate, TargetTestTemplate + + +class TargetArrayData(TargetFileTestTemplate): + """Test Target handles array data.""" + + name = "array_data" + + +class TargetCamelcaseComplexSchema(TargetFileTestTemplate): + """Test Target handles CaMeLcAsE record key and attributes, nested.""" + + name = "camelcase_complex_schema" + + +class TargetCamelcaseTest(TargetFileTestTemplate): + """Test Target handles CaMeLcAsE record key and attributes.""" + + name = "camelcase" + + +class TargetCliPrintsTest(TargetTestTemplate): + """Test Target correctly prints version and about information.""" + + name = "cli_prints" + + def test(self) -> None: + """Run test.""" + self.target.print_version() + self.target.print_about() + self.target.print_about(output_format="json") + + +class TargetDuplicateRecords(TargetFileTestTemplate): + """Test Target handles duplicate records.""" + + name = "duplicate_records" + + +class TargetEncodedStringData(TargetFileTestTemplate): + """Test Target handles encoded string data.""" + + name = "encoded_string_data" + + +class TargetInvalidSchemaTest(TargetFileTestTemplate): + """Test Target handles an invalid schema message.""" + + name = "invalid_schema" + + def test(self) -> None: + """Run test.""" + # TODO: the SDK should raise a better error than Exception in this case + # https://github.com/meltano/sdk/issues/1755 + with pytest.raises(Exception): # noqa: PT011, B017 + super().test() + + +class TargetMultipleStateMessages(TargetFileTestTemplate): + """Test Target correctly relays multiple received State messages (checkpoints).""" + + name = "multiple_state_messages" + + def test(self) -> None: + """Run test.""" + self.runner.sync_all() + state_messages = self.runner.state_messages + assert state_messages == [ + '{"test_multiple_state_messages_a": 1, "test_multiple_state_messages_b": 0}', # noqa: E501 + '{"test_multiple_state_messages_a": 3, "test_multiple_state_messages_b": 2}', # noqa: E501 + '{"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 6}', # noqa: E501 + ] + + +class TargetNoPrimaryKeys(TargetFileTestTemplate): + """Test Target handles records without primary keys.""" + + name = "no_primary_keys" + + +class TargetOptionalAttributes(TargetFileTestTemplate): + """Test Target handles optional record attributes.""" + + name = "optional_attributes" + + +class TargetRecordBeforeSchemaTest(TargetFileTestTemplate): + """Test Target handles records arriving before schema.""" + + name = "record_before_schema" + + def test(self) -> None: + """Run test.""" + with pytest.raises(RecordsWithoutSchemaException): + super().test() + + +class TargetRecordMissingKeyProperty(TargetFileTestTemplate): + """Test Target handles record missing key 
property.""" + + name = "record_missing_key_property" + + def test(self) -> None: + """Run test.""" + with pytest.raises(MissingKeyPropertiesError): + super().test() + + +class TargetRecordMissingRequiredProperty(TargetFileTestTemplate): + """Test Target handles record missing required property.""" + + name = "record_missing_required_property" + + +class TargetSchemaNoProperties(TargetFileTestTemplate): + """Test Target handles schema with no properties.""" + + name = "schema_no_properties" + + +class TargetSchemaUpdates(TargetFileTestTemplate): + """Test Target handles schema updates.""" + + name = "schema_updates" + + +class TargetSpecialCharsInAttributes(TargetFileTestTemplate): + """Test Target handles special chars in attributes.""" + + name = "special_chars_in_attributes" + + +class TargetRecordMissingOptionalFields(TargetFileTestTemplate): + """Test Target handles record missing optional fields.""" + + name = "record_missing_fields" diff --git a/singer_sdk/testing/templates.py b/singer_sdk/testing/templates.py new file mode 100644 index 000000000..bfb54c360 --- /dev/null +++ b/singer_sdk/testing/templates.py @@ -0,0 +1,340 @@ +"""Tap and Target Test Templates.""" + +from __future__ import annotations + +import contextlib +import typing as t +import warnings +from pathlib import Path + +from singer_sdk.helpers._compat import resources +from singer_sdk.testing import target_test_streams + +if t.TYPE_CHECKING: + from singer_sdk.streams import Stream + + from .config import SuiteConfig + from .runners import TapTestRunner, TargetTestRunner + + +class TestTemplate: + """Each Test class requires one or more of the following arguments. + + Args: + runner (SingerTestRunner): The singer runner for this test. + + Possible Args: + stream (obj, optional): Initialized stream object to be tested. + stream_name (str, optional): Name of the stream to be tested. + attribute_name (str, optional): Name of the attribute to be tested. + + Raises: + ValueError: [description] + NotImplementedError: [description] + NotImplementedError: [description] + """ + + name: str | None = None + plugin_type: str | None = None + + @property + def id(self) -> str: # noqa: A003 + """Test ID. + + Raises: + NotImplementedError: if not implemented. + """ + msg = "ID not implemented." + raise NotImplementedError(msg) + + def setup(self) -> None: + """Test setup, called before `.test()`. + + This method is useful for preparing external resources (databases, folders etc.) + before test execution. + + Raises: + NotImplementedError: if not implemented. + """ + msg = "Setup method not implemented." + raise NotImplementedError(msg) + + def test(self) -> None: + """Main Test body, called after `.setup()` and before `.validate()`.""" + self.runner.sync_all() + + def validate(self) -> None: + """Test validation, called after `.test()`. + + This method is particularly useful in Target tests, to validate that records + were correctly written to external systems. + + Raises: + NotImplementedError: if not implemented. + """ + msg = "Method not implemented." + raise NotImplementedError(msg) + + def teardown(self) -> None: + """Test Teardown. + + This method is useful for cleaning up external resources + (databases, folders etc.) after test completion. + + Raises: + NotImplementedError: if not implemented. + """ + msg = "Method not implemented." + raise NotImplementedError(msg) + + def run( + self, + config: SuiteConfig, + resource: t.Any, + runner: TapTestRunner | TargetTestRunner, + ) -> None: + """Test main run method. 
+ + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap or Target runner instance, to use with this test. + + Raises: + ValueError: if Test instance does not have `name` and `type` properties. + """ + if not self.name or not self.plugin_type: + msg = "Test must have 'name' and 'type' properties." + raise ValueError(msg) + + self.config = config + self.resource = resource + self.runner = runner + + with contextlib.suppress(NotImplementedError): + self.setup() + + try: + self.test() + with contextlib.suppress(NotImplementedError): + self.validate() + + finally: + with contextlib.suppress(NotImplementedError): + self.teardown() + + +class TapTestTemplate(TestTemplate): + """Base Tap test template.""" + + plugin_type = "tap" + + @property + def id(self) -> str: # noqa: A003 + """Test ID. + + Returns: + Test ID string. + """ + return f"tap__{self.name}" + + def run( # type: ignore[override] + self, + config: SuiteConfig, + resource: t.Any, + runner: TapTestRunner, + ) -> None: + """Test main run method. + + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap or Target runner instance, to use with this test. + """ + self.tap = runner.new_tap() + super().run(config, resource, runner) + + +class StreamTestTemplate(TestTemplate): + """Base Tap Stream test template.""" + + plugin_type = "stream" + required_kwargs: t.ClassVar[list[str]] = ["stream"] + + @property + def id(self) -> str: # noqa: A003 + """Test ID. + + Returns: + Test ID string. + """ + return f"{self.stream.name}__{self.name}" + + def run( # type: ignore[override] + self, + config: SuiteConfig, + resource: t.Any, + runner: TapTestRunner, + stream: Stream, + ) -> None: + """Test main run method. + + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap runner instance, to use with this test. + stream: A Tap Stream instance, to use with this test. + """ + self.stream = stream + self.stream_records = runner.records[stream.name] + super().run(config, resource, runner) + + +class AttributeTestTemplate(TestTemplate): + """Base Tap Stream Attribute template.""" + + plugin_type = "attribute" + + @property + def id(self) -> str: # noqa: A003 + """Test ID. + + Returns: + Test ID string. + """ + return f"{self.stream.name}__{self.attribute_name}__{self.name}" + + def run( # type: ignore[override] + self, + config: SuiteConfig, + resource: t.Any, + runner: TapTestRunner, + stream: Stream, + attribute_name: str, + ) -> None: + """Test main run method. + + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap runner instance, to use with this test. + stream: A Tap Stream instance, to use with this test. + to use with this test. + attribute_name: The name of the attribute to test. + """ + self.stream = stream + self.stream_records = runner.records[stream.name] + self.attribute_name = attribute_name + super().run(config, resource, runner) + + @property + def non_null_attribute_values(self) -> list[t.Any]: + """Extract attribute values from stream records. + + Returns: + A list of attribute values (excluding None values). 
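            If no record has a non-null value for the attribute, an empty list is
            returned and a ``UserWarning`` is emitted rather than an error, so
            sparse test data does not hard-fail the attribute tests.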
+ """ + values = [ + r[self.attribute_name] + for r in self.stream_records + if r.get(self.attribute_name) is not None + ] + if not values: + warnings.warn( + UserWarning("No records were available to test."), + stacklevel=2, + ) + return values + + @classmethod + def evaluate( + cls, + stream: Stream, # noqa: ARG003 + property_name: str, # noqa: ARG003 + property_schema: dict, # noqa: ARG003 + ) -> bool: + """Determine if this attribute test is applicable to the given property. + + Args: + stream: Parent Stream of given attribute. + property_name: Name of given attribute. + property_schema: JSON Schema of given property, in dict form. + + Raises: + NotImplementedError: if not implemented. + """ + msg = ( + "The 'evaluate' method is required for attribute tests, but not " + "implemented." + ) + raise NotImplementedError(msg) + + +class TargetTestTemplate(TestTemplate): + """Base Target test template.""" + + plugin_type = "target" + + def run( # type: ignore[override] + self, + config: SuiteConfig, + resource: t.Any, + runner: TargetTestRunner, + ) -> None: + """Test main run method. + + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap runner instance, to use with this test. + """ + self.target = runner.new_target() + super().run(config, resource, runner) + + @property + def id(self) -> str: # noqa: A003 + """Test ID. + + Returns: + Test ID string. + """ + return f"target__{self.name}" + + +class TargetFileTestTemplate(TargetTestTemplate): + """Base Target File Test Template. + + Use this when sourcing Target test input from a .singer file. + """ + + def run( # type: ignore[override] + self, + config: SuiteConfig, + resource: t.Any, + runner: TargetTestRunner, + ) -> None: + """Test main run method. + + Args: + config: SuiteConfig instance, to use for test. + resource: A generic external resource, provided by a pytest fixture. + runner: A Tap runner instance, to use with this test. + """ + # get input from file + if getattr(self, "singer_filepath", None): + assert Path( + self.singer_filepath, + ).exists(), f"Singer file {self.singer_filepath} does not exist." + runner.input_filepath = self.singer_filepath + super().run(config, resource, runner) + + @property + def singer_filepath(self) -> Path: + """Get path to singer JSONL formatted messages file. + + Files will be sourced from `./target_test_streams/<test name>.singer`. + + Returns: + The expected Path to this tests singer file. + """ + return resources.files(target_test_streams).joinpath(f"{self.name}.singer") # type: ignore[no-any-return] # noqa: E501 diff --git a/singer_sdk/typing.py b/singer_sdk/typing.py index e373c2113..fd17b9d3d 100644 --- a/singer_sdk/typing.py +++ b/singer_sdk/typing.py @@ -5,13 +5,25 @@ .. 
code-block:: python jsonschema = PropertiesList( + Property("username", StringType, required=True), + Property("password", StringType, required=True, secret=True), + Property("id", IntegerType, required=True), - Property("name", StringType), - Property("tags", ArrayType(StringType)), - Property("ratio", NumberType), + Property("foo_or_bar", StringType, allowed_values=["foo", "bar"]), + Property( + "permissions", + ArrayType( + StringType( + allowed_values=["create", "delete", "insert", "update"], + examples=["insert", "update"], + ), + ), + ), + Property("ratio", NumberType, examples=[0.25, 0.75, 1.0]), Property("days_active", IntegerType), Property("updated_on", DateTimeType), Property("is_deleted", BooleanType), + Property( "author", ObjectType( @@ -19,6 +31,7 @@ Property("name", StringType), ) ), + Property("tags", ArrayType(StringType)), Property( "groups", ArrayType( @@ -41,19 +54,30 @@ from __future__ import annotations -import sys -from typing import Generic, Mapping, TypeVar, Union, cast +import json +import typing as t import sqlalchemy -from jsonschema import validators +from jsonschema import ValidationError, validators -from singer_sdk.helpers._classproperty import classproperty -from singer_sdk.helpers._typing import append_type, get_datelike_property_type +if t.TYPE_CHECKING: + from jsonschema.protocols import Validator + +from singer_sdk.helpers._typing import ( + JSONSCHEMA_ANNOTATION_SECRET, + JSONSCHEMA_ANNOTATION_WRITEONLY, + append_type, + get_datelike_property_type, +) + +if t.TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 10): + from typing import TypeAlias # noqa: ICN003 + else: + from typing_extensions import TypeAlias -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias __all__ = [ "extend_validator_with_defaults", @@ -86,7 +110,7 @@ "PropertiesList", ] -_JsonValue: TypeAlias = Union[ +_JsonValue: TypeAlias = t.Union[ str, int, float, @@ -96,19 +120,34 @@ None, ] +T = t.TypeVar("T", bound=_JsonValue) +P = t.TypeVar("P") -def extend_validator_with_defaults(validator_class): # noqa + +def extend_validator_with_defaults(validator_class): # noqa: ANN001, ANN201 """Fill in defaults, before validating with the provided JSON Schema Validator. - See https://python-jsonschema.readthedocs.io/en/latest/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance # noqa + See + https://python-jsonschema.readthedocs.io/en/latest/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance for details. + + Args: + validator_class: The JSON Schema Validator class to extend. + + Returns: + The extended JSON Schema Validator class. """ validate_properties = validator_class.VALIDATORS["properties"] - def set_defaults(validator, properties, instance, schema): # noqa - for property, subschema in properties.items(): + def set_defaults( + validator: Validator, + properties: t.Mapping[str, dict], + instance: t.MutableMapping[str, t.Any], + schema: dict, + ) -> t.Generator[ValidationError, None, None]: + for prop, subschema in properties.items(): if "default" in subschema: - instance.setdefault(property, subschema["default"]) + instance.setdefault(prop, subschema["default"]) yield from validate_properties( validator, @@ -123,17 +162,79 @@ def set_defaults(validator, properties, instance, schema): # noqa ) -class JSONTypeHelper: +class DefaultInstanceProperty: + """Property of default instance. 
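    (In practice, this is what allows ``StringType.type_dict`` on the class and
    ``StringType().type_dict`` on an instance to return the same schema dict.)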
+ + Descriptor similar to ``property`` that decorates an instance method to retrieve + a property from the instance initialized with default parameters, if the called on + the class. + """ + + def __init__(self, fget: t.Callable) -> None: + """Initialize the decorator. + + Args: + fget: The function to decorate. + """ + self.fget = fget + + def __get__(self, instance: P, owner: type[P]) -> t.Any: # noqa: ANN401 + """Get the property value. + + Args: + instance: The instance to get the property value from. + owner: The class to get the property value from. + + Returns: + The property value. + """ + if instance is None: + instance = owner() + return self.fget(instance) + + +class JSONTypeHelper(t.Generic[T]): """Type helper base class for JSONSchema types.""" - @classproperty - def type_dict(cls) -> dict: + def __init__( + self, + *, + allowed_values: list[T] | None = None, + examples: list[T] | None = None, + ) -> None: + """Initialize the type helper. + + Args: + allowed_values: A list of allowed values. + examples: A list of example values. + """ + self.allowed_values = allowed_values + self.examples = examples + + @DefaultInstanceProperty + def type_dict(self) -> dict: """Return dict describing the type. Raises: NotImplementedError: If the derived class does not override this method. """ - raise NotImplementedError() + raise NotImplementedError + + @property + def extras(self) -> dict: + """Return dict describing the JSON Schema extras. + + Returns: + A dictionary containing the JSON Schema extras. + """ + result = {} + if self.allowed_values: + result["enum"] = self.allowed_values + + if self.examples: + result["examples"] = self.examples + + return result def to_dict(self) -> dict: """Convert to dictionary. @@ -141,28 +242,50 @@ def to_dict(self) -> dict: Returns: A JSON Schema dictionary describing the object. """ - return cast(dict, self.type_dict) + return self.type_dict # type: ignore[no-any-return] + def to_json(self, **kwargs: t.Any) -> str: + """Convert to JSON. -class StringType(JSONTypeHelper): - """String type.""" + Args: + kwargs: Additional keyword arguments to pass to json.dumps(). + + Returns: + A JSON string describing the object. + """ + return json.dumps(self.to_dict(), **kwargs) + + +class StringType(JSONTypeHelper[str]): + """String type. + + Examples: + >>> StringType.type_dict + {'type': ['string']} + >>> StringType().type_dict + {'type': ['string']} + >>> StringType(allowed_values=["a", "b"]).type_dict + {'type': ['string'], 'enum': ['a', 'b']} + """ string_format: str | None = None """String format. - See the [formats built into the JSON Schema\ - specification](https://json-schema.org/understanding-json-schema/reference/string.html#built-in-formats). + See the `formats built into the JSON Schema specification`_. Returns: A string describing the format. + + .. _`formats built into the JSON Schema specification`: + https://json-schema.org/understanding-json-schema/reference/string.html#built-in-formats """ - @classproperty - def _format(cls) -> dict: - return {"format": cls.string_format} if cls.string_format else {} + @property + def _format(self) -> dict: + return {"format": self.string_format} if self.string_format else {} - @classproperty - def type_dict(cls) -> dict: + @DefaultInstanceProperty + def type_dict(self) -> dict: """Get type dictionary. 
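        Subclasses that set ``string_format`` have the format merged in; a
        doctest-style sketch using ``DateTimeType`` from this module:

            >>> DateTimeType.type_dict
            {'type': ['string'], 'format': 'date-time'}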
Returns: @@ -170,7 +293,8 @@ def type_dict(cls) -> dict: """ return { "type": ["string"], - **cls._format, + **self._format, + **self.extras, } @@ -279,97 +403,142 @@ class RegexType(StringType): string_format = "regex" -class BooleanType(JSONTypeHelper): - """Boolean type.""" +class BooleanType(JSONTypeHelper[bool]): + """Boolean type. - @classproperty - def type_dict(cls) -> dict: + Examples: + >>> BooleanType.type_dict + {'type': ['boolean']} + >>> BooleanType().type_dict + {'type': ['boolean']} + """ + + @DefaultInstanceProperty + def type_dict(self) -> dict: """Get type dictionary. Returns: A dictionary describing the type. """ - return {"type": ["boolean"]} + return {"type": ["boolean"], **self.extras} class IntegerType(JSONTypeHelper): - """Integer type.""" + """Integer type. + + Examples: + >>> IntegerType.type_dict + {'type': ['integer']} + >>> IntegerType().type_dict + {'type': ['integer']} + >>> IntegerType(allowed_values=[1, 2]).type_dict + {'type': ['integer'], 'enum': [1, 2]} + """ - @classproperty - def type_dict(cls) -> dict: + @DefaultInstanceProperty + def type_dict(self) -> dict: """Get type dictionary. Returns: A dictionary describing the type. """ - return {"type": ["integer"]} + return {"type": ["integer"], **self.extras} -class NumberType(JSONTypeHelper): - """Number type.""" +class NumberType(JSONTypeHelper[float]): + """Number type. - @classproperty - def type_dict(cls) -> dict: + Examples: + >>> NumberType.type_dict + {'type': ['number']} + >>> NumberType().type_dict + {'type': ['number']} + >>> NumberType(allowed_values=[1.0, 2.0]).type_dict + {'type': ['number'], 'enum': [1.0, 2.0]} + """ + + @DefaultInstanceProperty + def type_dict(self) -> dict: """Get type dictionary. Returns: A dictionary describing the type. """ - return {"type": ["number"]} + return {"type": ["number"], **self.extras} -W = TypeVar("W", bound=JSONTypeHelper) +W = t.TypeVar("W", bound=JSONTypeHelper) -class ArrayType(JSONTypeHelper, Generic[W]): +class ArrayType(JSONTypeHelper[list], t.Generic[W]): """Array type.""" - def __init__(self, wrapped_type: W | type[W]) -> None: + def __init__(self, wrapped_type: W | type[W], **kwargs: t.Any) -> None: """Initialize Array type with wrapped inner type. Args: wrapped_type: JSON Schema item type inside the array. + **kwargs: Additional keyword arguments to pass to the parent class. """ self.wrapped_type = wrapped_type + super().__init__(**kwargs) @property - def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property + def type_dict(self) -> dict: # type: ignore[override] """Get type dictionary. Returns: A dictionary describing the type. """ - return {"type": "array", "items": self.wrapped_type.type_dict} + return {"type": "array", "items": self.wrapped_type.type_dict, **self.extras} -class Property(JSONTypeHelper, Generic[W]): +class Property(JSONTypeHelper[T], t.Generic[T]): """Generic Property. Should be nested within a `PropertiesList`.""" + # TODO: Make some of these arguments keyword-only. This is a breaking change. def __init__( self, name: str, - wrapped: W | type[W], - required: bool = False, - default: _JsonValue = None, - description: str = None, + wrapped: JSONTypeHelper[T] | type[JSONTypeHelper[T]], + required: bool = False, # noqa: FBT001, FBT002 + default: T | None = None, + description: str | None = None, + secret: bool | None = False, # noqa: FBT002 + allowed_values: list[T] | None = None, + examples: list[T] | None = None, ) -> None: """Initialize Property object. 
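        A doctest-style sketch of the emitted JSON Schema (exact key order of the
        resulting dict may differ):

            >>> Property("api_key", StringType, required=True, secret=True).to_dict()
            {'api_key': {'type': ['string'], 'secret': True, 'writeOnly': True}}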
+ Note: Properties containing secrets should be specified with `secret=True`. + Doing so will add the annotation `writeOnly=True`, in accordance with JSON + Schema Draft 7 and later, and `secret=True` as an additional hint to readers. + + More info: https://json-schema.org/draft-07/json-schema-release-notes.html + Args: name: Property name. wrapped: JSON Schema type of the property. required: Whether this is a required property. default: Default value in the JSON Schema. description: Long-text property description. + secret: True if this is a credential or other secret. + allowed_values: A list of allowed value options, if only specific values + are permitted. This will define the type as an 'enum'. + examples: Optional. A list of one or more sample values. These may be + displayed to the user as hints of the expected format of inputs. """ self.name = name self.wrapped = wrapped self.optional = not required self.default = default self.description = description + self.secret = secret + self.allowed_values = allowed_values or None + self.examples = examples or None @property - def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property + def type_dict(self) -> dict: # type: ignore[override] """Get type dictionary. Returns: @@ -380,14 +549,14 @@ def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property """ wrapped = self.wrapped - if isinstance(wrapped, type) and not isinstance(wrapped.type_dict, Mapping): - raise ValueError( - f"Type dict for {wrapped} is not defined. " - + "Try instantiating it with a nested type such as " - + f"{wrapped.__name__}(StringType)." + if isinstance(wrapped, type) and not isinstance(wrapped.type_dict, t.Mapping): + msg = ( + f"Type dict for {wrapped} is not defined. Try instantiating it with a " + f"nested type such as {wrapped.__name__}(StringType)." ) + raise ValueError(msg) - return cast(dict, wrapped.type_dict) + return t.cast(dict, wrapped.type_dict) def to_dict(self) -> dict: """Return a dict mapping the property name to its definition. @@ -402,6 +571,17 @@ def to_dict(self) -> dict: type_dict.update({"default": self.default}) if self.description: type_dict.update({"description": self.description}) + if self.secret: + type_dict.update( + { + JSONSCHEMA_ANNOTATION_SECRET: True, + JSONSCHEMA_ANNOTATION_WRITEONLY: True, + }, + ) + if self.allowed_values: + type_dict.update({"enum": self.allowed_values}) + if self.examples: + type_dict.update({"examples": self.examples}) return {self.name: type_dict} @@ -411,20 +591,99 @@ class ObjectType(JSONTypeHelper): def __init__( self, *properties: Property, - additional_properties: W | type[W] | None = None, + additional_properties: W | type[W] | bool | None = None, + pattern_properties: t.Mapping[str, W | type[W]] | None = None, + **kwargs: t.Any, ) -> None: """Initialize ObjectType from its list of properties. Args: properties: Zero or more attributes for this JSON object. additional_properties: A schema to match against unnamed properties in - this object. + this object, or a boolean indicating if extra properties are allowed. + pattern_properties: A dictionary of regex patterns to match against + property names, and the schema to match against the values. + **kwargs: Additional keyword arguments to pass to the `JSONTypeHelper`. + + Examples: + >>> t = ObjectType( + ... Property("name", StringType, required=True), + ... Property("age", IntegerType), + ... Property("height", NumberType), + ... additional_properties=False, + ... 
) + >>> print(t.to_json(indent=2)) + { + "type": "object", + "properties": { + "name": { + "type": [ + "string" + ] + }, + "age": { + "type": [ + "integer", + "null" + ] + }, + "height": { + "type": [ + "number", + "null" + ] + } + }, + "required": [ + "name" + ], + "additionalProperties": false + } + >>> t = ObjectType( + ... Property("name", StringType, required=True), + ... Property("age", IntegerType), + ... Property("height", NumberType), + ... additional_properties=StringType, + ... ) + >>> print(t.to_json(indent=2)) + { + "type": "object", + "properties": { + "name": { + "type": [ + "string" + ] + }, + "age": { + "type": [ + "integer", + "null" + ] + }, + "height": { + "type": [ + "number", + "null" + ] + } + }, + "required": [ + "name" + ], + "additionalProperties": { + "type": [ + "string" + ] + } + } """ - self.wrapped: list[Property] = list(properties) + self.wrapped: dict[str, Property] = {prop.name: prop for prop in properties} self.additional_properties = additional_properties + self.pattern_properties = pattern_properties + super().__init__(**kwargs) @property - def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property + def type_dict(self) -> dict: # type: ignore[override] """Get type dictionary. Returns: @@ -432,21 +691,173 @@ def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property """ merged_props = {} required = [] - for w in self.wrapped: + for w in self.wrapped.values(): merged_props.update(w.to_dict()) if not w.optional: required.append(w.name) - result = {"type": "object", "properties": merged_props} + result: dict[str, t.Any] = {"type": "object", "properties": merged_props} if required: result["required"] = required - if self.additional_properties: - result["additionalProperties"] = self.additional_properties.type_dict + if self.additional_properties is not None: + if isinstance(self.additional_properties, bool): + result["additionalProperties"] = self.additional_properties + else: + result["additionalProperties"] = self.additional_properties.type_dict + + if self.pattern_properties: + result["patternProperties"] = { + k: v.type_dict for k, v in self.pattern_properties.items() + } return result +class OneOf(JSONPointerType): + """OneOf type. + + This type allows for a value to be one of a set of types. + + Examples: + >>> t = OneOf(StringType, IntegerType) + >>> print(t.to_json(indent=2)) + { + "oneOf": [ + { + "type": [ + "string" + ] + }, + { + "type": [ + "integer" + ] + } + ] + } + """ + + def __init__(self, *types: W | type[W]) -> None: + """Initialize OneOf type. + + Args: + types: Types to choose from. + """ + self.wrapped = types + + @property + def type_dict(self) -> dict: # type: ignore[override] + """Get type dictionary. + + Returns: + A dictionary describing the type. + """ + return {"oneOf": [t.type_dict for t in self.wrapped]} + + +class Constant(JSONTypeHelper): + """A constant property. + + A property that is always the same value. + + Examples: + >>> t = Constant("foo") + >>> print(t.to_json(indent=2)) + { + "const": "foo" + } + """ + + def __init__(self, value: _JsonValue) -> None: + """Initialize Constant. + + Args: + value: Value of the constant. + """ + self.value = value + + @property + def type_dict(self) -> dict: # type: ignore[override] + """Get type dictionary. + + Returns: + A dictionary describing the type. + """ + return {"const": self.value} + + +class DiscriminatedUnion(OneOf): + """A discriminator property. 
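    Each allowed object shape is tagged with a required ``const`` property (the
    discriminator), so consumers can select the matching branch by inspecting a
    single key, e.g. an ``auth_type`` field selecting between credential shapes.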
+ + This is a special case of :class:`singer_sdk.typing.OneOf`, where values are + JSON objects, and the type of the object is determined by a property in the + object. + + The property is a :class:`singer_sdk.typing.Constant` called the discriminator + property. + """ + + def __init__(self, key: str, **options: ObjectType) -> None: + """Initialize a discriminated union type. + + Args: + key: Name of the discriminator property. + options: Mapping of discriminator values to object types. + + Examples: + >>> t = DiscriminatedUnion("species", cat=ObjectType(), dog=ObjectType()) + >>> print(t.to_json(indent=2)) + { + "oneOf": [ + { + "type": "object", + "properties": { + "species": { + "const": "cat", + "description": "Discriminator for object of type 'cat'." + } + }, + "required": [ + "species" + ] + }, + { + "type": "object", + "properties": { + "species": { + "const": "dog", + "description": "Discriminator for object of type 'dog'." + } + }, + "required": [ + "species" + ] + } + ] + } + """ + self.key = key + self.options = options + + super().__init__( + *( + ObjectType( + Property( + key, + Constant(k), + required=True, + description=f"Discriminator for object of type '{k}'.", + ), + *v.wrapped.values(), + additional_properties=v.additional_properties, + pattern_properties=v.pattern_properties, + ) + for k, v in options.items() + ), + ) + + class CustomType(JSONTypeHelper): """Accepts an arbitrary JSON Schema dictionary.""" @@ -459,7 +870,7 @@ def __init__(self, jsonschema_type_dict: dict) -> None: self._jsonschema_type_dict = jsonschema_type_dict @property - def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property + def type_dict(self) -> dict: # type: ignore[override] """Get type dictionary. Returns: @@ -471,21 +882,21 @@ def type_dict(self) -> dict: # type: ignore # OK: @classproperty vs @property class PropertiesList(ObjectType): """Properties list. A convenience wrapper around the ObjectType class.""" - def items(self) -> list[tuple[str, Property]]: + def items(self) -> t.ItemsView[str, Property]: """Get wrapped properties. Returns: List of (name, property) tuples. """ - return [(p.name, p) for p in self.wrapped] + return self.wrapped.items() - def append(self, property: Property) -> None: + def append(self, property: Property) -> None: # noqa: A002 """Append a property to the property list. Args: property: Property to add """ - self.wrapped.append(property) + self.wrapped[property.name] = property def to_jsonschema_type( @@ -526,11 +937,13 @@ def to_jsonschema_type( elif isinstance(from_type, sqlalchemy.types.TypeEngine): type_name = type(from_type).__name__ elif isinstance(from_type, type) and issubclass( - from_type, sqlalchemy.types.TypeEngine + from_type, + sqlalchemy.types.TypeEngine, ): type_name = from_type.__name__ else: - raise ValueError("Expected `str` or a SQLAlchemy `TypeEngine` object or type.") + msg = "Expected `str` or a SQLAlchemy `TypeEngine` object or type." 
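        # (Illustrative note.) Valid inputs are a SQL type name, a SQLAlchemy type
        # instance, or a SQLAlchemy type class; matching is a substring lookup against
        # sqltype_lookup, e.g. to_jsonschema_type("varchar") and
        # to_jsonschema_type(sqlalchemy.types.VARCHAR(255)) both yield
        # {"type": ["string"]}.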
+ raise ValueError(msg) # Look for the type name within the known SQL type names: for sqltype, jsonschema_type in sqltype_lookup.items(): @@ -552,20 +965,23 @@ def _jsonschema_type_check(jsonschema_type: dict, type_check: tuple[str]) -> boo """ if "type" in jsonschema_type: if isinstance(jsonschema_type["type"], (list, tuple)): - for t in jsonschema_type["type"]: - if t in type_check: + for schema_type in jsonschema_type["type"]: + if schema_type in type_check: return True - else: - if jsonschema_type.get("type") in type_check: - return True + elif jsonschema_type.get("type") in type_check: + return True - if any(t in type_check for t in jsonschema_type.get("anyOf", ())): + if any( + _jsonschema_type_check(t, type_check) for t in jsonschema_type.get("anyOf", ()) + ): return True return False -def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: +def to_sql_type( # noqa: PLR0911, C901 + jsonschema_type: dict, +) -> sqlalchemy.types.TypeEngine: """Convert JSON Schema type to a SQL type. Args: @@ -578,26 +994,26 @@ def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: datelike_type = get_datelike_property_type(jsonschema_type) if datelike_type: if datelike_type == "date-time": - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATETIME()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATETIME()) if datelike_type in "time": - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.TIME()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.TIME()) if datelike_type == "date": - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATE()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATE()) maxlength = jsonschema_type.get("maxLength") - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR(maxlength)) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR(maxlength)) if _jsonschema_type_check(jsonschema_type, ("integer",)): - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.INTEGER()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.INTEGER()) if _jsonschema_type_check(jsonschema_type, ("number",)): - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DECIMAL()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DECIMAL()) if _jsonschema_type_check(jsonschema_type, ("boolean",)): - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.BOOLEAN()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.BOOLEAN()) if _jsonschema_type_check(jsonschema_type, ("object",)): - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) if _jsonschema_type_check(jsonschema_type, ("array",)): - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) - return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) + return t.cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR()) diff --git a/tests/__init__.py b/tests/__init__.py index 27c3861ce..db2016738 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1,3 @@ """SDK tests.""" + +from __future__ import annotations diff --git a/tests/_singerlib/__init__.py b/tests/_singerlib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/_singerlib/test_catalog.py b/tests/_singerlib/test_catalog.py new file mode 100644 index 000000000..69f377137 --- /dev/null +++ 
b/tests/_singerlib/test_catalog.py @@ -0,0 +1,274 @@ +from __future__ import annotations + +import pytest + +from singer_sdk._singerlib import ( + Catalog, + CatalogEntry, + Metadata, + MetadataMapping, + SelectionMask, + StreamMetadata, +) + +METADATA_ARRAY = [ + { + "breadcrumb": [], + "metadata": { + "selected": True, + "forced-replication-method": "FULL_TABLE", + }, + }, + { + "breadcrumb": ["properties", "id"], + "metadata": { + "inclusion": "automatic", + "selected": True, + }, + }, + { + "breadcrumb": ["properties", "updated_at"], + "metadata": { + "inclusion": "automatic", + "selected": False, + }, + }, + { + "breadcrumb": ["properties", "name"], + "metadata": { + "inclusion": "available", + "selected": True, + }, + }, + { + "breadcrumb": ["properties", "an_object"], + "metadata": {"selected": False}, + }, + { + "breadcrumb": ["properties", "an_object", "properties", "nested"], + "metadata": { + "selected": True, + }, + }, + { + "breadcrumb": ["properties", "not_supported_selected"], + "metadata": { + "inclusion": "unsupported", + "selected": True, + }, + }, + { + "breadcrumb": ["properties", "not_supported_not_selected"], + "metadata": { + "inclusion": "unsupported", + "selected": False, + }, + }, + { + "breadcrumb": ["properties", "selected_by_default"], + "metadata": { + "inclusion": "available", + "selected-by-default": True, + }, + }, +] + + +def test_selection_mask(): + mask = SelectionMask( + [ + (("properties", "id"), False), + (("properties", "an_object"), False), + (("properties", "an_object", "properties", "a_string"), True), + ], + ) + # Missing root breadcrumb is selected + assert mask[()] is True + + # Explicitly deselected + assert mask[("properties", "id")] is False + + # Missing defaults to parent selection + assert mask[("properties", "name")] is True + + # Explicitly selected + assert mask[("properties", "an_object")] is False + + # Missing defaults to parent selection + assert mask[("properties", "an_object", "properties", "id")] is False + + # Explicitly selected nested property + assert mask[("properties", "an_object", "properties", "a_string")] is True + + +def test_metadata_mapping(): + mapping = MetadataMapping.from_iterable(METADATA_ARRAY) + + assert ( + mapping[()] + == mapping.root + == StreamMetadata( + selected=True, + forced_replication_method="FULL_TABLE", + ) + ) + assert mapping[("properties", "id")] == Metadata( + inclusion=Metadata.InclusionType.AUTOMATIC, + selected=True, + ) + assert mapping[("properties", "name")] == Metadata( + inclusion=Metadata.InclusionType.AVAILABLE, + selected=True, + ) + assert mapping[("properties", "missing")] == Metadata() + + selection_mask = mapping.resolve_selection() + assert selection_mask[()] is True + assert selection_mask[("properties", "id")] is True + assert selection_mask[("properties", "updated_at")] is True + assert selection_mask[("properties", "name")] is True + assert selection_mask[("properties", "missing")] is True + assert selection_mask[("properties", "an_object")] is False + assert selection_mask[("properties", "an_object", "properties", "nested")] is False + assert selection_mask[("properties", "not_supported_selected")] is False + assert selection_mask[("properties", "not_supported_not_selected")] is False + assert selection_mask[("properties", "selected_by_default")] is True + + +def test_empty_metadata_mapping(): + """Check that empty metadata mapping results in stream being selected.""" + mapping = MetadataMapping() + assert mapping._breadcrumb_is_selected(()) is True + + +def 
test_catalog_parsing(): + """Validate parsing works for a catalog and its stream entries.""" + catalog_dict = { + "streams": [ + { + "tap_stream_id": "test", + "database_name": "app_db", + "row_count": 10000, + "stream_alias": "test_alias", + "metadata": [ + { + "breadcrumb": [], + "metadata": { + "inclusion": "available", + }, + }, + { + "breadcrumb": ["properties", "a"], + "metadata": { + "inclusion": "unsupported", + }, + }, + ], + "schema": { + "type": "object", + }, + }, + ], + } + catalog = Catalog.from_dict(catalog_dict) + + assert catalog.streams[0].tap_stream_id == "test" + assert catalog.streams[0].database == "app_db" + assert catalog.streams[0].row_count == 10000 + assert catalog.streams[0].stream_alias == "test_alias" + assert catalog.get_stream("test").tap_stream_id == "test" + assert catalog["test"].metadata.to_list() == catalog_dict["streams"][0]["metadata"] + assert catalog["test"].tap_stream_id == catalog_dict["streams"][0]["tap_stream_id"] + assert catalog["test"].schema.to_dict() == {"type": "object"} + assert catalog.to_dict() == catalog_dict + + new = { + "tap_stream_id": "new", + "metadata": [], + "schema": {}, + } + entry = CatalogEntry.from_dict(new) + catalog.add_stream(entry) + assert catalog.get_stream("new") == entry + + +@pytest.mark.parametrize( + "schema,key_properties,replication_method,valid_replication_keys,schema_name", + [ + ( + {"properties": {"id": {"type": "integer"}}, "type": "object"}, + ["id"], + "FULL_TABLE", + None, + None, + ), + ( + { + "properties": { + "first_name": {"type": "string"}, + "last_name": {"type": "string"}, + "updated_at": {"type": "string", "format": "date-time"}, + }, + "type": "object", + }, + ["first_name", "last_name"], + "INCREMENTAL", + ["updated_at"], + "users", + ), + ( + { + "properties": { + "first_name": {"type": "string"}, + "last_name": {"type": "string"}, + "group": {"type": "string"}, + }, + "type": "object", + }, + ["first_name", "last_name"], + "FULL_TABLE", + None, + None, + ), + ( + {}, + [], + None, + None, + None, + ), + ], +) +def test_standard_metadata( + schema: dict, + key_properties: list[str], + replication_method: str | None, + valid_replication_keys: list[str] | None, + schema_name: str | None, +): + """Validate generated metadata.""" + metadata = MetadataMapping.get_standard_metadata( + schema=schema, + schema_name=schema_name, + key_properties=key_properties, + replication_method=replication_method, + valid_replication_keys=valid_replication_keys, + ) + + stream_metadata = metadata[()] + assert isinstance(stream_metadata, StreamMetadata) + assert stream_metadata.table_key_properties == key_properties + assert stream_metadata.forced_replication_method == replication_method + assert stream_metadata.valid_replication_keys == valid_replication_keys + assert stream_metadata.selected is None + assert stream_metadata.schema_name == schema_name + + for pk in key_properties: + pk_metadata = metadata[("properties", pk)] + assert pk_metadata.inclusion == Metadata.InclusionType.AUTOMATIC + assert pk_metadata.selected is None + + for rk in valid_replication_keys or []: + rk_metadata = metadata[("properties", rk)] + assert rk_metadata.inclusion == Metadata.InclusionType.AUTOMATIC + assert rk_metadata.selected is None diff --git a/tests/_singerlib/test_messages.py b/tests/_singerlib/test_messages.py new file mode 100644 index 000000000..47a36aca6 --- /dev/null +++ b/tests/_singerlib/test_messages.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import io +from contextlib import redirect_stdout +from 
datetime import datetime + +import pytest +from pytz import UTC, timezone + +import singer_sdk._singerlib as singer +from singer_sdk._singerlib.messages import format_message + + +def test_exclude_null_dict(): + pairs = [("a", 1), ("b", None), ("c", 3)] + assert singer.exclude_null_dict(pairs) == {"a": 1, "c": 3} + + +def test_format_message(): + message = singer.RecordMessage( + stream="test", + record={"id": 1, "name": "test"}, + ) + assert format_message(message) == ( + '{"type": "RECORD", "stream": "test", "record": {"id": 1, "name": "test"}}' + ) + + +def test_write_message(): + message = singer.RecordMessage( + stream="test", + record={"id": 1, "name": "test"}, + ) + with redirect_stdout(io.StringIO()) as out: + singer.write_message(message) + + assert out.getvalue() == ( + '{"type": "RECORD", "stream": "test", "record": {"id": 1, "name": "test"}}\n' + ) + + +def test_record_message(): + record = singer.RecordMessage( + stream="test", + record={"id": 1, "name": "test"}, + ) + assert record.stream == "test" + assert record.record == {"id": 1, "name": "test"} + assert record.to_dict() == { + "type": "RECORD", + "stream": "test", + "record": {"id": 1, "name": "test"}, + } + + assert singer.RecordMessage.from_dict(record.to_dict()) == record + + +def test_record_message_naive_time_extracted(): + """Check that record message' time_extracted must be timezone-aware.""" + with pytest.raises(ValueError, match="must be either None or an aware datetime"): + singer.RecordMessage( + stream="test", + record={"id": 1, "name": "test"}, + time_extracted=datetime(2021, 1, 1), # noqa: DTZ001 + ) + + +def test_record_message_time_extracted_to_utc(): + """Check that record message's time_extracted is converted to UTC.""" + naive = datetime(2021, 1, 1, 12) # noqa: DTZ001 + nairobi = timezone("Africa/Nairobi") + + record = singer.RecordMessage( + stream="test", + record={"id": 1, "name": "test"}, + time_extracted=nairobi.localize(naive), + ) + assert record.time_extracted == datetime(2021, 1, 1, 9, tzinfo=UTC) + + +def test_schema_message(): + schema = singer.SchemaMessage( + stream="test", + schema={"type": "object", "properties": {"id": {"type": "integer"}}}, + ) + assert schema.stream == "test" + assert schema.schema == { + "type": "object", + "properties": {"id": {"type": "integer"}}, + } + assert schema.to_dict() == { + "type": "SCHEMA", + "stream": "test", + "schema": {"type": "object", "properties": {"id": {"type": "integer"}}}, + } + + assert singer.SchemaMessage.from_dict(schema.to_dict()) == schema + + +def test_schema_messages_string_bookmark_properties(): + """Check that schema message's bookmark_properties can be a string.""" + schema = singer.SchemaMessage( + stream="test", + schema={"type": "object", "properties": {"id": {"type": "integer"}}}, + bookmark_properties="id", + ) + assert schema.bookmark_properties == ["id"] + + +def test_bookmark_properties_not_string_or_list(): + """Check that schema message's bookmark_properties must be a string or list.""" + with pytest.raises(ValueError, match="must be a string or list"): + singer.SchemaMessage( + stream="test", + schema={"type": "object", "properties": {"id": {"type": "integer"}}}, + bookmark_properties=1, + ) + + +def test_state_message(): + state = singer.StateMessage(value={"bookmarks": {"test": {"id": 1}}}) + assert state.value == {"bookmarks": {"test": {"id": 1}}} + assert state.to_dict() == { + "type": "STATE", + "value": {"bookmarks": {"test": {"id": 1}}}, + } + + assert singer.StateMessage.from_dict(state.to_dict()) == state + + 
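# Illustrative sketch (not part of the SDK test suite; underscore-prefixed so pytest
# does not collect it): the to_dict()/format_message round trip shown above for
# RECORD messages applies to STATE messages in the same way.
def _example_state_message_round_trip() -> None:
    state = singer.StateMessage(value={"bookmarks": {"test": {"id": 1}}})
    assert format_message(state) == (
        '{"type": "STATE", "value": {"bookmarks": {"test": {"id": 1}}}}'
    )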
+def test_activate_version_message(): + version = singer.ActivateVersionMessage(stream="test", version=1) + assert version.stream == "test" + assert version.version == 1 + assert version.to_dict() == { + "type": "ACTIVATE_VERSION", + "stream": "test", + "version": 1, + } + + assert singer.ActivateVersionMessage.from_dict(version.to_dict()) == version diff --git a/tests/_singerlib/test_schema.py b/tests/_singerlib/test_schema.py new file mode 100644 index 000000000..07589f431 --- /dev/null +++ b/tests/_singerlib/test_schema.py @@ -0,0 +1,252 @@ +from __future__ import annotations + +import pytest + +from singer_sdk._singerlib import Schema, resolve_schema_references + +STRING_SCHEMA = Schema(type="string", maxLength=32) +STRING_DICT = {"type": "string", "maxLength": 32} +INTEGER_SCHEMA = Schema(type="integer", maximum=1000000) +INTEGER_DICT = {"type": "integer", "maximum": 1000000} +ARRAY_SCHEMA = Schema(type="array", items=INTEGER_SCHEMA) +ARRAY_DICT = {"type": "array", "items": INTEGER_DICT} +OBJECT_SCHEMA = Schema( + type="object", + properties={ + "a_string": STRING_SCHEMA, + "an_array": ARRAY_SCHEMA, + }, + additionalProperties=True, + required=["a_string"], +) +OBJECT_DICT = { + "type": "object", + "properties": { + "a_string": STRING_DICT, + "an_array": ARRAY_DICT, + }, + "additionalProperties": True, + "required": ["a_string"], +} + + +@pytest.mark.parametrize( + "schema,expected", + [ + pytest.param( + STRING_SCHEMA, + STRING_DICT, + id="string_to_dict", + ), + pytest.param( + INTEGER_SCHEMA, + INTEGER_DICT, + id="integer_to_dict", + ), + pytest.param( + ARRAY_SCHEMA, + ARRAY_DICT, + id="array_to_dict", + ), + pytest.param( + OBJECT_SCHEMA, + OBJECT_DICT, + id="object_to_dict", + ), + ], +) +def test_schema_to_dict(schema, expected): + assert schema.to_dict() == expected + + +@pytest.mark.parametrize( + "pydict,expected", + [ + pytest.param( + STRING_DICT, + STRING_SCHEMA, + id="schema_from_string_dict", + ), + pytest.param( + INTEGER_DICT, + INTEGER_SCHEMA, + id="schema_from_integer_dict", + ), + pytest.param( + ARRAY_DICT, + ARRAY_SCHEMA, + id="schema_from_array_dict", + ), + pytest.param( + OBJECT_DICT, + OBJECT_SCHEMA, + id="schema_from_object_dict", + ), + ], +) +def test_schema_from_dict(pydict, expected): + assert Schema.from_dict(pydict) == expected + + +@pytest.mark.parametrize( + "schema,refs,expected", + [ + pytest.param( + { + "type": "object", + "definitions": {"string_type": {"type": "string"}}, + "properties": {"name": {"$ref": "#/definitions/string_type"}}, + }, + None, + { + "type": "object", + "definitions": {"string_type": {"type": "string"}}, + "properties": {"name": {"type": "string"}}, + }, + id="resolve_schema_references", + ), + pytest.param( + { + "type": "object", + "properties": { + "name": {"$ref": "references.json#/definitions/string_type"}, + }, + }, + {"references.json": {"definitions": {"string_type": {"type": "string"}}}}, + { + "type": "object", + "properties": {"name": {"type": "string"}}, + }, + id="resolve_schema_references_with_refs", + ), + pytest.param( + { + "type": "object", + "definitions": {"string_type": {"type": "string"}}, + "patternProperties": {".+": {"$ref": "#/definitions/string_type"}}, + }, + None, + { + "type": "object", + "definitions": {"string_type": {"type": "string"}}, + "patternProperties": {".+": {"type": "string"}}, + }, + id="resolve_schema_references_with_pattern_properties", + ), + pytest.param( + { + "type": "object", + "properties": { + "dogs": {"type": "array", "items": {"$ref": "doggie.json#/dogs"}}, + }, + }, + { 
+ "doggie.json": { + "dogs": { + "type": "object", + "properties": { + "breed": {"type": "string"}, + "name": {"type": "string"}, + }, + }, + }, + }, + { + "type": "object", + "properties": { + "dogs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "breed": {"type": "string"}, + "name": {"type": "string"}, + }, + }, + }, + }, + }, + id="resolve_schema_references_with_items", + ), + pytest.param( + { + "type": "object", + "properties": { + "thing": { + "type": "object", + "properties": { + "name": { + "$ref": "references.json#/definitions/string_type", + }, + }, + }, + }, + }, + {"references.json": {"definitions": {"string_type": {"type": "string"}}}}, + { + "type": "object", + "properties": { + "thing": { + "type": "object", + "properties": {"name": {"type": "string"}}, + }, + }, + }, + id="resolve_schema_nested_references", + ), + pytest.param( + { + "type": "object", + "properties": { + "name": {"$ref": "references.json#/definitions/string_type"}, + }, + }, + { + "references.json": { + "definitions": {"string_type": {"$ref": "second_reference.json"}}, + }, + "second_reference.json": {"type": "string"}, + }, + {"type": "object", "properties": {"name": {"type": "string"}}}, + id="resolve_schema_indirect_references", + ), + pytest.param( + { + "type": "object", + "properties": { + "name": { + "$ref": "references.json#/definitions/string_type", + "still_here": "yep", + }, + }, + }, + {"references.json": {"definitions": {"string_type": {"type": "string"}}}}, + { + "type": "object", + "properties": {"name": {"type": "string", "still_here": "yep"}}, + }, + id="resolve_schema_preserves_existing_fields", + ), + pytest.param( + { + "anyOf": [ + {"$ref": "references.json#/definitions/first_type"}, + {"$ref": "references.json#/definitions/second_type"}, + ], + }, + { + "references.json": { + "definitions": { + "first_type": {"type": "string"}, + "second_type": {"type": "integer"}, + }, + }, + }, + {"anyOf": [{"type": "string"}, {"type": "integer"}]}, + id="resolve_schema_any_of", + ), + ], +) +def test_resolve_schema_references(schema, refs, expected): + """Test resolving schema references.""" + assert resolve_schema_references(schema, refs) == expected diff --git a/tests/_singerlib/test_utils.py b/tests/_singerlib/test_utils.py new file mode 100644 index 000000000..f48b9684b --- /dev/null +++ b/tests/_singerlib/test_utils.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from datetime import datetime + +import pytest +import pytz + +from singer_sdk._singerlib import strftime, strptime_to_utc +from singer_sdk._singerlib.utils import NonUTCDatetimeError + + +def test_small_years(): + assert ( + strftime(datetime(90, 1, 1, tzinfo=pytz.UTC)) == "0090-01-01T00:00:00.000000Z" + ) + + +def test_round_trip(): + now = datetime.now(tz=pytz.UTC) + dtime = strftime(now) + parsed_datetime = strptime_to_utc(dtime) + formatted_datetime = strftime(parsed_datetime) + assert dtime == formatted_datetime + + +@pytest.mark.parametrize( + "dtimestr", + [ + "2021-01-01T00:00:00.000000Z", + "2021-01-01T00:00:00.000000+00:00", + "2021-01-01T00:00:00.000000+06:00", + "2021-01-01T00:00:00.000000-04:00", + ], + ids=["Z", "offset+0", "offset+6", "offset-4"], +) +def test_strptime_to_utc(dtimestr): + assert strptime_to_utc(dtimestr).tzinfo == pytz.UTC + + +def test_stftime_non_utc(): + now = datetime.now(tz=pytz.timezone("America/New_York")) + with pytest.raises(NonUTCDatetimeError): + strftime(now) diff --git a/tests/conftest.py b/tests/conftest.py index eff8e32c5..7e7c39958 100644 --- 
a/tests/conftest.py +++ b/tests/conftest.py @@ -2,12 +2,29 @@ from __future__ import annotations -import os import pathlib +import platform import shutil +import typing as t import pytest -from _pytest.config import Config +from sqlalchemy import __version__ as sqlalchemy_version + +from singer_sdk import SQLConnector +from singer_sdk import typing as th +from singer_sdk.helpers.capabilities import PluginCapabilities +from singer_sdk.sinks import BatchSink, SQLSink +from singer_sdk.target_base import SQLTarget, Target + +if t.TYPE_CHECKING: + from _pytest.config import Config + + from singer_sdk.helpers.capabilities import CapabilitiesEnum + + +SYSTEMS = {"linux", "darwin", "windows"} + +pytest_plugins = ("singer_sdk.testing.pytest_plugin",) def pytest_collection_modifyitems(config: Config, items: list[pytest.Item]): @@ -21,16 +38,150 @@ def pytest_collection_modifyitems(config: Config, items: list[pytest.Item]): item.add_marker("external") +def pytest_runtest_setup(item): + supported_systems = SYSTEMS.intersection(mark.name for mark in item.iter_markers()) + system = platform.system().lower() + if supported_systems and system not in supported_systems: + pytest.skip(f"cannot run on platform {system}") + + +def pytest_report_header() -> list[str]: + """Return a list of strings to be displayed in the header of the report.""" + return [f"sqlalchemy: {sqlalchemy_version}"] + + @pytest.fixture(scope="class") -def outdir() -> str: +def outdir() -> t.Generator[str, None, None]: """Create a temporary directory for cookiecutters and target output.""" name = ".output/" try: - os.mkdir(name) + pathlib.Path(name).mkdir(parents=True) except FileExistsError: # Directory already exists shutil.rmtree(name) - os.mkdir(name) + pathlib.Path(name).mkdir(parents=True) yield name shutil.rmtree(name) + + +@pytest.fixture(scope="session") +def snapshot_dir() -> pathlib.Path: + """Return the path to the snapshot directory.""" + return pathlib.Path("tests/snapshots/") + + +class BatchSinkMock(BatchSink): + """A mock Sink class.""" + + name = "batch-sink-mock" + + def __init__( + self, + target: TargetMock, + stream_name: str, + schema: dict, + key_properties: list[str] | None, + ): + """Create the Mock batch-based sink.""" + super().__init__(target, stream_name, schema, key_properties) + self.target = target + + def process_record(self, record: dict, context: dict) -> None: + """Tracks the count of processed records.""" + self.target.num_records_processed += 1 + super().process_record(record, context) + + def process_batch(self, context: dict) -> None: + """Write to mock trackers.""" + self.target.records_written.extend(context["records"]) + self.target.num_batches_processed += 1 + + @property + def key_properties(self) -> list[str]: + return [key.upper() for key in super().key_properties] + + +class TargetMock(Target): + """A mock Target class.""" + + name = "target-mock" + config_jsonschema = th.PropertiesList().to_dict() + default_sink_class = BatchSinkMock + capabilities: t.ClassVar[list[CapabilitiesEnum]] = [ + *Target.capabilities, + PluginCapabilities.BATCH, + ] + + def __init__(self, *args, **kwargs): + """Create the Mock target sync.""" + super().__init__(*args, **kwargs) + self.state_messages_written: list[dict] = [] + self.records_written: list[dict] = [] + self.num_records_processed: int = 0 + self.num_batches_processed: int = 0 + + def _write_state_message(self, state: dict): + """Emit the stream's latest state.""" + super()._write_state_message(state) + self.state_messages_written.append(state) + + 
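A minimal usage sketch for the TargetMock and BatchSinkMock fixtures above, assuming the legacy target_sync_test helper in singer_sdk.testing is still importable with the signature shown here; the helper name, its keyword arguments, and the exact assertions are illustrative assumptions rather than confirmed behavior of this changeset.

import io

from singer_sdk.testing import target_sync_test

target = TargetMock()

# Two Singer messages: a SCHEMA declaring the "users" stream, then one RECORD.
singer_input = io.StringIO(
    '{"type": "SCHEMA", "stream": "users", '
    '"schema": {"type": "object", "properties": {"id": {"type": "integer"}}}, '
    '"key_properties": ["id"]}\n'
    '{"type": "RECORD", "stream": "users", "record": {"id": 1}}\n',
)

# With finalize=True all sinks are drained, so BatchSinkMock.process_batch runs
# and the tracking attributes defined on TargetMock above are populated.
target_sync_test(target, input=singer_input, finalize=True)

assert target.num_records_processed == 1
assert target.records_written == [{"id": 1}]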
+class SQLConnectorMock(SQLConnector): + """A Mock SQLConnector class.""" + + +class SQLSinkMock(SQLSink): + """A mock Sink class.""" + + name = "sql-sink-mock" + connector_class = SQLConnectorMock + + def __init__( + self, + target: SQLTargetMock, + stream_name: str, + schema: dict, + key_properties: list[str] | None, + connector: SQLConnector | None = None, + ): + """Create the Mock batch-based sink.""" + self._connector: SQLConnector + self._connector = connector or self.connector_class(dict(target.config)) + super().__init__(target, stream_name, schema, key_properties, connector) + self.target = target + + def process_record(self, record: dict, context: dict) -> None: + """Tracks the count of processed records.""" + self.target.num_records_processed += 1 + super().process_record(record, context) + + def process_batch(self, context: dict) -> None: + """Write to mock trackers.""" + self.target.records_written.extend(context["records"]) + self.target.num_batches_processed += 1 + + @property + def key_properties(self) -> list[str]: + return [key.upper() for key in super().key_properties] + + +class SQLTargetMock(SQLTarget): + """A mock Target class.""" + + name = "sql-target-mock" + config_jsonschema = th.PropertiesList().to_dict() + default_sink_class = SQLSinkMock + + def __init__(self, *args, **kwargs): + """Create the Mock target sync.""" + super().__init__(*args, **kwargs) + self.state_messages_written: list[dict] = [] + self.records_written: list[dict] = [] + self.num_records_processed: int = 0 + self.num_batches_processed: int = 0 + + def _write_state_message(self, state: dict): + """Emit the stream's latest state.""" + super()._write_state_message(state) + self.state_messages_written.append(state) diff --git a/tests/cookiecutters/test_cookiecutter.py b/tests/cookiecutters/test_cookiecutter.py deleted file mode 100644 index 2815c6ea0..000000000 --- a/tests/cookiecutters/test_cookiecutter.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Test cookiecutter template.""" - -import logging -import os -from logging import getLogger -from pathlib import Path - -import black -import yaml -from cookiecutter.main import cookiecutter -from flake8.api import legacy as flake8 -from mypy import api - -getLogger("flake8").propagate = False - - -def pytest_generate_tests(metafunc): - """Generate test cases for each Cookiecutter template.""" - id_list = [] - argvalues = [] - - for template in ["tap", "target"]: - template_dir = f"cookiecutter/{template}-template" - case_key = f"{template}_id" - test_input_file = os.path.join(template_dir, "cookiecutter.tests.yml") - - for case in yaml.safe_load(Path(test_input_file).read_text())["tests"]: - id_list.append(case[case_key]) - argvalues.append([template_dir, case]) - - metafunc.parametrize( - ["cookiecutter_dir", "cookiecutter_input"], - argvalues, - ids=id_list, - scope="function", - ) - - -def test_cookiecutter(outdir: str, cookiecutter_dir: str, cookiecutter_input: dict): - """Generate and validate project from Cookiecutter.""" - style_guide_easy = flake8.get_style_guide( - ignore=["E302", "E303", "E305", "F401", "W391"] - ) - style_guide_strict = flake8.get_style_guide( - ignore=[ - "F401", # "imported but unused" - "W292", # "no newline at end of file" - "W391", # "blank line at end of file" - ] - ) - cookiecutter( - template=cookiecutter_dir, - output_dir=outdir, - extra_context=cookiecutter_input, - overwrite_if_exists=True, - no_input=True, - ) - for outfile in Path(outdir).glob("**/*.py"): - filepath = str(outfile.absolute()) - report = 
style_guide_easy.check_files([filepath]) - errors = report.get_statistics("E") - assert ( - not errors - ), f"Flake8 found violations in first pass of {filepath}: {errors}" - mypy_out = api.run([filepath, "--config", str(Path(outdir) / Path("tox.ini"))]) - mypy_msg = str(mypy_out[0]) - if not mypy_msg.startswith("Success:"): - logging.exception(f"MyPy validation failed: {mypy_msg}") - assert not mypy_msg, f"MyPy validation failed for file {filepath}" - report = style_guide_strict.check_files([filepath]) - errors = report.get_statistics("E") - assert ( - not errors - ), f"Flake8 found violations in second pass of {filepath}: {errors}" - black.format_file_in_place( - Path(filepath), - fast=False, - mode=black.FileMode(), - write_back=black.WriteBack.NO, - ) diff --git a/tests/core/__init__.py b/tests/core/__init__.py index 92c901e62..cae16b520 100644 --- a/tests/core/__init__.py +++ b/tests/core/__init__.py @@ -1 +1,3 @@ """SDK core tests.""" + +from __future__ import annotations diff --git a/tests/core/configuration/__init__.py b/tests/core/configuration/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/core/configuration/test_dict_config.py b/tests/core/configuration/test_dict_config.py index 8b1e3ef4f..7d94b06f4 100644 --- a/tests/core/configuration/test_dict_config.py +++ b/tests/core/configuration/test_dict_config.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json import os from pathlib import Path @@ -21,7 +23,7 @@ @pytest.fixture def config_file1(tmpdir) -> str: filepath: str = tmpdir.join("file1.json") - with open(filepath, "w") as f: + with Path(filepath).open("w") as f: json.dump({"prop2": "from-file-1"}, f) return filepath @@ -30,7 +32,7 @@ def config_file1(tmpdir) -> str: @pytest.fixture def config_file2(tmpdir) -> str: filepath: str = tmpdir.join("file2.json") - with open(filepath, "w") as f: + with Path(filepath).open("w") as f: json.dump({"prop3": ["from-file-2"]}, f) return filepath @@ -50,14 +52,20 @@ def test_get_env_var_config(): assert env_config["prop1"] == "hello" assert env_config["prop3"] == ["val1", "val2"] assert "PROP1" not in env_config - assert "prop2" not in env_config and "PROP2" not in env_config - assert "prop4" not in env_config and "PROP4" not in env_config + assert "prop2" not in env_config + assert "PROP2" not in env_config + assert "prop4" not in env_config + assert "PROP4" not in env_config no_env_config = parse_environment_config(CONFIG_JSONSCHEMA, "PLUGIN_TEST_") - assert "prop1" not in no_env_config and "PROP1" not in env_config - assert "prop2" not in no_env_config and "PROP2" not in env_config - assert "prop3" not in no_env_config and "PROP3" not in env_config - assert "prop4" not in no_env_config and "PROP4" not in env_config + assert "prop1" not in no_env_config + assert "PROP1" not in env_config + assert "prop2" not in no_env_config + assert "PROP2" not in env_config + assert "prop3" not in no_env_config + assert "PROP3" not in env_config + assert "prop4" not in no_env_config + assert "PROP4" not in env_config def test_get_dotenv_config(tmpdir, monkeypatch: pytest.MonkeyPatch): @@ -81,9 +89,8 @@ def test_get_env_var_config_not_parsable(): "PLUGIN_TEST_PROP1": "hello", "PLUGIN_TEST_PROP3": '["repeated"]', }, - ): - with pytest.raises(ValueError): - parse_environment_config(CONFIG_JSONSCHEMA, "PLUGIN_TEST_") + ), pytest.raises(ValueError, match="A bracketed list was detected"): + parse_environment_config(CONFIG_JSONSCHEMA, "PLUGIN_TEST_") def test_merge_config_sources(config_file1, config_file2): diff 
--git a/tests/core/conftest.py b/tests/core/conftest.py deleted file mode 100644 index ca3030ce4..000000000 --- a/tests/core/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Tap, target and stream test fixtures.""" - -import pytest - - -@pytest.fixture -def csv_config(outdir: str) -> dict: - """Get configuration dictionary for target-csv.""" - return {"target_folder": outdir} diff --git a/tests/core/resources/batch.1.jsonl.gz b/tests/core/resources/batch.1.jsonl.gz new file mode 100644 index 000000000..393bd2953 Binary files /dev/null and b/tests/core/resources/batch.1.jsonl.gz differ diff --git a/tests/core/resources/batch.2.jsonl.gz b/tests/core/resources/batch.2.jsonl.gz new file mode 100644 index 000000000..c06fcd59d Binary files /dev/null and b/tests/core/resources/batch.2.jsonl.gz differ diff --git a/tests/core/rest/__init__.py b/tests/core/rest/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/core/rest/conftest.py b/tests/core/rest/conftest.py index 0ee924f3f..13d7eb9eb 100644 --- a/tests/core/rest/conftest.py +++ b/tests/core/rest/conftest.py @@ -1,7 +1,12 @@ """REST fixtures.""" +from __future__ import annotations + +import typing as t + import pytest from memoization.memoization import cached +from requests.auth import HTTPProxyAuth from singer_sdk.authenticators import APIAuthenticatorBase, SingletonMeta from singer_sdk.streams import RESTStream @@ -16,7 +21,7 @@ class SimpleRESTStream(RESTStream): """A REST stream for testing.""" url_base = "https://example.com" - schema = { + schema: t.ClassVar[dict] = { "type": "object", "properties": {}, } @@ -51,6 +56,15 @@ def authenticator(self) -> NaiveAuthenticator: return NaiveAuthenticator(stream=self) +class ProxyAuthStream(SimpleRESTStream): + """A stream with digest authentication.""" + + @property + def authenticator(self): + """Stream authenticator.""" + return HTTPProxyAuth("username", "password") + + class SimpleTap(Tap): """A REST tap for testing.""" @@ -65,6 +79,7 @@ def discover_streams(self): SingletonAuthStream(self, name="reused_single_auth_stream"), CachedAuthStream(self, name="cached_auth_stream"), CachedAuthStream(self, name="other_cached_auth_stream"), + ProxyAuthStream(self, name="proxy_auth_stream"), ] diff --git a/tests/core/rest/test_authenticators.py b/tests/core/rest/test_authenticators.py index 9d18a7728..0226c4aef 100644 --- a/tests/core/rest/test_authenticators.py +++ b/tests/core/rest/test_authenticators.py @@ -2,9 +2,10 @@ from __future__ import annotations +import typing as t + import jwt import pytest -import requests_mock from cryptography.hazmat.primitives.asymmetric.rsa import ( RSAPrivateKey, RSAPublicKey, @@ -16,10 +17,15 @@ PrivateFormat, PublicFormat, ) +from requests.auth import HTTPProxyAuth, _basic_auth_str from singer_sdk.authenticators import OAuthAuthenticator, OAuthJWTAuthenticator -from singer_sdk.streams import RESTStream -from singer_sdk.tap_base import Tap + +if t.TYPE_CHECKING: + import requests_mock + + from singer_sdk.streams import RESTStream + from singer_sdk.tap_base import Tap @pytest.mark.parametrize( @@ -66,7 +72,10 @@ ], ) def test_authenticator_is_reused( - rest_tap: Tap, stream_name: str, other_stream_name: str, auth_reused: bool + rest_tap: Tap, + stream_name: str, + other_stream_name: str, + auth_reused: bool, ): """Validate that the stream's authenticator is a singleton.""" stream: RESTStream = rest_tap.streams[stream_name] @@ -173,7 +182,7 @@ def test_oauth_jwt_authenticator_payload( ): class _FakeOAuthJWTAuthenticator(OAuthJWTAuthenticator): 
private_key = private_key_string - oauth_request_body = {"some": "payload"} + oauth_request_body = {"some": "payload"} # noqa: RUF012 authenticator = _FakeOAuthJWTAuthenticator(stream=rest_tap.streams["some_stream"]) @@ -182,3 +191,12 @@ class _FakeOAuthJWTAuthenticator(OAuthJWTAuthenticator): token = payload["assertion"] assert jwt.decode(token, public_key_string, algorithms=["RS256"]) == body + + +def test_requests_library_auth(rest_tap: Tap): + """Validate that a requests.auth object can be used as an authenticator.""" + stream: RESTStream = rest_tap.streams["proxy_auth_stream"] + r = stream.prepare_request(None, None) + + assert isinstance(stream.authenticator, HTTPProxyAuth) + assert r.headers["Proxy-Authorization"] == _basic_auth_str("username", "password") diff --git a/tests/core/rest/test_backoff.py b/tests/core/rest/test_backoff.py index 96e237d63..fbe391e55 100644 --- a/tests/core/rest/test_backoff.py +++ b/tests/core/rest/test_backoff.py @@ -1,4 +1,7 @@ +from __future__ import annotations + import json +import typing as t try: from contextlib import nullcontext @@ -19,7 +22,7 @@ class CustomResponseValidationStream(RESTStream): url_base = "https://badapi.test" name = "imabadapi" - schema = {"type": "object", "properties": {}} + schema: t.ClassVar[dict] = {"type": "object", "properties": {}} path = "/dummy" class StatusMessage(str, Enum): @@ -33,9 +36,11 @@ def validate_response(self, response: requests.Response): super().validate_response(response) data = response.json() if data["status"] == self.StatusMessage.ERROR: - raise FatalAPIError("Error message found :(") + msg = "Error message found :(" + raise FatalAPIError(msg) if data["status"] == self.StatusMessage.UNAVAILABLE: - raise RetriableAPIError("API is unavailable") + msg = "API is unavailable" + raise RetriableAPIError(msg) @pytest.fixture @@ -144,7 +149,10 @@ def test_status_message_api(custom_validation_stream, message, expectation): ], ) def test_rate_limiting_status_override( - basic_rest_stream, rate_limit_codes, response_status, expectation + basic_rest_stream, + rate_limit_codes, + response_status, + expectation, ): fake_response = requests.Response() fake_response.status_code = response_status diff --git a/tests/core/rest/test_pagination.py b/tests/core/rest/test_pagination.py new file mode 100644 index 000000000..23dce9841 --- /dev/null +++ b/tests/core/rest/test_pagination.py @@ -0,0 +1,354 @@ +"""Tests generic paginator classes.""" + +from __future__ import annotations + +import json +import typing as t + +import pytest +from requests import Response + +from singer_sdk.helpers.jsonpath import extract_jsonpath +from singer_sdk.pagination import ( + BaseAPIPaginator, + BaseHATEOASPaginator, + BaseOffsetPaginator, + BasePageNumberPaginator, + HeaderLinkPaginator, + JSONPathPaginator, + SimpleHeaderPaginator, + SinglePagePaginator, + first, +) + + +def test_paginator_base_missing_implementation(): + """Validate that `BaseAPIPaginator` implementation requires `get_next`.""" + + with pytest.raises( + TypeError, + match="Can't instantiate abstract class .* get_next", + ): + BaseAPIPaginator(0) + + +def test_single_page_paginator(): + """Validate single page paginator.""" + + response = Response() + paginator = SinglePagePaginator() + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + paginator.advance(response) + assert paginator.finished + assert paginator.current_value is None + assert paginator.count == 1 + + +def 
test_paginator_page_number_missing_implementation(): + """Validate that `BasePageNumberPaginator` implementation requires `has_more`.""" + + with pytest.raises( + TypeError, + match="Can't instantiate abstract class .* has_more", + ): + BasePageNumberPaginator(1) + + +def test_paginator_offset_missing_implementation(): + """Validate that `BaseOffsetPaginator` implementation requires `has_more`.""" + + with pytest.raises( + TypeError, + match="Can't instantiate abstract class .* has_more", + ): + BaseOffsetPaginator(0, 100) + + +def test_paginator_hateoas_missing_implementation(): + """Validate that `BaseHATEOASPaginator` implementation requires `get_next_url`.""" + + with pytest.raises( + TypeError, + match="Can't instantiate abstract class .* get_next_url", + ): + BaseHATEOASPaginator() + + +def test_paginator_attributes(): + """Validate paginator that uses the page number.""" + + response = Response() + paginator = JSONPathPaginator(jsonpath="$.nextPageToken") + assert str(paginator) == "JSONPathPaginator<None>" + + response._content = b'{"nextPageToken": "abc"}' + paginator.advance(response) + assert str(paginator) == "JSONPathPaginator<abc>" + + +def test_paginator_loop(): + """Validate paginator that uses the page number.""" + + response = Response() + paginator = JSONPathPaginator(jsonpath="$.nextPageToken") + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response._content = b'{"nextPageToken": "abc"}' + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == "abc" + assert paginator.count == 1 + + response._content = b'{"nextPageToken": "abc"}' + with pytest.raises(RuntimeError, match="Loop detected in pagination"): + paginator.advance(response) + + +def test_paginator_page_number(): + """Validate paginator that uses the page number.""" + + class _TestPageNumberPaginator(BasePageNumberPaginator): + def has_more(self, response: Response) -> bool: + return response.json()["hasMore"] + + has_more_response = b'{"hasMore": true}' + no_more_response = b'{"hasMore": false}' + + response = Response() + paginator = _TestPageNumberPaginator(0) + assert not paginator.finished + assert paginator.current_value == 0 + assert paginator.count == 0 + + response._content = has_more_response + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == 1 + assert paginator.count == 1 + + response._content = has_more_response + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == 2 + assert paginator.count == 2 + + response._content = no_more_response + paginator.advance(response) + assert paginator.finished + assert paginator.count == 3 + + +def test_paginator_offset(): + """Validate paginator that uses the page offset.""" + + class _TestOffsetPaginator(BaseOffsetPaginator): + def __init__( + self, + start_value: int, + page_size: int, + records_jsonpath: str, + *args: t.Any, + **kwargs: t.Any, + ) -> None: + super().__init__(start_value, page_size, *args, **kwargs) + self._records_jsonpath = records_jsonpath + + def has_more(self, response: Response) -> bool: + """Check if response has any records. + + Args: + response: API response object. + + Returns: + Boolean flag used to indicate if the endpoint has more pages. 
+ """ + try: + first( + extract_jsonpath( + self._records_jsonpath, + response.json(), + ), + ) + except StopIteration: + return False + + return True + + response = Response() + paginator = _TestOffsetPaginator(0, 2, "$[*]") + assert not paginator.finished + assert paginator.current_value == 0 + assert paginator.count == 0 + + response._content = b'[{"id": 1}, {"id": 2}]' + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == 2 + assert paginator.count == 1 + + response._content = b'[{"id": 3}]' + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == 4 + assert paginator.count == 2 + + response._content = b"[]" + paginator.advance(response) + assert paginator.finished + assert paginator.count == 3 + + +def test_paginator_jsonpath(): + """Validate paginator that uses JSONPath.""" + + response = Response() + paginator = JSONPathPaginator(jsonpath="$.nextPageToken") + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response._content = b'{"nextPageToken": "abc"}' + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == "abc" + assert paginator.count == 1 + + response._content = b'{"nextPageToken": null}' + paginator.advance(response) + assert paginator.finished + assert paginator.count == 2 + + +def test_paginator_header(): + """Validate paginator that uses response headers.""" + + key = "X-Next-Page" + response = Response() + paginator = SimpleHeaderPaginator(key=key) + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response.headers[key] = "abc" + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value == "abc" + assert paginator.count == 1 + + response.headers[key] = None + paginator.advance(response) + assert paginator.finished + assert paginator.count == 2 + + +def test_paginator_header_links(): + """Validate paginator that uses HATEOAS links.""" + + api_hostname = "my.api.test" + resource_path = "/path/to/resource" + + response = Response() + paginator = HeaderLinkPaginator() + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response.headers.update( + {"Link": f"<https://{api_hostname}{resource_path}?page=2&limit=100>; rel=next"}, + ) + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.hostname == api_hostname + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=2&limit=100" + assert paginator.count == 1 + + response.headers.update( + { + "Link": ( + f"<https://{api_hostname}{resource_path}?page=3&limit=100>;rel=next," + f"<https://{api_hostname}{resource_path}?page=2&limit=100>;rel=back" + ), + }, + ) + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.hostname == api_hostname + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=3&limit=100" + assert paginator.count == 2 + + response.headers.update( + {"Link": "<https://myapi.test/path/to/resource?page=3&limit=100>;rel=back"}, + ) + paginator.advance(response) + assert paginator.finished + assert paginator.count == 3 + + +def test_paginator_custom_hateoas(): + """Validate paginator that uses HATEOAS links.""" + + class _CustomHATEOASPaginator(BaseHATEOASPaginator): + def get_next_url(self, response: Response) -> str | None: + 
"""Get a parsed HATEOAS link for the next, if the response has one.""" + + try: + return first( + extract_jsonpath( + "$.links[?(@.rel=='next')].href", + response.json(), + ), + ) + except StopIteration: + return None + + resource_path = "/path/to/resource" + + response = Response() + paginator = _CustomHATEOASPaginator() + assert not paginator.finished + assert paginator.current_value is None + assert paginator.count == 0 + + response._content = json.dumps( + { + "links": [ + { + "rel": "next", + "href": f"{resource_path}?page=2&limit=100", + }, + ], + }, + ).encode() + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=2&limit=100" + assert paginator.count == 1 + + response._content = json.dumps( + { + "links": [ + { + "rel": "next", + "href": f"{resource_path}?page=3&limit=100", + }, + ], + }, + ).encode() + paginator.advance(response) + assert not paginator.finished + assert paginator.current_value.path == resource_path + assert paginator.current_value.query == "page=3&limit=100" + assert paginator.count == 2 + + response._content = json.dumps({"links": []}).encode() + paginator.advance(response) + assert paginator.finished + assert paginator.count == 3 diff --git a/tests/core/sinks/__init__.py b/tests/core/sinks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/core/sinks/test_sdc_metadata.py b/tests/core/sinks/test_sdc_metadata.py new file mode 100644 index 000000000..c07ac4f6a --- /dev/null +++ b/tests/core/sinks/test_sdc_metadata.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from freezegun import freeze_time + +from tests.conftest import BatchSinkMock, TargetMock + + +def test_sdc_metadata(): + with freeze_time("2023-01-01T00:00:00+00:00"): + target = TargetMock() + + sink = BatchSinkMock( + target, + "users", + {"type": "object", "properties": {"id": {"type": "integer"}}}, + ["id"], + ) + + record_message = { + "type": "RECORD", + "stream": "users", + "record": {"id": 1}, + "time_extracted": "2021-01-01T00:00:00+00:00", + "version": 100, + } + record = record_message["record"] + + with freeze_time("2023-01-01T00:05:00+00:00"): + sink._add_sdc_metadata_to_record(record, record_message, {}) + + assert record == { + "id": 1, + "_sdc_extracted_at": "2021-01-01T00:00:00+00:00", + "_sdc_received_at": "2023-01-01T00:05:00+00:00", + "_sdc_batched_at": "2023-01-01T00:05:00+00:00", + "_sdc_deleted_at": None, + "_sdc_sequence": 1672531500000, + "_sdc_table_version": 100, + "_sdc_sync_started_at": 1672531200000, + } + + sink._add_sdc_metadata_to_schema() + assert sink.schema == { + "type": "object", + "properties": { + "id": {"type": "integer"}, + "_sdc_extracted_at": {"type": ["null", "string"], "format": "date-time"}, + "_sdc_received_at": {"type": ["null", "string"], "format": "date-time"}, + "_sdc_batched_at": {"type": ["null", "string"], "format": "date-time"}, + "_sdc_deleted_at": {"type": ["null", "string"], "format": "date-time"}, + "_sdc_sequence": {"type": ["null", "integer"]}, + "_sdc_table_version": {"type": ["null", "integer"]}, + "_sdc_sync_started_at": {"type": ["null", "integer"]}, + }, + } diff --git a/tests/core/test_about.py b/tests/core/test_about.py new file mode 100644 index 000000000..cd458ceab --- /dev/null +++ b/tests/core/test_about.py @@ -0,0 +1,74 @@ +"""Test the AboutInfo class.""" + +from __future__ import annotations + +import typing as t + +import pytest + +from singer_sdk.about import AboutFormatter, 
AboutInfo +from singer_sdk.helpers.capabilities import TapCapabilities + +if t.TYPE_CHECKING: + from pathlib import Path + + from pytest_snapshot.plugin import Snapshot + +_format_to_extension = { + "text": "txt", + "json": "json", + "markdown": "md", +} + + +@pytest.fixture(scope="module") +def about_info() -> AboutInfo: + return AboutInfo( + name="tap-example", + description="Example tap for Singer SDK", + version="0.1.1", + sdk_version="1.0.0", + supported_python_versions=["3.6", "3.7", "3.8"], + capabilities=[ + TapCapabilities.CATALOG, + TapCapabilities.DISCOVER, + TapCapabilities.STATE, + ], + settings={ + "properties": { + "start_date": { + "type": "string", + "format": "date-time", + "description": "Start date for the tap to extract data from.", + }, + "api_key": { + "type": "string", + "description": "API key for the tap to use.", + }, + }, + "required": ["api_key"], + }, + ) + + +@pytest.mark.snapshot() +@pytest.mark.parametrize( + "about_format", + [ + "text", + "json", + "markdown", + ], +) +def test_about_format( + snapshot: Snapshot, + snapshot_dir: Path, + about_info: AboutInfo, + about_format: str, +): + snapshot.snapshot_dir = snapshot_dir.joinpath("about_format") + + formatter = AboutFormatter.get_formatter(about_format) + output = formatter.format_about(about_info) + snapshot_name = f"{about_format}.snap.{_format_to_extension[about_format]}" + snapshot.assert_match(output, snapshot_name) diff --git a/tests/core/test_batch.py b/tests/core/test_batch.py new file mode 100644 index 000000000..6efb3b34a --- /dev/null +++ b/tests/core/test_batch.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +import decimal +import re +from dataclasses import asdict + +import pytest + +from singer_sdk.batch import JSONLinesBatcher +from singer_sdk.helpers._batch import ( + BaseBatchFileEncoding, + BatchConfig, + JSONLinesEncoding, + StorageTarget, +) + + +@pytest.mark.parametrize( + "encoding,expected", + [ + (JSONLinesEncoding("gzip"), {"compression": "gzip", "format": "jsonl"}), + (JSONLinesEncoding(), {"compression": None, "format": "jsonl"}), + ], + ids=["jsonl-compression-gzip", "jsonl-compression-none"], +) +def test_encoding_as_dict(encoding: BaseBatchFileEncoding, expected: dict) -> None: + """Test encoding as dict.""" + assert asdict(encoding) == expected + + +def test_storage_get_url(): + storage = StorageTarget("file://root_dir") + + with storage.fs(create=True) as fs: + url = fs.geturl("prefix--file.jsonl.gz") + assert url.startswith("file://") + assert url.replace("\\", "/").endswith("root_dir/prefix--file.jsonl.gz") + + +def test_storage_get_s3_url(): + storage = StorageTarget("s3://testing123:testing123@test_bucket") + + with storage.fs(create=True) as fs: + url = fs.geturl("prefix--file.jsonl.gz") + assert url.startswith( + "https://s3.amazonaws.com/test_bucket/prefix--file.jsonl.gz", + ) + + +@pytest.mark.parametrize( + "file_url,root", + [ + pytest.param( + "file:///Users/sdk/path/to/file", + "file:///Users/sdk/path/to", + id="local", + ), + pytest.param( + "s3://test_bucket/prefix--file.jsonl.gz", + "s3://test_bucket", + id="s3", + ), + ], +) +def test_storage_from_url(file_url: str, root: str): + """Test storage target from URL.""" + head, _ = StorageTarget.split_url(file_url) + target = StorageTarget.from_url(head) + assert target.root == root + + +@pytest.mark.parametrize( + "file_url,expected", + [ + pytest.param( + "file:///Users/sdk/path/to/file", + ("file:///Users/sdk/path/to", "file"), + id="local", + ), + pytest.param( + "s3://bucket/path/to/file", + 
("s3://bucket/path/to", "file"), + id="s3", + ), + pytest.param( + "file://C:\\Users\\sdk\\path\\to\\file", + ("file://C:\\Users\\sdk\\path\\to", "file"), + marks=(pytest.mark.windows,), + id="windows-local", + ), + pytest.param( + "file://\\\\remotemachine\\C$\\batches\\file", + ("file://\\\\remotemachine\\C$\\batches", "file"), + marks=(pytest.mark.windows,), + id="windows-local", + ), + ], +) +def test_storage_split_url(file_url: str, expected: tuple): + """Test storage target split URL.""" + assert StorageTarget.split_url(file_url) == expected + + +def test_json_lines_batcher(): + batcher = JSONLinesBatcher( + "tap-test", + "stream-test", + batch_config=BatchConfig( + encoding=JSONLinesEncoding("gzip"), + storage=StorageTarget("file:///tmp/sdk-batches"), + batch_size=2, + ), + ) + records = [ + {"id": 1, "numeric": decimal.Decimal("1.0")}, + {"id": 2, "numeric": decimal.Decimal("2.0")}, + {"id": 3, "numeric": decimal.Decimal("3.0")}, + ] + + batches = list(batcher.get_batches(records)) + assert len(batches) == 2 + assert all(len(batch) == 1 for batch in batches) + assert all( + re.match(r".*tap-test--stream-test-.*\.json.gz", filepath) + for batch in batches + for filepath in batch + ) diff --git a/tests/core/test_capabilities.py b/tests/core/test_capabilities.py index ddabe563f..49149469a 100644 --- a/tests/core/test_capabilities.py +++ b/tests/core/test_capabilities.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import warnings from inspect import currentframe, getframeinfo @@ -16,13 +18,13 @@ class DummyCapabilitiesEnum(CapabilitiesEnum): def test_deprecated_capabilities(): with warnings.catch_warnings(): warnings.simplefilter("error") - DummyCapabilitiesEnum.MY_SUPPORTED_FEATURE + _ = DummyCapabilitiesEnum.MY_SUPPORTED_FEATURE with pytest.warns( DeprecationWarning, match="is deprecated. 
No longer supported", ) as record: - DummyCapabilitiesEnum.MY_DEPRECATED_FEATURE + _ = DummyCapabilitiesEnum.MY_DEPRECATED_FEATURE warning = record.list[0] frameinfo = getframeinfo(currentframe()) diff --git a/tests/core/test_catalog_selection.py b/tests/core/test_catalog_selection.py index f0019ba40..f6653c25e 100644 --- a/tests/core/test_catalog_selection.py +++ b/tests/core/test_catalog_selection.py @@ -1,11 +1,13 @@ """Test catalog selection features.""" +from __future__ import annotations + import logging from copy import deepcopy import pytest -import singer_sdk.helpers._singer as singer +import singer_sdk._singerlib as singer from singer_sdk.helpers._catalog import ( get_selected_schema, pop_deselected_record_properties, @@ -201,7 +203,6 @@ def test_schema_selection( mask, logging.getLogger(), ) - # selected_schema["properties"]["required"] = [] assert ( selected_schema["properties"] == PropertiesList( @@ -220,7 +221,9 @@ def test_schema_selection( def test_record_selection( - catalog_entry_obj: singer.CatalogEntry, selection_test_cases, caplog + catalog_entry_obj: singer.CatalogEntry, + selection_test_cases, + caplog, ): """Test that record selection rules are correctly applied to SCHEMA messages.""" caplog.set_level(logging.DEBUG) diff --git a/tests/core/test_connector_sql.py b/tests/core/test_connector_sql.py new file mode 100644 index 000000000..58ba59ec7 --- /dev/null +++ b/tests/core/test_connector_sql.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +from decimal import Decimal +from unittest import mock + +import pytest +import sqlalchemy +from sqlalchemy.dialects import sqlite + +from singer_sdk.connectors import SQLConnector +from singer_sdk.exceptions import ConfigValidationError + + +def stringify(in_dict): + return {k: str(v) for k, v in in_dict.items()} + + +class TestConnectorSQL: + """Test the SQLConnector class.""" + + @pytest.fixture + def connector(self): + return SQLConnector(config={"sqlalchemy_url": "sqlite:///"}) + + @pytest.mark.parametrize( + "method_name,kwargs,context,unrendered_statement,rendered_statement", + [ + ( + "get_column_add_ddl", + { + "table_name": "full.table.name", + "column_name": "column_name", + "column_type": sqlalchemy.types.Text(), + }, + { + "table_name": "full.table.name", + "create_column_clause": sqlalchemy.schema.CreateColumn( + sqlalchemy.Column( + "column_name", + sqlalchemy.types.Text(), + ), + ), + }, + "ALTER TABLE %(table_name)s ADD COLUMN %(create_column_clause)s", + "ALTER TABLE full.table.name ADD COLUMN column_name TEXT", + ), + ( + "get_column_rename_ddl", + { + "table_name": "full.table.name", + "column_name": "old_name", + "new_column_name": "new_name", + }, + { + "table_name": "full.table.name", + "column_name": "old_name", + "new_column_name": "new_name", + }, + "ALTER TABLE %(table_name)s RENAME COLUMN %(column_name)s to %(new_column_name)s", # noqa: E501 + "ALTER TABLE full.table.name RENAME COLUMN old_name to new_name", + ), + ( + "get_column_alter_ddl", + { + "table_name": "full.table.name", + "column_name": "column_name", + "column_type": sqlalchemy.types.String(), + }, + { + "table_name": "full.table.name", + "column_name": "column_name", + "column_type": sqlalchemy.types.String(), + }, + "ALTER TABLE %(table_name)s ALTER COLUMN %(column_name)s (%(column_type)s)", # noqa: E501 + "ALTER TABLE full.table.name ALTER COLUMN column_name (VARCHAR)", + ), + ], + ) + def test_get_column_ddl( + self, + connector, + method_name, + kwargs, + context, + unrendered_statement, + rendered_statement, + ): + method = 
getattr(connector, method_name) + column_ddl = method(**kwargs) + + assert stringify(column_ddl.context) == stringify(context) + assert column_ddl.statement == unrendered_statement + + statement = str( + column_ddl.compile( + dialect=sqlite.dialect(), + compile_kwargs={"literal_binds": True}, + ), + ) + assert statement == rendered_statement + + def test_remove_collation_text_type(self): + remove_collation = SQLConnector.remove_collation + test_collation = "SQL_Latin1_General_CP1_CI_AS" + current_type = sqlalchemy.types.Text(collation=test_collation) + current_type_collation = remove_collation(current_type) + # Check collation was set to None by the function + assert current_type.collation is None + # Check that we get the same collation we put in back out + assert current_type_collation == test_collation + + def test_remove_collation_non_text_type(self): + remove_collation = SQLConnector.remove_collation + current_type = sqlalchemy.types.Integer() + current_type_collation = remove_collation(current_type) + # Check there is not a collation attribute + assert not hasattr(current_type, "collation") + # Check that we get the same type we put in + assert str(current_type) == "INTEGER" + # Check that this variable is missing + assert current_type_collation is None + + def test_update_collation_text_type(self): + update_collation = SQLConnector.update_collation + test_collation = "SQL_Latin1_General_CP1_CI_AS" + compatible_type = sqlalchemy.types.Text(collation=None) + update_collation(compatible_type, test_collation) + # Check collation was set to the value we put in + assert compatible_type.collation == test_collation + + def test_update_collation_non_text_type(self): + update_collation = SQLConnector.update_collation + test_collation = "SQL_Latin1_General_CP1_CI_AS" + compatible_type = sqlalchemy.types.Integer() + update_collation(compatible_type, test_collation) + # Check there is not a collation attribute + assert not hasattr(compatible_type, "collation") + # Check that we get the same type we put in + assert str(compatible_type) == "INTEGER" + + def test_create_engine_returns_new_engine(self, connector): + engine1 = connector.create_engine() + engine2 = connector.create_engine() + assert engine1 is not engine2 + + def test_engine_creates_and_returns_cached_engine(self, connector): + assert not connector._cached_engine + engine1 = connector._engine + engine2 = connector._cached_engine + assert engine1 is engine2 + + def test_deprecated_functions_warn(self, connector): + with pytest.deprecated_call(): + connector.create_sqlalchemy_engine() + with pytest.deprecated_call(): + connector.create_sqlalchemy_connection() + with pytest.deprecated_call(): + _ = connector.connection + + def test_connect_calls_engine(self, connector): + with mock.patch.object( + SQLConnector, + "_engine", + ) as mock_engine, connector._connect() as _: + mock_engine.connect.assert_called_once() + + def test_connect_calls_connect(self, connector): + attached_engine = connector._engine + with mock.patch.object( + attached_engine, + "connect", + ) as mock_conn, connector._connect() as _: + mock_conn.assert_called_once() + + def test_connect_raises_on_operational_failure(self, connector): + with pytest.raises( + sqlalchemy.exc.OperationalError, + ) as _, connector._connect() as conn: + conn.execute(sqlalchemy.text("SELECT * FROM fake_table")) + + def test_rename_column_uses_connect_correctly(self, connector): + attached_engine = connector._engine + # Ends up using the attached engine + with 
mock.patch.object(attached_engine, "connect") as mock_conn: + connector.rename_column("fake_table", "old_name", "new_name") + mock_conn.assert_called_once() + # Uses the _connect method + with mock.patch.object(connector, "_connect") as mock_connect_method: + connector.rename_column("fake_table", "old_name", "new_name") + mock_connect_method.assert_called_once() + + def test_get_slalchemy_url_raises_if_not_in_config(self, connector): + with pytest.raises(ConfigValidationError): + connector.get_sqlalchemy_url({}) + + def test_dialect_uses_engine(self, connector): + attached_engine = connector._engine + with mock.patch.object(attached_engine, "dialect") as _: + res = connector._dialect + assert res == attached_engine.dialect + + def test_merge_sql_types_text_current_max(self, connector): + current_type = sqlalchemy.types.VARCHAR(length=None) + sql_type = sqlalchemy.types.VARCHAR(length=255) + compatible_sql_type = connector.merge_sql_types([current_type, sql_type]) + # Check that the current VARCHAR(MAX) type is kept + assert compatible_sql_type is current_type + + def test_merge_sql_types_text_current_greater_than(self, connector): + current_type = sqlalchemy.types.VARCHAR(length=255) + sql_type = sqlalchemy.types.VARCHAR(length=64) + compatible_sql_type = connector.merge_sql_types([current_type, sql_type]) + # Check the current greater VARCHAR(255) is kept + assert compatible_sql_type is current_type + + def test_merge_sql_types_text_proposed_max(self, connector): + current_type = sqlalchemy.types.VARCHAR(length=64) + sql_type = sqlalchemy.types.VARCHAR(length=None) + compatible_sql_type = connector.merge_sql_types([current_type, sql_type]) + # Check the current VARCHAR(64) is chosen over default VARCHAR(max) + assert compatible_sql_type is current_type + + def test_merge_sql_types_text_current_less_than(self, connector): + current_type = sqlalchemy.types.VARCHAR(length=64) + sql_type = sqlalchemy.types.VARCHAR(length=255) + compatible_sql_type = connector.merge_sql_types([current_type, sql_type]) + # Check that VARCHAR(255) is chosen over the lesser current VARCHAR(64) + assert compatible_sql_type is sql_type + + @pytest.mark.parametrize( + "types,expected_type", + [ + pytest.param( + [sqlalchemy.types.Integer(), sqlalchemy.types.Numeric()], + sqlalchemy.types.Integer, + id="integer-numeric", + ), + pytest.param( + [sqlalchemy.types.Numeric(), sqlalchemy.types.Integer()], + sqlalchemy.types.Numeric, + id="numeric-integer", + ), + pytest.param( + [ + sqlalchemy.types.Integer(), + sqlalchemy.types.String(), + sqlalchemy.types.Numeric(), + ], + sqlalchemy.types.String, + id="integer-string-numeric", + ), + ], + ) + def test_merge_generic_sql_types( + self, + connector: SQLConnector, + types: list[sqlalchemy.types.TypeEngine], + expected_type: type[sqlalchemy.types.TypeEngine], + ): + merged_type = connector.merge_sql_types(types) + assert isinstance(merged_type, expected_type) + + def test_engine_json_serialization(self, connector: SQLConnector): + engine = connector._engine + meta = sqlalchemy.MetaData() + table = sqlalchemy.Table( + "test_table", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True), + sqlalchemy.Column("attrs", sqlalchemy.JSON), + ) + meta.create_all(engine) + with engine.connect() as conn: + conn.execute( + table.insert(), + [ + {"attrs": {"x": Decimal("1.0")}}, + {"attrs": {"x": Decimal("2.0"), "y": [1, 2, 3]}}, + ], + ) + result = conn.execute(table.select()) + assert result.fetchall() == [ + (1, {"x": Decimal("1.0")}), + (2, {"x": Decimal("2.0"), 
"y": [1, 2, 3]}), + ] diff --git a/tests/core/test_generic_tests.py b/tests/core/test_generic_tests.py deleted file mode 100644 index f919aa30d..000000000 --- a/tests/core/test_generic_tests.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Test the generic tests from `singer_sdk.testing`.""" - -from pathlib import Path - -from samples.sample_tap_countries.countries_tap import SampleTapCountries -from singer_sdk.testing import get_standard_tap_tests - -PARQUET_SAMPLE_FILENAME = Path(__file__).parent / Path("./resources/testfile.parquet") -PARQUET_TEST_CONFIG = {"filepath": str(PARQUET_SAMPLE_FILENAME)} - - -def test_countries_tap_standard_tests(): - """Run standard tap tests against Countries tap.""" - tests = get_standard_tap_tests(SampleTapCountries) - for test in tests: - test() diff --git a/tests/core/test_io.py b/tests/core/test_io.py new file mode 100644 index 000000000..c8de02447 --- /dev/null +++ b/tests/core/test_io.py @@ -0,0 +1,55 @@ +"""Test IO operations.""" + +from __future__ import annotations + +import decimal +import json +from contextlib import nullcontext + +import pytest + +from singer_sdk.io_base import SingerReader + + +class DummyReader(SingerReader): + def _process_activate_version_message(self, message_dict: dict) -> None: + pass + + def _process_batch_message(self, message_dict: dict) -> None: + pass + + def _process_record_message(self, message_dict: dict) -> None: + pass + + def _process_schema_message(self, message_dict: dict) -> None: + pass + + def _process_state_message(self, message_dict: dict) -> None: + pass + + +@pytest.mark.parametrize( + "line,expected,exception", + [ + pytest.param( + "not-valid-json", + None, + pytest.raises(json.decoder.JSONDecodeError), + id="unparsable", + ), + pytest.param( + '{"type": "RECORD", "stream": "users", "record": {"id": 1, "value": 1.23}}', # noqa: E501 + { + "type": "RECORD", + "stream": "users", + "record": {"id": 1, "value": decimal.Decimal("1.23")}, + }, + nullcontext(), + id="record", + ), + ], +) +def test_deserialize(line, expected, exception): + reader = DummyReader() + with exception: + assert reader.deserialize_json(line) == expected diff --git a/tests/core/test_jsonschema_helpers.py b/tests/core/test_jsonschema_helpers.py index 175d0b577..e1369dcba 100644 --- a/tests/core/test_jsonschema_helpers.py +++ b/tests/core/test_jsonschema_helpers.py @@ -1,11 +1,29 @@ """Test sample sync.""" +from __future__ import annotations + import re -from typing import List +import typing as t +from textwrap import dedent import pytest - -from singer_sdk.streams.core import Stream +from jsonschema import Draft6Validator + +from singer_sdk.helpers._typing import ( + JSONSCHEMA_ANNOTATION_SECRET, + JSONSCHEMA_ANNOTATION_WRITEONLY, + is_array_type, + is_boolean_type, + is_date_or_datetime_type, + is_datetime_type, + is_integer_type, + is_null_type, + is_number_type, + is_object_type, + is_secret_type, + is_string_array_type, + is_string_type, +) from singer_sdk.tap_base import Tap from singer_sdk.typing import ( ArrayType, @@ -13,6 +31,7 @@ CustomType, DateTimeType, DateType, + DiscriminatedUnion, DurationType, EmailType, HostnameType, @@ -35,6 +54,24 @@ UUIDType, ) +if t.TYPE_CHECKING: + from pathlib import Path + + from pytest_snapshot.plugin import Snapshot + + from singer_sdk.streams.core import Stream + +TYPE_FN_CHECKS: set[t.Callable] = { + is_array_type, + is_boolean_type, + is_date_or_datetime_type, + is_datetime_type, + is_integer_type, + is_secret_type, + is_string_array_type, + is_string_type, +} + class ConfigTestTap(Tap): """Test 
tap class.""" @@ -43,14 +80,56 @@ class ConfigTestTap(Tap): config_jsonschema = PropertiesList( Property("host", StringType, required=True), Property("username", StringType, required=True), - Property("password", StringType, required=True), + Property("password", StringType, required=True, secret=True), Property("batch_size", IntegerType, default=-1), ).to_dict() - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> list[Stream]: return [] +def test_to_json(): + schema = PropertiesList( + Property( + "test_property", + StringType, + description="A test property", + required=True, + ), + Property( + "test_property_2", + StringType, + description="A test property", + ), + additional_properties=False, + ) + assert schema.to_json(indent=4) == dedent( + """\ + { + "type": "object", + "properties": { + "test_property": { + "type": [ + "string" + ], + "description": "A test property" + }, + "test_property_2": { + "type": [ + "string", + "null" + ], + "description": "A test property" + } + }, + "required": [ + "test_property" + ], + "additionalProperties": false + }""", + ) + + def test_nested_complex_objects(): test1a = Property( "Datasets", @@ -63,22 +142,24 @@ def test_nested_complex_objects(): ObjectType( Property("DatasetId", StringType), Property("DatasetName", StringType), - ) + ), ), ) test2b = test2a.to_dict() - assert test1a and test1b and test2a and test2b + assert test1a + assert test1b + assert test2a + assert test2b def test_default_value(): prop = Property("test_property", StringType, default="test_property_value") assert prop.to_dict() == { - "test_property": {"type": ["string", "null"], "default": "test_property_value"} + "test_property": {"type": ["string", "null"], "default": "test_property_value"}, } def test_tap_config_default_injection(): - config_dict = {"host": "gitlab.com", "username": "foo", "password": "bar"} tap = ConfigTestTap(config=config_dict, parse_env_config=False, catalog={}) @@ -111,7 +192,7 @@ def test_property_description(): "test_property": { "type": ["string", "null"], "description": text, - } + }, } @@ -253,12 +334,180 @@ def test_inbuilt_type(json_type: JSONTypeHelper, expected_json_schema: dict): assert json_type.type_dict == expected_json_schema +@pytest.mark.parametrize( + "property_obj,expected_jsonschema,type_fn_checks_true", + [ + ( + Property("my_prop1", StringType, required=True), + {"my_prop1": {"type": ["string"]}}, + {is_string_type}, + ), + ( + Property("my_prop2", StringType, required=False), + {"my_prop2": {"type": ["string", "null"]}}, + {is_string_type}, + ), + ( + Property("my_prop3", StringType, secret=True), + { + "my_prop3": { + "type": ["string", "null"], + JSONSCHEMA_ANNOTATION_SECRET: True, + JSONSCHEMA_ANNOTATION_WRITEONLY: True, + }, + }, + {is_secret_type, is_string_type}, + ), + ( + Property("my_prop4", StringType, description="This is a property."), + { + "my_prop4": { + "description": "This is a property.", + "type": ["string", "null"], + }, + }, + {is_string_type}, + ), + ( + Property("my_prop5", StringType, default="some_val"), + { + "my_prop5": { + "default": "some_val", + "type": ["string", "null"], + }, + }, + {is_string_type}, + ), + ( + Property("my_prop6", ArrayType(StringType)), + { + "my_prop6": { + "type": ["array", "null"], + "items": {"type": ["string"]}, + }, + }, + {is_array_type, is_string_array_type}, + ), + ( + Property( + "my_prop7", + ObjectType( + Property("not_a_secret", StringType), + Property("is_a_secret", StringType, secret=True), + ), + ), + { + "my_prop7": { + "type": 
["object", "null"], + "properties": { + "not_a_secret": {"type": ["string", "null"]}, + "is_a_secret": { + "type": ["string", "null"], + "secret": True, + "writeOnly": True, + }, + }, + }, + }, + {is_object_type, is_secret_type}, + ), + ( + Property("my_prop8", IntegerType), + { + "my_prop8": { + "type": ["integer", "null"], + }, + }, + {is_integer_type}, + ), + ( + Property( + "my_prop9", + IntegerType, + allowed_values=[1, 2, 3, 4, 5, 6, 7, 8, 9], + examples=[1, 2, 3], + ), + { + "my_prop9": { + "type": ["integer", "null"], + "enum": [1, 2, 3, 4, 5, 6, 7, 8, 9], + "examples": [1, 2, 3], + }, + }, + {is_integer_type}, + ), + ( + Property( + "my_prop10", + ArrayType( + StringType( + allowed_values=["create", "delete", "insert", "update"], + examples=["insert", "update"], + ), + ), + ), + { + "my_prop10": { + "type": ["array", "null"], + "items": { + "type": ["string"], + "enum": ["create", "delete", "insert", "update"], + "examples": ["insert", "update"], + }, + }, + }, + {is_array_type, is_string_array_type}, + ), + ( + Property( + "my_prop11", + ArrayType( + StringType, + allowed_values=[ + ["create", "delete"], + ["insert", "update"], + ], + ), + ), + { + "my_prop11": { + "type": ["array", "null"], + "items": { + "type": ["string"], + }, + "enum": [["create", "delete"], ["insert", "update"]], + }, + }, + {is_array_type, is_string_array_type}, + ), + ], +) +def test_property_creation( + property_obj: Property, + expected_jsonschema: dict, + type_fn_checks_true: set[t.Callable], +) -> None: + property_dict = property_obj.to_dict() + assert property_dict == expected_jsonschema + for check_fn in TYPE_FN_CHECKS: + property_name = next(iter(property_dict.keys())) + property_node = property_dict[property_name] + if check_fn in type_fn_checks_true: + assert ( + check_fn(property_node) is True + ), f"{check_fn.__name__} was not True for {property_dict!r}" + else: + assert ( + check_fn(property_node) is False + ), f"{check_fn.__name__} was not False for {property_dict!r}" + + def test_wrapped_type_dict(): with pytest.raises( ValueError, match=re.escape( "Type dict for <class 'singer_sdk.typing.ArrayType'> is not defined. " - + "Try instantiating it with a nested type such as ArrayType(StringType)." + "Try instantiating it with a nested type such as ArrayType(StringType).", ), ): Property("bad_array_prop", ArrayType).to_dict() @@ -267,7 +516,7 @@ def test_wrapped_type_dict(): ValueError, match=re.escape( "Type dict for <class 'singer_sdk.typing.ObjectType'> is not defined. " - + "Try instantiating it with a nested type such as ObjectType(StringType)." 
+ "Try instantiating it with a nested type such as ObjectType(StringType).", ), ): Property("bad_object_prop", ObjectType).to_dict() @@ -276,7 +525,7 @@ def test_wrapped_type_dict(): "good_array_prop": { "type": ["array", "null"], "items": {"type": ["string"]}, - } + }, } @@ -290,119 +539,166 @@ def test_array_type(): assert ArrayType(wrapped_type).type_dict == expected_json_schema +@pytest.mark.snapshot() @pytest.mark.parametrize( - "properties,addtional_properties", + "schema_obj,snapshot_name", [ - ( - [ + pytest.param( + ObjectType( Property("id", StringType), Property("email", StringType), Property("username", StringType), Property("phone_number", StringType), - ], - None, + ), + "base.json", + id="no required, no duplicates, no additional properties", ), - ( - [ + pytest.param( + ObjectType( Property("id", StringType), Property("email", StringType), Property("username", StringType), Property("phone_number", StringType), - ], - StringType, + additional_properties=StringType, + ), + "additional_properties.json", + id="no required, no duplicates, additional properties", ), - ( - [ + pytest.param( + ObjectType( + Property("id", StringType), + Property("email", StringType), + Property("username", StringType), + Property("phone_number", StringType), + additional_properties=False, + ), + "no_additional_properties.json", + ), + pytest.param( + ObjectType( Property("id", StringType), Property("id", StringType), Property("email", StringType), Property("username", StringType), Property("phone_number", StringType), - ], - None, + ), + "duplicates.json", + id="no required, duplicates, no additional properties", ), - ( - [ + pytest.param( + ObjectType( Property("id", StringType), Property("id", StringType), Property("email", StringType), Property("username", StringType), Property("phone_number", StringType), - ], - StringType, + additional_properties=StringType, + ), + "duplicates_additional_properties.json", + id="no required, duplicates, additional properties", ), - ( - [ + pytest.param( + ObjectType( + Property("id", StringType), Property("id", StringType), - Property("email", StringType, True), - Property("username", StringType, True), + Property("email", StringType), + Property("username", StringType), Property("phone_number", StringType), - ], - None, + additional_properties=False, + ), + "duplicates_no_additional_properties.json", + id="no required, duplicates, no additional properties allowed", ), - ( - [ + pytest.param( + ObjectType( Property("id", StringType), - Property("email", StringType, True), - Property("username", StringType, True), + Property("email", StringType, required=True), + Property("username", StringType, required=True), Property("phone_number", StringType), - ], - StringType, + ), + "required.json", + id="required, no duplicates, no additional properties", ), - ( - [ + pytest.param( + ObjectType( Property("id", StringType), - Property("email", StringType, True), - Property("email", StringType, True), - Property("username", StringType, True), + Property("email", StringType, required=True), + Property("username", StringType, required=True), Property("phone_number", StringType), - ], - None, + additional_properties=StringType, + ), + "required_additional_properties.json", + id="required, no duplicates, additional properties", ), - ( - [ + pytest.param( + ObjectType( Property("id", StringType), - Property("email", StringType, True), - Property("email", StringType, True), - Property("username", StringType, True), + Property("email", StringType, required=True), + 
Property("username", StringType, required=True), Property("phone_number", StringType), - ], - StringType, + additional_properties=False, + ), + "required_no_additional_properties.json", + id="required, no duplicates, no additional properties allowed", + ), + pytest.param( + ObjectType( + Property("id", StringType), + Property("email", StringType, required=True), + Property("email", StringType, required=True), + Property("username", StringType, required=True), + Property("phone_number", StringType), + ), + "required_duplicates.json", + id="required, duplicates, no additional properties", + ), + pytest.param( + ObjectType( + Property("id", StringType), + Property("email", StringType, required=True), + Property("email", StringType, required=True), + Property("username", StringType, required=True), + Property("phone_number", StringType), + additional_properties=StringType, + ), + "required_duplicates_additional_properties.json", + id="required, duplicates, additional properties", + ), + pytest.param( + ObjectType( + Property("id", StringType), + Property("email", StringType, required=True), + Property("email", StringType, required=True), + Property("username", StringType, required=True), + Property("phone_number", StringType), + additional_properties=False, + ), + "required_duplicates_no_additional_properties.json", + id="required, duplicates, no additional properties allowed", + ), + pytest.param( + ObjectType( + Property("id", StringType), + Property("email", StringType), + Property("username", StringType), + Property("phone_number", StringType), + pattern_properties={ + "^attr_[a-z]+$": StringType, + }, + ), + "pattern_properties.json", + id="pattern properties", ), - ], - ids=[ - "no requried, no duplicates, no additional properties", - "no requried, no duplicates, additional properties", - "no requried, duplicates, no additional properties", - "no requried, duplicates, additional properties", - "requried, no duplicates, no additional properties", - "requried, no duplicates, additional properties", - "requried, duplicates, no additional properties", - "requried, duplicates, additional properties", ], ) -def test_object_type(properties: List[Property], addtional_properties: JSONTypeHelper): - merged_property_schemas = { - name: schema for p in properties for name, schema in p.to_dict().items() - } - - required = [p.name for p in properties if not p.optional] - required_schema = {"required": required} if required else {} - addtional_properties_schema = ( - {"additionalProperties": addtional_properties.type_dict} - if addtional_properties - else {} - ) - - expected_json_schema = { - "type": "object", - "properties": merged_property_schemas, - **required_schema, - **addtional_properties_schema, - } - - object_type = ObjectType(*properties, additional_properties=addtional_properties) - assert object_type.type_dict == expected_json_schema +def test_object_type( + schema_obj: ObjectType, + snapshot_dir: Path, + snapshot_name: str, + snapshot: Snapshot, +): + snapshot.snapshot_dir = snapshot_dir.joinpath("jsonschema") + snapshot.assert_match(schema_obj.to_json(indent=2), snapshot_name) def test_custom_type(): @@ -412,3 +708,155 @@ def test_custom_type(): } assert CustomType(json_schema).type_dict == json_schema + + +@pytest.mark.parametrize( + "property_schemas,type_check_functions,results", + [ + ( + [ + { + "anyOf": [ + {"type": "array"}, + {"type": "null"}, + ], + }, + {"type": "array"}, + {"type": ["array", "null"]}, + ], + [is_array_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"type": 
"boolean"}, + {"type": "null"}, + ], + }, + {"type": "boolean"}, + {"type": ["boolean", "null"]}, + ], + [is_boolean_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"type": "integer"}, + {"type": "null"}, + ], + }, + {"type": "integer"}, + {"type": ["integer", "null"]}, + ], + [is_integer_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"type": "string", "format": "date-time"}, + {"type": "null"}, + ], + }, + {"type": "string"}, + {"type": ["string", "null"]}, + ], + [is_string_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"type": "string", "format": "date-time"}, + {"type": "null"}, + ], + }, + {"type": "null"}, + {"type": ["string", "null"]}, + ], + [is_null_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"type": "string", "format": "date-time"}, + {"type": "number"}, + ], + }, + {"type": "number"}, + {"type": ["number", "null"]}, + ], + [is_number_type], + [True], + ), + ( + [ + { + "anyOf": [ + {"enum": ["developer", "team", "enterprise"]}, + {"type": "string"}, + ], + }, + ], + [is_string_type], + [True], + ), + ], +) +def test_type_check_variations(property_schemas, type_check_functions, results): + for property_schema in property_schemas: + for type_check_function, result in zip(type_check_functions, results): + assert type_check_function(property_schema) == result + + +def test_discriminated_union(): + th = DiscriminatedUnion( + "flow", + oauth=ObjectType( + Property("client_id", StringType, required=True, secret=True), + Property("client_secret", StringType, required=True, secret=True), + additional_properties=False, + ), + password=ObjectType( + Property("username", StringType, required=True), + Property("password", StringType, required=True, secret=True), + additional_properties=False, + ), + ) + + validator = Draft6Validator(th.to_dict()) + + assert validator.is_valid( + { + "flow": "oauth", + "client_id": "123", + "client_secret": "456", + }, + ) + assert validator.is_valid( + { + "flow": "password", + "password": "123", + "username": "456", + }, + ) + assert not validator.is_valid( + { + "flow": "oauth", + "client_id": "123", + }, + ) + assert not validator.is_valid( + { + "flow": "password", + "client_id": "123", + }, + ) diff --git a/tests/core/test_mapper.py b/tests/core/test_mapper.py index 233ae8ae7..036d7586a 100644 --- a/tests/core/test_mapper.py +++ b/tests/core/test_mapper.py @@ -1,15 +1,20 @@ """Test map transformer.""" +from __future__ import annotations + import copy +import io import json import logging -from typing import Dict, List, Optional, Set, cast +import typing as t +from contextlib import redirect_stdout import pytest +from freezegun import freeze_time +from singer_sdk._singerlib import Catalog from singer_sdk.exceptions import MapExpressionError from singer_sdk.helpers._catalog import get_selected_schema -from singer_sdk.helpers._singer import Catalog from singer_sdk.mapper import PluginMapper, RemoveRecordTransform, md5 from singer_sdk.streams.core import Stream from singer_sdk.tap_base import Tap @@ -21,6 +26,11 @@ StringType, ) +if t.TYPE_CHECKING: + from pathlib import Path + + from pytest_snapshot.plugin import Snapshot + @pytest.fixture def stream_map_config() -> dict: @@ -36,6 +46,7 @@ def sample_catalog_dict() -> dict: Property("name", StringType), Property("owner_email", StringType), Property("description", StringType), + Property("description", StringType), ).to_dict() foobars_schema = PropertiesList( Property("the", StringType), @@ -53,7 +64,7 @@ def sample_catalog_dict() -> dict: "tap_stream_id": "foobars", "schema": foobars_schema, 
}, - ] + ], } @@ -70,21 +81,25 @@ def sample_stream(): "name": "tap-something", "owner_email": "sample1@example.com", "description": "Comment A", + "create_date": "2019-01-01", }, { "name": "my-tap-something", "owner_email": "sample2@example.com", "description": "Comment B", + "create_date": "2020-01-01", }, { "name": "target-something", "owner_email": "sample3@example.com", "description": "Comment C", + "create_date": "2021-01-01", }, { "name": "not-atap", "owner_email": "sample4@example.com", "description": "Comment D", + "create_date": "2022-01-01", }, ], "foobars": [ @@ -101,12 +116,12 @@ def sample_stream(): def transform_stream_maps(): return { "repositories": { - # "__source__": "repositories", "repo_name": "_['name']", "email_domain": "owner_email.split('@')[1]", "email_hash": "md5(config['hash_seed'] + owner_email)", "description": "'[masked]'", "description2": "str('[masked]')", + "create_year": "int(datetime.date.fromisoformat(create_date).year)", "int_test": "int('0')", "__else__": None, }, @@ -121,40 +136,44 @@ def transformed_result(stream_map_config): "repo_name": "tap-something", "email_domain": "example.com", "email_hash": md5( - stream_map_config["hash_seed"] + "sample1@example.com" + stream_map_config["hash_seed"] + "sample1@example.com", ), "description": "[masked]", "description2": "[masked]", + "create_year": 2019, "int_test": 0, }, { "repo_name": "my-tap-something", "email_domain": "example.com", "email_hash": md5( - stream_map_config["hash_seed"] + "sample2@example.com" + stream_map_config["hash_seed"] + "sample2@example.com", ), "description": "[masked]", "description2": "[masked]", + "create_year": 2020, "int_test": 0, }, { "repo_name": "target-something", "email_domain": "example.com", "email_hash": md5( - stream_map_config["hash_seed"] + "sample3@example.com" + stream_map_config["hash_seed"] + "sample3@example.com", ), "description": "[masked]", "description2": "[masked]", + "create_year": 2021, "int_test": 0, }, { "repo_name": "not-atap", "email_domain": "example.com", "email_hash": md5( - stream_map_config["hash_seed"] + "sample4@example.com" + stream_map_config["hash_seed"] + "sample4@example.com", ), "description": "[masked]", "description2": "[masked]", + "create_year": 2022, "int_test": 0, }, ], @@ -174,6 +193,7 @@ def transformed_schemas(): Property("email_hash", StringType), Property("description", StringType), Property("description2", StringType), + Property("create_year", IntegerType), Property("int_test", IntegerType), ).to_dict(), "foobars": PropertiesList( @@ -197,7 +217,7 @@ def clone_and_alias_stream_maps(): @pytest.fixture -def cloned_and_aliased_result(stream_map_config, sample_stream): +def cloned_and_aliased_result(sample_stream): return { "repositories_aliased": sample_stream["repositories"], "repositories_clone_1": sample_stream["repositories"], @@ -236,9 +256,9 @@ def filter_stream_maps(): @pytest.fixture def filter_stream_map_w_error(filter_stream_maps): - restult = copy.copy(filter_stream_maps) - restult["repositories"]["__filter__"] = "this should raise an er!ror" - return restult + result = copy.copy(filter_stream_maps) + result["repositories"]["__filter__"] = "this should raise an er!ror" + return result @pytest.fixture @@ -336,6 +356,7 @@ def test_filter_transforms_w_error( def _test_transform( test_name: str, + *, stream_maps, stream_map_config, expected_result, @@ -343,7 +364,7 @@ def _test_transform( sample_stream, sample_catalog_obj, ): - output: Dict[str, List[dict]] = {} + output: dict[str, list[dict]] = {} mapper = 
PluginMapper( plugin_config={ "stream_maps": stream_maps, @@ -356,7 +377,7 @@ def _test_transform( for stream_name, stream in sample_stream.items(): for stream_map in mapper.stream_maps[stream_name]: if isinstance(stream_map, RemoveRecordTransform): - logging.info(f"Skipping ignored stream '{stream_name}'") + logging.info("Skipping ignored stream '%s'", stream_name) continue assert ( @@ -398,7 +419,7 @@ class MappedStream(Stream): ), ).to_dict() - def get_records(self, context): + def get_records(self, context): # noqa: ARG002 yield { "email": "alice@example.com", "count": 21, @@ -427,176 +448,181 @@ def discover_streams(self): @pytest.fixture -def clear_schema_cache() -> None: +def _clear_schema_cache() -> None: """Schemas are cached, so the cache needs to be cleared between test invocations.""" yield get_selected_schema.cache_clear() +@freeze_time("2022-01-01T00:00:00Z") +@pytest.mark.snapshot() +@pytest.mark.usefixtures("_clear_schema_cache") @pytest.mark.parametrize( - "stream_alias,stream_maps,flatten,flatten_max_depth,output_fields,key_properties", + "stream_maps,flatten,flatten_max_depth,snapshot_name", [ - ( - "mystream", + pytest.param( {}, False, 0, - {"email", "count", "user"}, - [], + "no_map.jsonl", + id="no_map", ), - ( - "mystream", + pytest.param( { "mystream": { "email_hash": "md5(email)", - } + }, }, False, 0, - {"email", "count", "user", "email_hash"}, - [], + "keep_all_fields.jsonl", + id="keep_all_fields", ), - ( - "mystream", + pytest.param( { "mystream": { "email_hash": "md5(email)", "fixed_count": "int(count-1)", "__else__": None, - } + }, + }, + False, + 0, + "only_mapped_fields.jsonl", + id="only_mapped_fields", + ), + pytest.param( + { + "mystream": { + "email_hash": "md5(email)", + "fixed_count": "int(count-1)", + "__else__": "__NULL__", + }, }, False, 0, - {"fixed_count", "email_hash"}, - [], + "only_mapped_fields_null_string.jsonl", + id="only_mapped_fields_null_string", ), - ( - "mystream", + pytest.param( { "mystream": { "email_hash": "md5(email)", "__key_properties__": ["email_hash"], "__else__": None, - } + }, }, False, 0, - {"email_hash"}, - ["email_hash"], + "changed_key_properties.jsonl", + id="changed_key_properties", ), - ( - "sourced_stream_1", + pytest.param( {"mystream": None, "sourced_stream_1": {"__source__": "mystream"}}, False, 0, - {"email", "count", "user"}, - [], + "sourced_stream_1.jsonl", + id="sourced_stream_1", + ), + pytest.param( + {"mystream": "__NULL__", "sourced_stream_1": {"__source__": "mystream"}}, + False, + 0, + "sourced_stream_1_null_string.jsonl", + id="sourced_stream_1_null_string", ), - ( - "sourced_stream_2", + pytest.param( {"sourced_stream_2": {"__source__": "mystream"}, "__else__": None}, False, 0, - {"email", "count", "user"}, - [], + "sourced_stream_2.jsonl", + id="sourced_stream_2", ), - ( - "aliased_stream", + pytest.param( {"mystream": {"__alias__": "aliased_stream"}}, False, 0, - {"email", "count", "user"}, - [], + "aliased_stream.jsonl", + id="aliased_stream", ), - ( - "mystream", + pytest.param( {}, True, 1, - {"email", "count", "user__id", "user__sub"}, - [], + "flatten_depth_1.jsonl", + id="flatten_depth_1", ), - ( - "mystream", + pytest.param( {}, True, 10, - {"email", "count", "user__id", "user__sub__num"}, - [], + "flatten_all.jsonl", + id="flatten_all", ), - ( - "mystream", + pytest.param( { "mystream": { "email_hash": "md5(email)", "__key_properties__": ["email_hash"], - } + }, }, True, 10, - {"email", "count", "email_hash", "user__id", "user__sub__num"}, - ["email_hash"], + "map_and_flatten.jsonl", + 
id="map_and_flatten", ), - ( - "mystream", + pytest.param( { "mystream": { "email": None, - } + }, }, False, 0, - {"count", "user"}, - [], + "drop_property.jsonl", + id="drop_property", + ), + pytest.param( + {"mystream": {"email": "__NULL__"}}, + False, + 0, + "drop_property_null_string.jsonl", + id="drop_property_null_string", + ), + pytest.param( + { + "mystream": { + "count": "count", + "__else__": None, + }, + }, + False, + 0, + "non_pk_passthrough.jsonl", + id="non_pk_passthrough", ), - ], - ids=[ - "no_map", - "keep_all_fields", - "only_mapped_fields", - "changed_key_properties", - "sourced_stream_1", - "sourced_stream_2", - "aliased_stream", - "flatten_depth_1", - "flatten_all", - "map_and_flatten", - "drop_property", ], ) def test_mapped_stream( - clear_schema_cache: None, - stream_alias: str, + snapshot: Snapshot, + snapshot_dir: Path, stream_maps: dict, flatten: bool, - flatten_max_depth: Optional[int], - output_fields: Set[str], - key_properties: List[str], + flatten_max_depth: int | None, + snapshot_name: str, ): + snapshot.snapshot_dir = snapshot_dir.joinpath("mapped_stream") + tap = MappedTap( config={ "stream_maps": stream_maps, "flattening_enabled": flatten, "flattening_max_depth": flatten_max_depth, - } + }, ) - stream = tap.streams["mystream"] - - schema_messages = list(stream._generate_schema_messages()) - assert len(schema_messages) == 1, "Incorrect number of schema messages generated." - schema_message = schema_messages[0] - assert schema_message.stream == stream_alias - assert schema_message.key_properties == key_properties - assert schema_message.schema["properties"].keys() == output_fields - - for raw_record in stream.get_records(None): - record_message = next(stream._generate_record_messages(cast(dict, raw_record))) - transformed_record = record_message.record - - assert record_message.stream == stream_alias - assert output_fields == set(transformed_record.keys()) - - for output_field in output_fields: - assert transformed_record[ - output_field - ], f"Value for field '{output_field}' should be nonempty." 
+ buf = io.StringIO() + with redirect_stdout(buf): + tap.sync_all() + + buf.seek(0) + snapshot.assert_match(buf.read(), snapshot_name) diff --git a/tests/core/test_metrics.py b/tests/core/test_metrics.py new file mode 100644 index 000000000..a9d6b4d26 --- /dev/null +++ b/tests/core/test_metrics.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import logging +import time + +import pytest + +from singer_sdk import metrics + + +class CustomObject: + def __init__(self, name: str, value: int): + self.name = name + self.value = value + + def __str__(self) -> str: + return f"{self.name}={self.value}" + + +def test_meter(): + class _MyMeter(metrics.Meter): + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + meter = _MyMeter(metrics.Metric.RECORD_COUNT) + + assert meter.tags == {} + + stream_context = {"parent_id": 1} + meter.context = stream_context + assert meter.tags == {metrics.Tag.CONTEXT: stream_context} + + meter.context = None + assert metrics.Tag.CONTEXT not in meter.tags + + +def test_record_counter(caplog: pytest.LogCaptureFixture): + caplog.set_level(logging.INFO, logger=metrics.METRICS_LOGGER_NAME) + custom_object = CustomObject("test", 1) + + with metrics.record_counter( + "test_stream", + endpoint="test_endpoint", + custom_tag="pytest", + custom_obj=custom_object, + ) as counter: + for _ in range(100): + counter.last_log_time = 0 + assert counter._ready_to_log() + + counter.increment() + + total = 0 + + assert len(caplog.records) == 100 + 1 + + for record in caplog.records: + assert record.levelname == "INFO" + assert record.msg == "METRIC: %s" + assert "test=1" in record.message + + point: metrics.Point[int] = record.args[0] + assert point.metric_type == "counter" + assert point.metric == "record_count" + assert point.tags == { + metrics.Tag.STREAM: "test_stream", + metrics.Tag.ENDPOINT: "test_endpoint", + "custom_tag": "pytest", + "custom_obj": custom_object, + } + + total += point.value + + assert total == 100 + + +def test_sync_timer(caplog: pytest.LogCaptureFixture): + caplog.set_level(logging.INFO, logger=metrics.METRICS_LOGGER_NAME) + with metrics.sync_timer("test_stream", custom_tag="pytest") as timer: + start_time = timer.start_time + for _ in range(1000): + time.sleep(0.001) + end_time = time.time() + + assert len(caplog.records) == 1 + + record = caplog.records[0] + assert record.levelname == "INFO" + assert record.msg == "METRIC: %s" + + point: metrics.Point[float] = record.args[0] + assert point.metric_type == "timer" + assert point.metric == "sync_duration" + assert point.tags == { + metrics.Tag.STREAM: "test_stream", + metrics.Tag.STATUS: "succeeded", + "custom_tag": "pytest", + } + + assert pytest.approx(point.value, rel=0.001) == end_time - start_time diff --git a/tests/core/test_parent_child.py b/tests/core/test_parent_child.py index 2f09c34aa..7fd01a153 100644 --- a/tests/core/test_parent_child.py +++ b/tests/core/test_parent_child.py @@ -2,6 +2,7 @@ import io import json +import typing as t from contextlib import redirect_stdout import pytest @@ -14,20 +15,24 @@ class Parent(Stream): """A parent stream.""" name = "parent" - schema = { + schema: t.ClassVar[dict] = { "type": "object", "properties": { "id": {"type": "integer"}, }, } - def get_child_context(self, record: dict, context: dict | None) -> dict: + def get_child_context( + self, + record: dict, + context: dict | None, # noqa: ARG002 + ) -> dict | None: """Create context for children streams.""" return { "pid": record["id"], } - def get_records(self, 
context: dict | None): + def get_records(self, context: dict | None): # noqa: ARG002 """Get dummy records.""" yield {"id": 1} yield {"id": 2} @@ -38,7 +43,7 @@ class Child(Stream): """A child stream.""" name = "child" - schema = { + schema: t.ClassVar[dict] = { "type": "object", "properties": { "id": {"type": "integer"}, @@ -47,7 +52,7 @@ class Child(Stream): } parent_stream_type = Parent - def get_records(self, context: dict | None): + def get_records(self, context: dict | None): # noqa: ARG002 """Get dummy records.""" yield {"id": 1} yield {"id": 2} @@ -82,39 +87,64 @@ def tap_with_deselected_parent(tap: MyTap): tap.catalog["parent"].metadata[()].selected = original -def test_parent_context_fields_in_child(tap: MyTap): - """Test that parent context fields are available in child streams.""" - parent_stream = tap.streams["parent"] - child_stream = tap.streams["child"] - +def _get_messages(tap: Tap): + """Redirect stdout to a buffer.""" buf = io.StringIO() with redirect_stdout(buf): tap.sync_all() - buf.seek(0) lines = buf.read().splitlines() - messages = [json.loads(line) for line in lines] + return [json.loads(line) for line in lines] - # Parent schema is emitted - assert messages[0] - assert messages[0]["type"] == SingerMessageType.SCHEMA - assert messages[0]["stream"] == parent_stream.name - assert messages[0]["schema"] == parent_stream.schema - # Child schema is emitted +def test_parent_context_fields_in_child(tap: MyTap): + """Test that parent context fields are available in child streams.""" + parent_stream = tap.streams["parent"] + child_stream = tap.streams["child"] + messages = _get_messages(tap) + + # Parent schema is emitted assert messages[1] assert messages[1]["type"] == SingerMessageType.SCHEMA - assert messages[1]["stream"] == child_stream.name - assert messages[1]["schema"] == child_stream.schema + assert messages[1]["stream"] == parent_stream.name + assert messages[1]["schema"] == parent_stream.schema + + # Child schema is emitted + assert messages[2] + assert messages[2]["type"] == SingerMessageType.SCHEMA + assert messages[2]["stream"] == child_stream.name + assert messages[2]["schema"] == child_stream.schema - # Child records are emitted, skip state message in between - child_record_messages = messages[2], *messages[4:6] + # Child records are emitted + child_record_messages = messages[3:6] assert child_record_messages assert all(msg["type"] == SingerMessageType.RECORD for msg in child_record_messages) assert all(msg["stream"] == child_stream.name for msg in child_record_messages) assert all("pid" in msg["record"] for msg in child_record_messages) +def test_skip_deleted_parent_child_streams( + tap: MyTap, + caplog: pytest.LogCaptureFixture, +): + """Test tap output with parent stream deselected.""" + parent_stream = tap.streams["parent"] + + buf = io.StringIO() + with redirect_stdout(buf), caplog.at_level("WARNING"): + parent_stream._sync_children(None) + + buf.seek(0) + + assert not buf.read().splitlines() + assert len(caplog.records) == 1 + assert caplog.records[0].levelname == "WARNING" + assert caplog.records[0].message == ( + "Context for child streams of 'parent' is null, " + "skipping sync of any child streams" + ) + + def test_child_deselected_parent(tap_with_deselected_parent: MyTap): """Test tap output with parent stream deselected.""" parent_stream = tap_with_deselected_parent.streams["parent"] @@ -123,22 +153,16 @@ def test_child_deselected_parent(tap_with_deselected_parent: MyTap): assert not parent_stream.selected assert 
parent_stream.has_selected_descendents - buf = io.StringIO() - with redirect_stdout(buf): - tap_with_deselected_parent.sync_all() - - buf.seek(0) - lines = buf.read().splitlines() - messages = [json.loads(line) for line in lines] + messages = _get_messages(tap_with_deselected_parent) # First message is a schema for the child stream, not the parent - assert messages[0] - assert messages[0]["type"] == SingerMessageType.SCHEMA - assert messages[0]["stream"] == child_stream.name - assert messages[0]["schema"] == child_stream.schema + assert messages[1] + assert messages[1]["type"] == SingerMessageType.SCHEMA + assert messages[1]["stream"] == child_stream.name + assert messages[1]["schema"] == child_stream.schema - # Child records are emitted, skip state message in between - child_record_messages = messages[1], *messages[3:5] + # Child records are emitted + child_record_messages = messages[2:5] assert child_record_messages assert all(msg["type"] == SingerMessageType.RECORD for msg in child_record_messages) assert all(msg["stream"] == child_stream.name for msg in child_record_messages) diff --git a/tests/core/test_plugin_base.py b/tests/core/test_plugin_base.py index 348485b64..7d1021c11 100644 --- a/tests/core/test_plugin_base.py +++ b/tests/core/test_plugin_base.py @@ -1,7 +1,11 @@ +from __future__ import annotations + import os from unittest import mock -from singer_sdk.plugin_base import PluginBase +import pytest + +from singer_sdk.plugin_base import MapperNotInitialized, PluginBase from singer_sdk.typing import IntegerType, PropertiesList, Property, StringType @@ -27,10 +31,25 @@ def test_get_env_var_config(): env_config = PluginTest._env_var_config assert env_config["prop1"] == "hello" assert "PROP1" not in env_config - assert "prop2" not in env_config and "PROP2" not in env_config - assert "prop3" not in env_config and "PROP3" not in env_config + assert "prop2" not in env_config + assert "PROP2" not in env_config + assert "prop3" not in env_config + assert "PROP3" not in env_config no_env_config = PluginTest._env_var_config - assert "prop1" not in no_env_config and "PROP1" not in env_config - assert "prop2" not in no_env_config and "PROP2" not in env_config - assert "prop3" not in no_env_config and "PROP3" not in env_config + assert "prop1" not in no_env_config + assert "PROP1" not in env_config + assert "prop2" not in no_env_config + assert "PROP2" not in env_config + assert "prop3" not in no_env_config + assert "PROP3" not in env_config + + +def test_mapper_not_initialized(): + """Test that the mapper is not initialized before the plugin is started.""" + plugin = PluginTest( + parse_env_config=False, + validate_config=False, + ) + with pytest.raises(MapperNotInitialized): + _ = plugin.mapper diff --git a/tests/core/test_plugin_config.py b/tests/core/test_plugin_config.py index 98448722f..d0df1d69f 100644 --- a/tests/core/test_plugin_config.py +++ b/tests/core/test_plugin_config.py @@ -1,13 +1,17 @@ """Test plugin config functions.""" -from typing import Any, Dict, List +from __future__ import annotations + +import typing as t -from singer_sdk.streams.core import Stream from singer_sdk.tap_base import Tap from singer_sdk.typing import BooleanType, PropertiesList, Property -SAMPLE_CONFIG: Dict[str, Any] = {} +if t.TYPE_CHECKING: + from singer_sdk.streams.core import Stream + +SAMPLE_CONFIG: dict[str, t.Any] = {} class TapConfigTest(Tap): @@ -19,7 +23,7 @@ class TapConfigTest(Tap): Property("default_false", BooleanType, default=False), ).to_dict() - def discover_streams(self) -> 
List[Stream]: + def discover_streams(self) -> list[Stream]: """Noop.""" return [] diff --git a/tests/core/test_record_typing.py b/tests/core/test_record_typing.py index d883ee914..2657ff285 100644 --- a/tests/core/test_record_typing.py +++ b/tests/core/test_record_typing.py @@ -1,13 +1,16 @@ """Typing tests.""" +from __future__ import annotations + import logging +import typing as t from datetime import datetime -from typing import Any, Dict import pendulum import pytest from singer_sdk.helpers._typing import ( + TypeConformanceLevel, conform_record_data_types, get_datelike_property_type, to_json_compatible, @@ -15,31 +18,66 @@ @pytest.mark.parametrize( - "row,schema,expected_row", + "record,schema,expected_row,ignore_props_message", [ ( {"updatedAt": pendulum.parse("2021-08-25T20:05:28+00:00")}, {"properties": {"updatedAt": True}}, {"updatedAt": "2021-08-25T20:05:28+00:00"}, + None, ), ( {"updatedAt": pendulum.parse("2021-08-25T20:05:28Z")}, {"properties": {"updatedAt": True}}, {"updatedAt": "2021-08-25T20:05:28+00:00"}, + None, ), ( {"updatedAt": pendulum.parse("2021-08-25T20:05:28")}, {"properties": {"updatedAt": True}}, {"updatedAt": "2021-08-25T20:05:28+00:00"}, + None, ), + ( + {"present": 1, "absent": "2"}, + {"properties": {"present": {"type": "integer"}}}, + {"present": 1}, + ( + "Properties ('absent',) were present in the 'test-stream' stream but " + "not found in catalog schema. Ignoring." + ), + ), + ], + ids=[ + "datetime with offset", + "datetime with timezone", + "datetime without timezone", + "ignored_props_message", ], ) -def test_conform_record_data_types(row: Dict[str, Any], schema: dict, expected_row): +def test_conform_record_data_types( + record: dict[str, t.Any], + schema: dict, + expected_row: dict, + ignore_props_message: str, + caplog: pytest.LogCaptureFixture, +): stream_name = "test-stream" - # TODO: mock this out - logger = logging.getLogger() - actual = conform_record_data_types(stream_name, row, schema, logger) - print(row["updatedAt"].isoformat()) + logger = logging.getLogger("test-logger") + + with caplog.at_level(logging.INFO, logger=logger.name): + actual = conform_record_data_types( + stream_name, + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + if ignore_props_message: + assert ignore_props_message in caplog.text + else: + assert not caplog.text + assert actual == expected_row @@ -49,7 +87,10 @@ def test_conform_record_data_types(row: Dict[str, Any], schema: dict, expected_r (pendulum.parse("2021-08-25T20:05:28+00:00"), "2021-08-25T20:05:28+00:00"), (pendulum.parse("2021-08-25T20:05:28+07:00"), "2021-08-25T20:05:28+07:00"), ( - datetime.strptime("2021-08-25T20:05:28", "%Y-%m-%dT%H:%M:%S"), + datetime.strptime( # noqa: DTZ007 + "2021-08-25T20:05:28", + "%Y-%m-%dT%H:%M:%S", + ), "2021-08-25T20:05:28+00:00", ), ( @@ -86,7 +127,7 @@ def test_to_json_compatible(datetime_val, expected): "items": {"type": "string", "format": "date-time"}, }, {"type": "null"}, - ] + ], }, None, ), diff --git a/tests/core/test_schema.py b/tests/core/test_schema.py new file mode 100644 index 000000000..5fa8c75f8 --- /dev/null +++ b/tests/core/test_schema.py @@ -0,0 +1,70 @@ +""" +Testing that Schema can convert schemas lossless from and to dicts. 
+ +Schemas are taken from these examples; +https://json-schema.org/learn/miscellaneous-examples.html + +NOTE: The following properties are not currently supported; +pattern +unevaluatedProperties +propertyNames +minProperties +maxProperties +prefixItems +contains +minContains +maxContains +minItems +maxItems +uniqueItems +enum +const +contentMediaType +contentEncoding +allOf +oneOf +not + +Some of these could be trivially added (if they are SIMPLE_PROPERTIES. +Some might need more thinking if they can contain schemas (though, note that we also +treat 'additionalProperties', 'anyOf' and' patternProperties' as SIMPLE even though they +can contain schemas. +""" + +from __future__ import annotations + +from singer_sdk._singerlib import Schema + + +def test_simple_schema(): + simple_schema = { + "title": "Longitude and Latitude Values", + "description": "A geographical coordinate.", + "required": ["latitude", "longitude"], + "type": "object", + "properties": { + "latitude": {"type": "number", "minimum": -90, "maximum": 90}, + "longitude": {"type": "number", "minimum": -180, "maximum": 180}, + }, + } + + schema_plus = Schema.from_dict(simple_schema) + assert schema_plus.to_dict() == simple_schema + assert schema_plus.required == ["latitude", "longitude"] + assert isinstance(schema_plus.properties["latitude"], Schema) + latitude = schema_plus.properties["latitude"] + assert latitude.type == "number" + + +def test_schema_with_items(): + schema = { + "description": "A representation of a person, company, organization, or place", + "type": "object", + "properties": {"fruits": {"type": "array", "items": {"type": "string"}}}, + } + schema_plus = Schema.from_dict(schema) + assert schema_plus.to_dict() == schema + assert isinstance(schema_plus.properties["fruits"], Schema) + fruits = schema_plus.properties["fruits"] + assert isinstance(fruits.items, Schema) + assert fruits.items.type == "string" diff --git a/tests/core/test_simpleeval.py b/tests/core/test_simpleeval.py index 68ea865b2..d5cacb30f 100644 --- a/tests/core/test_simpleeval.py +++ b/tests/core/test_simpleeval.py @@ -6,6 +6,8 @@ - https://gitlab.com/meltano/sdk/-/issues/213 """ +from __future__ import annotations + import ast import operator import os diff --git a/tests/core/test_singer.py b/tests/core/test_singer.py deleted file mode 100644 index aa7ef2a66..000000000 --- a/tests/core/test_singer.py +++ /dev/null @@ -1,118 +0,0 @@ -from typing import List, Optional - -import pytest - -from singer_sdk.helpers._singer import Catalog, CatalogEntry, Metadata, MetadataMapping - - -def test_catalog_parsing(): - """Validate parsing works for a catalog and its stream entries.""" - catalog_dict = { - "streams": [ - { - "tap_stream_id": "test", - "metadata": [ - { - "breadcrumb": [], - "metadata": { - "inclusion": "available", - }, - }, - { - "breadcrumb": ["properties", "a"], - "metadata": { - "inclusion": "unsupported", - }, - }, - ], - "schema": { - "type": "object", - }, - }, - ], - } - catalog = Catalog.from_dict(catalog_dict) - - assert catalog.streams[0].tap_stream_id == "test" - assert catalog.get_stream("test").tap_stream_id == "test" - assert catalog["test"].metadata.to_list() == catalog_dict["streams"][0]["metadata"] - assert catalog["test"].tap_stream_id == catalog_dict["streams"][0]["tap_stream_id"] - assert catalog["test"].schema.to_dict() == {"type": "object"} - assert catalog.to_dict() == catalog_dict - - new = { - "tap_stream_id": "new", - "metadata": [], - "schema": {}, - } - entry = CatalogEntry.from_dict(new) - 
catalog.add_stream(entry) - assert catalog.get_stream("new") == entry - - -@pytest.mark.parametrize( - "schema,key_properties,replication_method,valid_replication_keys", - [ - ( - {"properties": {"id": {"type": "integer"}}, "type": "object"}, - ["id"], - "FULL_TABLE", - None, - ), - ( - { - "properties": { - "first_name": {"type": "string"}, - "last_name": {"type": "string"}, - }, - "type": "object", - }, - ["first_name", "last_name"], - "INCREMENTAL", - ["updated_at"], - ), - ( - { - "properties": { - "first_name": {"type": "string"}, - "last_name": {"type": "string"}, - }, - "type": "object", - }, - ["first_name", "last_name"], - "FULL_TABLE", - None, - ), - ( - {}, - [], - None, - None, - ), - ], -) -def test_standard_metadata( - schema: dict, - key_properties: List[str], - replication_method: Optional[str], - valid_replication_keys: Optional[List[str]], -): - """Validate generated metadata.""" - metadata = MetadataMapping.get_standard_metadata( - schema=schema, - schema_name="test", - key_properties=key_properties, - replication_method=replication_method, - valid_replication_keys=valid_replication_keys, - ) - - stream_metadata = metadata[()] - assert stream_metadata.table_key_properties == key_properties - assert stream_metadata.forced_replication_method == replication_method - assert stream_metadata.valid_replication_keys == valid_replication_keys - assert stream_metadata.selected is None - - for pk in key_properties: - pk_metadata = metadata[("properties", pk)] - assert pk_metadata.inclusion == Metadata.InclusionType.AUTOMATIC - assert pk_metadata.selected is None diff --git a/tests/core/test_singer_messages.py b/tests/core/test_singer_messages.py new file mode 100644 index 000000000..3a2253611 --- /dev/null +++ b/tests/core/test_singer_messages.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import pytest + +from singer_sdk._singerlib import SingerMessageType +from singer_sdk.helpers._batch import JSONLinesEncoding, SDKBatchMessage + + +@pytest.mark.parametrize( + "message,expected", + [ + ( + SDKBatchMessage( + stream="test_stream", + encoding=JSONLinesEncoding("gzip"), + manifest=[ + "path/to/file1.jsonl.gz", + "path/to/file2.jsonl.gz", + ], + ), + { + "type": SingerMessageType.BATCH, + "stream": "test_stream", + "encoding": {"compression": "gzip", "format": "jsonl"}, + "manifest": [ + "path/to/file1.jsonl.gz", + "path/to/file2.jsonl.gz", + ], + }, + ), + ], + ids=["batch-message-jsonl"], +) +def test_batch_message_as_dict(message, expected): + """Test batch message as dict.""" + + dumped = message.to_dict() + assert dumped == expected + + assert message.from_dict(dumped) == message diff --git a/tests/core/test_sql_typing.py b/tests/core/test_sql_typing.py index 06d8928ed..0d2c4bac0 100644 --- a/tests/core/test_sql_typing.py +++ b/tests/core/test_sql_typing.py @@ -1,5 +1,7 @@ """Typing tests.""" +from __future__ import annotations + import pytest import sqlalchemy @@ -65,7 +67,8 @@ def test_convert_jsonschema_type_to_sql_type( ], ) def test_convert_sql_type_to_jsonschema_type( - sql_type: sqlalchemy.types.TypeEngine, is_of_jsonschema_type: dict + sql_type: sqlalchemy.types.TypeEngine, + is_of_jsonschema_type: dict, ): result = th.to_jsonschema_type(sql_type) assert result == is_of_jsonschema_type diff --git a/tests/core/test_state_handling.py b/tests/core/test_state_handling.py index d2e7de099..339df1f4c 100644 --- a/tests/core/test_state_handling.py +++ b/tests/core/test_state_handling.py @@ -1,5 +1,7 @@ """Test catalog selection features.""" +from __future__ import 
annotations + import pytest from singer_sdk.helpers import _state @@ -35,7 +37,7 @@ def dirty_state(): "replication_key_value": "2021-05-11T18:07:11Z", }, }, - } + }, } @@ -54,7 +56,7 @@ def cleared_state(): {"context": {"org": "VirusEnabled", "repo": "Athena"}}, ], }, - } + }, } @@ -79,7 +81,7 @@ def finalized_state(): "replication_key": "updated_at", "replication_key_value": "2021-05-11T18:07:11Z", }, - } + }, } diff --git a/tests/core/test_streams.py b/tests/core/test_streams.py index 2b87dd75b..a3a451086 100644 --- a/tests/core/test_streams.py +++ b/tests/core/test_streams.py @@ -1,14 +1,21 @@ """Stream tests.""" +from __future__ import annotations + import logging -from typing import Any, Dict, Iterable, List, Optional, cast +import typing as t import pendulum import pytest import requests +from singer_sdk._singerlib import Catalog, MetadataMapping +from singer_sdk.exceptions import ( + InvalidReplicationKeyException, +) from singer_sdk.helpers._classproperty import classproperty -from singer_sdk.helpers.jsonpath import _compile_jsonpath +from singer_sdk.helpers.jsonpath import _compile_jsonpath, extract_jsonpath +from singer_sdk.pagination import first from singer_sdk.streams.core import ( REPLICATION_FULL_TABLE, REPLICATION_INCREMENTAL, @@ -25,6 +32,8 @@ StringType, ) +CONFIG_START_DATE = "2021-01-01" + class SimpleTestStream(Stream): """Test stream class.""" @@ -41,13 +50,36 @@ def __init__(self, tap: Tap): """Create a new stream.""" super().__init__(tap, schema=self.schema, name=self.name) - def get_records(self, context: Optional[dict]) -> Iterable[Dict[str, Any]]: + def get_records( + self, + context: dict | None, # noqa: ARG002 + ) -> t.Iterable[dict[str, t.Any]]: """Generate records.""" yield {"id": 1, "value": "Egypt"} yield {"id": 2, "value": "Germany"} yield {"id": 3, "value": "India"} +class UnixTimestampIncrementalStream(SimpleTestStream): + name = "unix_ts" + schema = PropertiesList( + Property("id", IntegerType, required=True), + Property("value", StringType, required=True), + Property("updatedAt", IntegerType, required=True), + ).to_dict() + replication_key = "updatedAt" + + +class UnixTimestampIncrementalStream2(UnixTimestampIncrementalStream): + name = "unix_ts_override" + + def compare_start_date(self, value: str, start_date_value: str) -> str: + """Compare a value to a start date value.""" + + start_timestamp = pendulum.parse(start_date_value).format("X") + return max(value, start_timestamp, key=float) + + class RestTestStream(RESTStream): """Test RESTful stream class.""" @@ -60,6 +92,24 @@ class RestTestStream(RESTStream): ).to_dict() replication_key = "updatedAt" + def get_next_page_token( + self, + response: requests.Response, + previous_token: str | None, # noqa: ARG002 + ) -> str | None: + if self.next_page_token_jsonpath: + all_matches = extract_jsonpath( + self.next_page_token_jsonpath, + response.json(), + ) + try: + return first(all_matches) + except StopIteration: + return None + + else: + return response.headers.get("X-Next-Page", None) + class GraphqlTestStream(GraphQLStream): """Test Graphql stream class.""" @@ -80,48 +130,60 @@ class SimpleTestTap(Tap): name = "test-tap" settings_jsonschema = PropertiesList(Property("start_date", DateTimeType)).to_dict() - def discover_streams(self) -> List[Stream]: + def discover_streams(self) -> list[Stream]: """List all streams.""" - return [SimpleTestStream(self)] + return [ + SimpleTestStream(self), + UnixTimestampIncrementalStream(self), + UnixTimestampIncrementalStream2(self), + ] @pytest.fixture def tap() -> 
SimpleTestTap: """Tap instance.""" - catalog_dict = { - "streams": [ - { - "key_properties": ["id"], - "tap_stream_id": SimpleTestStream.name, - "stream": SimpleTestStream.name, - "schema": SimpleTestStream.schema, - "replication_method": REPLICATION_FULL_TABLE, - "replication_key": None, - } - ] - } return SimpleTestTap( - config={"start_date": "2021-01-01"}, + config={"start_date": CONFIG_START_DATE}, parse_env_config=False, - catalog=catalog_dict, ) @pytest.fixture def stream(tap: SimpleTestTap) -> SimpleTestStream: """Create a new stream instance.""" - return cast(SimpleTestStream, tap.load_streams()[0]) + return t.cast(SimpleTestStream, tap.load_streams()[0]) + + +@pytest.fixture +def unix_timestamp_stream(tap: SimpleTestTap) -> UnixTimestampIncrementalStream: + """Create a new stream instance.""" + return t.cast(UnixTimestampIncrementalStream, tap.load_streams()[1]) -def test_stream_apply_catalog(tap: SimpleTestTap, stream: SimpleTestStream): +def test_stream_apply_catalog(stream: SimpleTestStream): """Applying a catalog to a stream should overwrite fields.""" assert stream.primary_keys == [] assert stream.replication_key == "updatedAt" assert stream.replication_method == REPLICATION_INCREMENTAL assert stream.forced_replication_method is None - assert tap.input_catalog is not None - stream.apply_catalog(catalog=tap.input_catalog) + stream.apply_catalog( + catalog=Catalog.from_dict( + { + "streams": [ + { + "tap_stream_id": stream.name, + "metadata": MetadataMapping(), + "key_properties": ["id"], + "stream": stream.name, + "schema": stream.schema, + "replication_method": REPLICATION_FULL_TABLE, + "replication_key": None, + }, + ], + }, + ), + ) assert stream.primary_keys == ["id"] assert stream.replication_key is None @@ -129,31 +191,109 @@ def test_stream_apply_catalog(tap: SimpleTestTap, stream: SimpleTestStream): assert stream.forced_replication_method == REPLICATION_FULL_TABLE -def test_stream_starting_timestamp(tap: SimpleTestTap, stream: SimpleTestStream): - """Validate state and start_time setting handling.""" - timestamp_value = "2021-02-01" +@pytest.mark.parametrize( + "stream_name,bookmark_value,expected_starting_value", + [ + pytest.param( + "test", + None, + pendulum.parse(CONFIG_START_DATE), + id="datetime-repl-key-no-state", + ), + pytest.param( + "test", + "2021-02-01", + pendulum.datetime(2021, 2, 1), + id="datetime-repl-key-recent-bookmark", + ), + pytest.param( + "test", + "2020-01-01", + pendulum.parse(CONFIG_START_DATE), + id="datetime-repl-key-old-bookmark", + ), + pytest.param( + "unix_ts", + None, + CONFIG_START_DATE, + id="naive-unix-ts-repl-key-no-state", + ), + pytest.param( + "unix_ts", + "1612137600", + "1612137600", + id="naive-unix-ts-repl-key-recent-bookmark", + ), + pytest.param( + "unix_ts", + "1577858400", + "1577858400", + id="naive-unix-ts-repl-key-old-bookmark", + ), + pytest.param( + "unix_ts_override", + None, + CONFIG_START_DATE, + id="unix-ts-repl-key-no-state", + ), + pytest.param( + "unix_ts_override", + "1612137600", + "1612137600", + id="unix-ts-repl-key-recent-bookmark", + ), + pytest.param( + "unix_ts_override", + "1577858400", + pendulum.parse(CONFIG_START_DATE).format("X"), + id="unix-ts-repl-key-old-bookmark", + ), + ], +) +def test_stream_starting_timestamp( + tap: SimpleTestTap, + stream_name: str, + bookmark_value: str, + expected_starting_value: t.Any, +): + """Test the starting timestamp for a stream.""" + stream = tap.streams[stream_name] + + if stream.is_timestamp_replication_key: + get_starting_value = 
stream.get_starting_timestamp + else: + get_starting_value = stream.get_starting_replication_key_value - stream._write_starting_replication_value(None) - assert stream.get_starting_timestamp(None) == pendulum.parse( - cast(str, stream.config.get("start_date")) - ) tap.load_state( { "bookmarks": { - stream.name: { + stream_name: { "replication_key": stream.replication_key, - "replication_key_value": timestamp_value, - } - } - } + "replication_key_value": bookmark_value, + }, + }, + }, ) stream._write_starting_replication_value(None) - assert stream.replication_key == "updatedAt" - assert stream.replication_method == REPLICATION_INCREMENTAL - assert stream.is_timestamp_replication_key - assert stream.get_starting_timestamp(None) == pendulum.parse( - timestamp_value - ), f"Incorrect starting timestamp. Tap state was {dict(tap.state)}" + assert get_starting_value(None) == expected_starting_value + + +def test_stream_invalid_replication_key(tap: SimpleTestTap): + """Validate an exception is raised if replication_key not in schema.""" + + class InvalidReplicationKeyStream(SimpleTestStream): + replication_key = "INVALID" + + stream = InvalidReplicationKeyStream(tap) + + with pytest.raises( + InvalidReplicationKeyException, + match=( + f"Field '{stream.replication_key}' is not in schema for stream " + f"'{stream.name}'" + ), + ): + _check = stream.is_timestamp_replication_key @pytest.mark.parametrize( @@ -214,7 +354,10 @@ def test_stream_starting_timestamp(tap: SimpleTestTap, stream: SimpleTestStream) ], ) def test_jsonpath_rest_stream( - tap: SimpleTestTap, path: str, content: str, result: List[dict] + tap: SimpleTestTap, + path: str, + content: str, + result: list[dict], ): """Validate records are extracted correctly from the API response.""" fake_response = requests.Response() @@ -223,9 +366,9 @@ def test_jsonpath_rest_stream( RestTestStream.records_jsonpath = path stream = RestTestStream(tap) - rows = stream.parse_response(fake_response) + records = stream.parse_response(fake_response) - assert list(rows) == result + assert list(records) == result def test_jsonpath_graphql_stream_default(tap: SimpleTestTap): @@ -243,9 +386,9 @@ def test_jsonpath_graphql_stream_default(tap: SimpleTestTap): fake_response._content = str.encode(content) stream = GraphqlTestStream(tap) - rows = stream.parse_response(fake_response) + records = stream.parse_response(fake_response) - assert list(rows) == [{"id": 1, "value": "abc"}, {"id": 2, "value": "def"}] + assert list(records) == [{"id": 1, "value": "abc"}, {"id": 2, "value": "def"}] def test_jsonpath_graphql_stream_override(tap: SimpleTestTap): @@ -261,14 +404,14 @@ def test_jsonpath_graphql_stream_override(tap: SimpleTestTap): class GraphQLJSONPathOverride(GraphqlTestStream): @classproperty - def records_jsonpath(cls): + def records_jsonpath(cls): # noqa: N805 return "$[*]" stream = GraphQLJSONPathOverride(tap) - rows = stream.parse_response(fake_response) + records = stream.parse_response(fake_response) - assert list(rows) == [{"id": 1, "value": "abc"}, {"id": 2, "value": "def"}] + assert list(records) == [{"id": 1, "value": "abc"}, {"id": 2, "value": "def"}] @pytest.mark.parametrize( @@ -335,7 +478,11 @@ def records_jsonpath(cls): ], ) def test_next_page_token_jsonpath( - tap: SimpleTestTap, path: str, content: str, headers: dict, result: str + tap: SimpleTestTap, + path: str, + content: str, + headers: dict, + result: str, ): """Validate pagination token is extracted correctly from API response.""" fake_response = requests.Response() @@ -345,7 +492,10 @@ def 
test_next_page_token_jsonpath( RestTestStream.next_page_token_jsonpath = path stream = RestTestStream(tap) - next_page = stream.get_next_page_token(fake_response, previous_token=None) + with pytest.warns(DeprecationWarning): + paginator = stream.get_new_paginator() + + next_page = paginator.get_next(fake_response) assert next_page == result @@ -368,9 +518,9 @@ def test_sync_costs_calculation(tap: SimpleTestTap, caplog): stream = RestTestStream(tap) def calculate_test_cost( - request: requests.PreparedRequest, - response: requests.Response, - context: Optional[Dict], + request: requests.PreparedRequest, # noqa: ARG001 + response: requests.Response, # noqa: ARG001 + context: dict | None, # noqa: ARG001 ): return {"dim1": 1, "dim2": 2} @@ -387,3 +537,81 @@ def calculate_test_cost( for record in caplog.records: assert record.levelname == "INFO" assert f"Total Sync costs for stream {stream.name}" in record.message + + +@pytest.mark.parametrize( + "input_catalog,selection", + [ + pytest.param( + None, + { + "selected_stream": True, + "unselected_stream": False, + }, + id="no_catalog", + ), + pytest.param( + { + "streams": [], + }, + { + "selected_stream": False, + "unselected_stream": False, + }, + id="empty_catalog", + ), + pytest.param( + { + "streams": [ + { + "tap_stream_id": "selected_stream", + "metadata": [ + { + "breadcrumb": [], + "metadata": { + "selected": True, + }, + }, + ], + }, + { + "tap_stream_id": "unselected_stream", + "metadata": [ + { + "breadcrumb": [], + "metadata": { + "selected": True, + }, + }, + ], + }, + ], + }, + { + "selected_stream": True, + "unselected_stream": True, + }, + id="catalog_with_selection", + ), + ], +) +def test_stream_class_selection(input_catalog, selection): + """Test stream class selection.""" + + class SelectedStream(RESTStream): + name = "selected_stream" + url_base = "https://example.com" + schema = {"type": "object", "properties": {}} # noqa: RUF012 + + class UnselectedStream(SelectedStream): + name = "unselected_stream" + selected_by_default = False + + class MyTap(SimpleTestTap): + def discover_streams(self): + return [SelectedStream(self), UnselectedStream(self)] + + # Check that the selected stream is selected + tap = MyTap(config=None, catalog=input_catalog) + for stream in selection: + assert tap.streams[stream].selected is selection[stream] diff --git a/tests/core/test_target_base.py b/tests/core/test_target_base.py new file mode 100644 index 000000000..de344c7e3 --- /dev/null +++ b/tests/core/test_target_base.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +import copy + +import pytest + +from singer_sdk.exceptions import ( + MissingKeyPropertiesError, + RecordsWithoutSchemaException, +) +from singer_sdk.helpers.capabilities import PluginCapabilities +from tests.conftest import BatchSinkMock, SQLSinkMock, SQLTargetMock, TargetMock + + +def test_get_sink(): + input_schema_1 = { + "properties": { + "id": { + "type": ["string", "null"], + }, + "col_ts": { + "format": "date-time", + "type": ["string", "null"], + }, + }, + } + input_schema_2 = copy.deepcopy(input_schema_1) + key_properties = [] + target = TargetMock(config={"add_record_metadata": True}) + sink = BatchSinkMock(target, "foo", input_schema_1, key_properties) + target._sinks_active["foo"] = sink + sink_returned = target.get_sink( + "foo", + schema=input_schema_2, + key_properties=key_properties, + ) + assert sink_returned == sink + + +def test_validate_record(): + target = TargetMock() + sink = BatchSinkMock( + target=target, + stream_name="test", + schema={ + 
"properties": { + "id": {"type": ["integer"]}, + "name": {"type": ["string"]}, + }, + }, + key_properties=["id"], + ) + + # Test valid record + sink._singer_validate_message({"id": 1, "name": "test"}) + + # Test invalid record + with pytest.raises(MissingKeyPropertiesError): + sink._singer_validate_message({"name": "test"}) + + +def test_target_about_info(): + target = TargetMock() + about = target._get_about_info() + + assert about.capabilities == [ + PluginCapabilities.ABOUT, + PluginCapabilities.STREAM_MAPS, + PluginCapabilities.FLATTENING, + PluginCapabilities.BATCH, + ] + + assert "stream_maps" in about.settings["properties"] + assert "stream_map_config" in about.settings["properties"] + assert "flattening_enabled" in about.settings["properties"] + assert "flattening_max_depth" in about.settings["properties"] + assert "batch_config" in about.settings["properties"] + assert "add_record_metadata" in about.settings["properties"] + + +def test_sql_get_sink(): + input_schema_1 = { + "properties": { + "id": { + "type": ["string", "null"], + }, + "col_ts": { + "format": "date-time", + "type": ["string", "null"], + }, + }, + } + input_schema_2 = copy.deepcopy(input_schema_1) + key_properties = [] + target = SQLTargetMock(config={"sqlalchemy_url": "sqlite:///"}) + sink = SQLSinkMock( + target=target, + stream_name="foo", + schema=input_schema_1, + key_properties=key_properties, + connector=target.target_connector, + ) + target._sinks_active["foo"] = sink + sink_returned = target.get_sink( + "foo", + schema=input_schema_2, + key_properties=key_properties, + ) + assert sink_returned is sink + + +def test_add_sqlsink_and_get_sink(): + input_schema_1 = { + "properties": { + "id": { + "type": ["string", "null"], + }, + "col_ts": { + "format": "date-time", + "type": ["string", "null"], + }, + }, + } + input_schema_2 = copy.deepcopy(input_schema_1) + key_properties = [] + target = SQLTargetMock(config={"sqlalchemy_url": "sqlite:///"}) + sink = target.add_sqlsink( + "foo", + schema=input_schema_2, + key_properties=key_properties, + ) + + sink_returned = target.get_sink( + "foo", + ) + + assert sink_returned is sink + + # Test invalid call + with pytest.raises(RecordsWithoutSchemaException): + target.get_sink( + "bar", + ) diff --git a/tests/core/test_target_csv_init.py b/tests/core/test_target_csv_init.py deleted file mode 100644 index fd8ecfddc..000000000 --- a/tests/core/test_target_csv_init.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Test class creation.""" - -from samples.sample_target_csv.csv_target import SampleTargetCSV - - -def test_target_class(csv_config: dict): - """Test class creation.""" - _ = SampleTargetCSV(config=csv_config) diff --git a/tests/core/test_target_input.py b/tests/core/test_target_input.py deleted file mode 100644 index f4e91133d..000000000 --- a/tests/core/test_target_input.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Test target reading from file.""" - -import json -import os -from pathlib import Path - -import pytest -from click.testing import CliRunner - -from samples.sample_target_csv.csv_target import SampleTargetCSV - -SAMPLE_FILENAME = Path(__file__).parent / Path("./resources/messages.jsonl") -EXPECTED_OUTPUT = """"id" "name" -1 "Chris" -2 "Mike" -""" - - -@pytest.fixture -def target(csv_config: dict): - return SampleTargetCSV(config=csv_config) - - -@pytest.fixture -def cli_runner(): - return CliRunner() - - -@pytest.fixture -def config_file_path(target): - try: - path = Path(target.config["target_folder"]) / "./config.json" - with open(path, "w") as f: - 
f.write(json.dumps(dict(target.config))) - yield path - finally: - os.remove(path) - - -def test_input_arg(cli_runner, config_file_path, target): - result = cli_runner.invoke( - target.cli, - [ - "--config", - config_file_path, - "--input", - SAMPLE_FILENAME, - ], - ) - - assert result.exit_code == 0 - - output = Path(target.config["target_folder"]) / "./users.csv" - with open(output) as f: - assert f.read() == EXPECTED_OUTPUT diff --git a/tests/core/test_target_parquet_init.py b/tests/core/test_target_parquet_init.py deleted file mode 100644 index 948daad81..000000000 --- a/tests/core/test_target_parquet_init.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Test class creation.""" - -from samples.sample_target_parquet.parquet_target import SampleTargetParquet - -SAMPLE_FILENAME = "/tmp/testfile.parquet" -SAMPLE_CONFIG = {"filepath": SAMPLE_FILENAME} - - -def test_target_class(): - """Test class creation.""" - _ = SampleTargetParquet(config=SAMPLE_CONFIG) diff --git a/tests/core/test_testing.py b/tests/core/test_testing.py new file mode 100644 index 000000000..5715cd1e1 --- /dev/null +++ b/tests/core/test_testing.py @@ -0,0 +1,43 @@ +"""Test the plugin testing helpers.""" + +from __future__ import annotations + +import pytest + +from singer_sdk.testing.factory import BaseTestClass + + +def test_module_deprecations(): + with pytest.deprecated_call(): + from singer_sdk.testing import get_standard_tap_tests # noqa: F401 + + with pytest.deprecated_call(): + from singer_sdk.testing import get_standard_target_tests # noqa: F401 + + from singer_sdk import testing + + with pytest.raises( + AttributeError, + match="module singer_sdk.testing has no attribute", + ): + testing.foo # noqa: B018 + + +def test_test_class_mro(): + class PluginTestClass(BaseTestClass): + pass + + PluginTestClass.params["x"] = 1 + + class AnotherPluginTestClass(BaseTestClass): + pass + + AnotherPluginTestClass.params["x"] = 2 + AnotherPluginTestClass.params["y"] = 3 + + class SubPluginTestClass(PluginTestClass): + pass + + assert PluginTestClass.params == {"x": 1} + assert AnotherPluginTestClass.params == {"x": 2, "y": 3} + assert SubPluginTestClass.params == {"x": 1} diff --git a/tests/core/test_typing.py b/tests/core/test_typing.py new file mode 100644 index 000000000..b2cf9c691 --- /dev/null +++ b/tests/core/test_typing.py @@ -0,0 +1,320 @@ +"""Test _typing - specifically conform_record_data_types().""" + +from __future__ import annotations + +import datetime +import logging + +import pytest +import sqlalchemy + +from singer_sdk.helpers._typing import ( + TypeConformanceLevel, + _conform_primitive_property, + conform_record_data_types, +) +from singer_sdk.typing import ( + ArrayType, + BooleanType, + PropertiesList, + Property, + StringType, + to_sql_type, +) + +logger = logging.getLogger("log") + + +def test_simple_schema_conforms_types(): + schema = PropertiesList( + Property("true", BooleanType), + Property("false", BooleanType), + ).to_dict() + + record = { + "true": b"\x01", + "false": b"\x00", + } + + expected_output = { + "true": True, + "false": False, + } + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + + +def test_primitive_arrays_are_conformed(): + schema = PropertiesList( + Property("list", ArrayType(BooleanType)), + ).to_dict() + + record = { + "list": [b"\x01", b"\x00"], + } + + expected_output = {"list": [True, False]} + + actual_output = conform_record_data_types( + "test_stream", + record, 
+ schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + + +def test_only_root_fields_are_conformed_for_root_level(): + schema = PropertiesList( + Property("primitive", BooleanType), + Property("object", PropertiesList(Property("value", BooleanType))), + Property("list", ArrayType(BooleanType)), + ).to_dict() + + record = { + "primitive": b"\x01", + "object": {"value": b"\x01"}, + "list": [b"\x01", b"\x00"], + } + + expected_output = { + "primitive": True, + "object": {"value": b"\x01"}, + "list": [b"\x01", b"\x00"], + } + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.ROOT_ONLY, + logger, + ) + assert actual_output == expected_output + + +def test_no_fields_are_conformed_for_none_level(): + schema = PropertiesList( + Property("primitive", BooleanType), + Property("object", PropertiesList(Property("value", BooleanType))), + Property("list", ArrayType(BooleanType)), + ).to_dict() + + record = { + "primitive": b"\x01", + "object": {"value": b"\x01"}, + "list": [b"\x01", b"\x00"], + } + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.NONE, + logger, + ) + assert actual_output == record + + +def test_object_arrays_are_conformed(): + schema = PropertiesList( + Property("list", ArrayType(PropertiesList(Property("value", BooleanType)))), + ).to_dict() + + record = {"list": [{"value": b"\x01"}, {"value": b"\x00"}]} + + expected_output = {"list": [{"value": True}, {"value": False}]} + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + + +def test_mixed_arrays_are_conformed(): + schema = { + "type": "object", + "properties": { + "list": { + "type": ["array", "null"], + "items": { + "type": ["object", "boolean"], + "properties": {"value": {"type": ["boolean", "null"]}}, + }, + }, + }, + } + + record = {"list": [{"value": b"\x01"}, b"\x00"]} + + expected_output = {"list": [{"value": True}, False]} + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + + +def test_nested_objects_are_conformed(): + schema = PropertiesList( + Property("object", PropertiesList(Property("value", BooleanType))), + ).to_dict() + + record = {"object": {"value": b"\x01"}} + + expected_output = {"object": {"value": True}} + + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + + +def test_simple_schema_removes_types(caplog: pytest.LogCaptureFixture): + schema = PropertiesList( + Property("keep", StringType), + ).to_dict() + + record = {"keep": "hello", "remove": "goodbye"} + + expected_output = {"keep": "hello"} + + with caplog.at_level(logging.WARNING): + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + assert caplog.records[0].message == ( + "Properties ('remove',) were present in the 'test_stream' stream but not " + "found in catalog schema. Ignoring." 
+ ) + + +def test_nested_objects_remove_types(caplog: pytest.LogCaptureFixture): + schema = PropertiesList( + Property("object", PropertiesList(Property("keep", StringType))), + ).to_dict() + + record = {"object": {"keep": "hello", "remove": "goodbye"}} + + expected_output = {"object": {"keep": "hello"}} + + with caplog.at_level(logging.WARNING): + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + assert caplog.records[0].message == ( + "Properties ('object.remove',) were present in the 'test_stream' stream " + "but not found in catalog schema. Ignoring." + ) + + +def test_object_arrays_remove_types(caplog: pytest.LogCaptureFixture): + schema = PropertiesList( + Property("list", ArrayType(PropertiesList(Property("keep", StringType)))), + ).to_dict() + + record = {"list": [{"keep": "hello", "remove": "goodbye"}]} + + expected_output = {"list": [{"keep": "hello"}]} + + with caplog.at_level(logging.WARNING): + actual_output = conform_record_data_types( + "test_stream", + record, + schema, + TypeConformanceLevel.RECURSIVE, + logger, + ) + assert actual_output == expected_output + assert caplog.records[0].message == ( + "Properties ('list.remove',) were present in the 'test_stream' stream but " + "not found in catalog schema. Ignoring." + ) + + +def test_conform_primitives(): + assert ( + _conform_primitive_property( + datetime.datetime(2020, 5, 17, tzinfo=datetime.timezone.utc), + {"type": "string"}, + ) + == "2020-05-17T00:00:00+00:00" + ) + assert ( + _conform_primitive_property(datetime.date(2020, 5, 17), {"type": "string"}) + == "2020-05-17T00:00:00+00:00" + ) + assert ( + _conform_primitive_property(datetime.timedelta(365), {"type": "string"}) + == "1971-01-01T00:00:00+00:00" + ) + assert ( + _conform_primitive_property(datetime.time(12, 0, 0), {"type": "string"}) + == "12:00:00" + ) + + assert _conform_primitive_property(b"\x00", {"type": "string"}) == "00" + assert _conform_primitive_property(b"\xBC", {"type": "string"}) == "bc" + + assert _conform_primitive_property(b"\x00", {"type": "boolean"}) is False + assert _conform_primitive_property(b"\xBC", {"type": "boolean"}) is True + + assert _conform_primitive_property(None, {"type": "boolean"}) is None + assert _conform_primitive_property(0, {"type": "boolean"}) is False + assert _conform_primitive_property(1, {"type": "boolean"}) is True + + +@pytest.mark.parametrize( + "jsonschema_type,expected", + [ + ({"type": ["string", "null"]}, sqlalchemy.types.VARCHAR), + ({"type": ["integer", "null"]}, sqlalchemy.types.INTEGER), + ({"type": ["number", "null"]}, sqlalchemy.types.DECIMAL), + ({"type": ["boolean", "null"]}, sqlalchemy.types.BOOLEAN), + ({"type": "object", "properties": {}}, sqlalchemy.types.VARCHAR), + ({"type": "array"}, sqlalchemy.types.VARCHAR), + ({"format": "date", "type": ["string", "null"]}, sqlalchemy.types.DATE), + ({"format": "time", "type": ["string", "null"]}, sqlalchemy.types.TIME), + ( + {"format": "date-time", "type": ["string", "null"]}, + sqlalchemy.types.DATETIME, + ), + ( + {"anyOf": [{"type": "string", "format": "date-time"}, {"type": "null"}]}, + sqlalchemy.types.DATETIME, + ), + ({"anyOf": [{"type": "integer"}, {"type": "null"}]}, sqlalchemy.types.INTEGER), + ], +) +def test_to_sql_type(jsonschema_type, expected): + assert isinstance(to_sql_type(jsonschema_type), expected) diff --git a/tests/external/__init__.py b/tests/external/__init__.py index c058ef421..0226b7672 100644 --- 
a/tests/external/__init__.py +++ b/tests/external/__init__.py @@ -1 +1,3 @@ """SDK external system tests.""" + +from __future__ import annotations diff --git a/tests/external/conftest.py b/tests/external/conftest.py index d9e748356..6cb7d8b45 100644 --- a/tests/external/conftest.py +++ b/tests/external/conftest.py @@ -1,31 +1,42 @@ """External tests fixtures.""" +from __future__ import annotations + import json from pathlib import Path -from typing import Optional import pytest -@pytest.fixture -def gitlab_config() -> Optional[dict]: +def gitlab_config() -> dict | None: """Create a tap-gitlab config object.""" - config: Optional[dict] = None + path = Path("singer_sdk/tests/external/.secrets/gitlab-config.json") + if not path.exists(): + # local testing relative path + path = Path("tests/external/.secrets/gitlab-config.json") if path.exists(): - config = json.loads(path.read_text()) + return json.loads(path.read_text()) + + return None - return config +@pytest.fixture(name="gitlab_config") +def gitlab_config_fixture() -> dict | None: + return gitlab_config() -@pytest.fixture -def ga_config() -> Optional[dict]: + +def ga_config() -> dict | None: """Create a tap-google-analytics config object.""" - config: Optional[dict] = None path = Path("singer_sdk/tests/external/.secrets/google-analytics-config.json") if path.exists(): - config = json.loads(path.read_text()) + return json.loads(path.read_text()) + + return None + - return config +@pytest.fixture(name="ga_config") +def ga_config_fixture() -> dict | None: + return ga_config() diff --git a/tests/external/test_ga_init.py b/tests/external/test_ga_init.py deleted file mode 100644 index 1f1241494..000000000 --- a/tests/external/test_ga_init.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Test class creation.""" - -from typing import Optional - -from samples.sample_tap_google_analytics.ga_tap import SampleTapGoogleAnalytics - -CONFIG_FILE = "singer_sdk/tests/external/.secrets/google-analytics-config.json" - - -def test_tap_class(ga_config: Optional[dict]): - """Test class creation.""" - _ = SampleTapGoogleAnalytics(config=ga_config, parse_env_config=True) diff --git a/tests/external/test_ga_sync.py b/tests/external/test_ga_sync.py deleted file mode 100644 index 026ad6895..000000000 --- a/tests/external/test_ga_sync.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Test class creation.""" - -from typing import Optional - -from samples.sample_tap_google_analytics.ga_tap import SampleTapGoogleAnalytics - - -def test_ga_sync_sample(ga_config: Optional[dict]): - """Test class creation.""" - tap = SampleTapGoogleAnalytics(config=ga_config, parse_env_config=True) - tap.sync_all() diff --git a/tests/external/test_generic_tests_external.py b/tests/external/test_generic_tests_external.py deleted file mode 100644 index 6dbb847af..000000000 --- a/tests/external/test_generic_tests_external.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Run the generic tests from `singer_sdk.testing`.""" - -from pathlib import Path -from typing import Optional - -from samples.sample_tap_gitlab.gitlab_tap import SampleTapGitlab -from samples.sample_tap_google_analytics.ga_tap import SampleTapGoogleAnalytics -from singer_sdk.testing import get_standard_tap_tests - -GA_CONFIG_FILE = Path("singer_sdk/tests/external/.secrets/google-analytics-config.json") - - -def test_gitlab_tap_standard_tests(gitlab_config: Optional[dict]): - """Run standard tap tests against Gitlab tap.""" - tests = get_standard_tap_tests(SampleTapGitlab, config=gitlab_config) - for test in tests: - test() - - -def 
test_ga_tap_standard_tests(ga_config: Optional[dict]): - """Run standard tap tests against Google Analytics tap.""" - tests = get_standard_tap_tests(SampleTapGoogleAnalytics, config=ga_config) - for test in tests: - test() diff --git a/tests/external/test_gitlab_discovery.py b/tests/external/test_gitlab_discovery.py deleted file mode 100644 index fffa6a7a8..000000000 --- a/tests/external/test_gitlab_discovery.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Tests discovery features for Parquet.""" - -from typing import Optional - -from samples.sample_tap_gitlab.gitlab_tap import SampleTapGitlab - - -def test_gitlab_tap_discovery(gitlab_config: Optional[dict]): - """Test class creation.""" - tap = SampleTapGitlab(config=gitlab_config, state=None, parse_env_config=True) - catalog_json = tap.run_discovery() - assert catalog_json - - -def test_gitlab_replication_keys(gitlab_config: Optional[dict]): - stream_name = "issues" - expected_replication_key = "updated_at" - tap = SampleTapGitlab(config=gitlab_config, state=None, parse_env_config=True) - - catalog = tap._singer_catalog - catalog_entry = catalog.get_stream(stream_name) - metadata_root = catalog_entry.metadata.root - - key_props_1 = metadata_root.valid_replication_keys[0] - key_props_2 = catalog_entry.replication_key - assert key_props_1 == expected_replication_key, ( - f"Incorrect 'valid-replication-keys' in catalog: ({key_props_1})\n\n" - f"Root metadata was: {metadata_root}\n\nCatalog entry was: {catalog_entry}" - ) - assert key_props_2 == expected_replication_key, ( - f"Incorrect 'replication_key' in catalog: ({key_props_2})\n\n" - f"Catalog entry was: {catalog_entry}" - ) - assert tap.streams[ - stream_name - ].is_timestamp_replication_key, "Failed to detect `is_timestamp_replication_key`" - - assert tap.streams[ - "commits" - ].is_timestamp_replication_key, "Failed to detect `is_timestamp_replication_key`" diff --git a/tests/external/test_gitlab_sync.py b/tests/external/test_gitlab_sync.py deleted file mode 100644 index 6ed2f2644..000000000 --- a/tests/external/test_gitlab_sync.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Test sample sync.""" -from typing import Optional - -from samples.sample_tap_gitlab.gitlab_tap import SampleTapGitlab -from singer_sdk.helpers import _catalog -from singer_sdk.helpers._singer import Catalog - -COUNTER = 0 -SAMPLE_CONFIG_BAD = {"not": "correct"} - - -def test_gitlab_sync_all(gitlab_config: Optional[dict]): - """Test sync_all() for gitlab sample.""" - tap = SampleTapGitlab(config=gitlab_config, parse_env_config=True) - tap.sync_all() - - -def test_gitlab_sync_epic_issues(gitlab_config: Optional[dict]): - """Test sync for just the 'epic_issues' child stream.""" - # Initialize with basic config - stream_name = "epic_issues" - tap1 = SampleTapGitlab(config=gitlab_config, parse_env_config=True) - # Test discovery - tap1.run_discovery() - catalog1 = Catalog.from_dict(tap1.catalog_dict) - # Reset and re-initialize with an input catalog - _catalog.deselect_all_streams(catalog=catalog1) - _catalog.set_catalog_stream_selected( - catalog=catalog1, - stream_name=stream_name, - selected=True, - ) - tap1 = None - tap2 = SampleTapGitlab( - config=gitlab_config, parse_env_config=True, catalog=catalog1.to_dict() - ) - tap2.sync_all() diff --git a/tests/external/test_tap_gitlab.py b/tests/external/test_tap_gitlab.py new file mode 100644 index 000000000..19be732ac --- /dev/null +++ b/tests/external/test_tap_gitlab.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +import warnings + +from samples.sample_tap_gitlab.gitlab_tap 
import SampleTapGitlab +from singer_sdk._singerlib import Catalog +from singer_sdk.exceptions import ConfigValidationError +from singer_sdk.helpers import _catalog +from singer_sdk.testing import get_tap_test_class + +from .conftest import gitlab_config + +try: + config = gitlab_config() + TestSampleTapGitlab = get_tap_test_class( + tap_class=SampleTapGitlab, + config=config, + parse_env_config=True, + ) +except ConfigValidationError as e: + warnings.warn( + UserWarning( + "Could not configure external gitlab tests. " + f"Config in CI is expected via env vars.\n{e}", + ), + stacklevel=2, + ) + +COUNTER = 0 +SAMPLE_CONFIG_BAD = {"not": "correct"} + + +def test_gitlab_replication_keys(gitlab_config: dict | None): + stream_name = "issues" + expected_replication_key = "updated_at" + tap = SampleTapGitlab(config=gitlab_config, state=None, parse_env_config=True) + + catalog = tap._singer_catalog + catalog_entry = catalog.get_stream(stream_name) + metadata_root = catalog_entry.metadata.root + + key_props_1 = metadata_root.valid_replication_keys[0] + key_props_2 = catalog_entry.replication_key + assert key_props_1 == expected_replication_key, ( + f"Incorrect 'valid-replication-keys' in catalog: ({key_props_1})\n\n" + f"Root metadata was: {metadata_root}\n\nCatalog entry was: {catalog_entry}" + ) + assert key_props_2 == expected_replication_key, ( + f"Incorrect 'replication_key' in catalog: ({key_props_2})\n\n" + f"Catalog entry was: {catalog_entry}" + ) + assert tap.streams[ + stream_name + ].is_timestamp_replication_key, "Failed to detect `is_timestamp_replication_key`" + + assert tap.streams[ + "commits" + ].is_timestamp_replication_key, "Failed to detect `is_timestamp_replication_key`" + + +def test_gitlab_sync_epic_issues(gitlab_config: dict | None): + """Test sync for just the 'epic_issues' child stream.""" + # Initialize with basic config + stream_name = "epic_issues" + tap1 = SampleTapGitlab(config=gitlab_config, parse_env_config=True) + # Test discovery + tap1.run_discovery() + catalog1 = Catalog.from_dict(tap1.catalog_dict) + # Reset and re-initialize with an input catalog + _catalog.deselect_all_streams(catalog=catalog1) + _catalog.set_catalog_stream_selected( + catalog=catalog1, + stream_name=stream_name, + selected=True, + ) + tap1 = None + tap2 = SampleTapGitlab( + config=gitlab_config, + parse_env_config=True, + catalog=catalog1.to_dict(), + ) + tap2.sync_all() diff --git a/tests/external/test_tap_google_analytics.py b/tests/external/test_tap_google_analytics.py new file mode 100644 index 000000000..1bed825fd --- /dev/null +++ b/tests/external/test_tap_google_analytics.py @@ -0,0 +1,26 @@ +"""Tests standard tap features using the built-in SDK tests library.""" + +from __future__ import annotations + +import warnings + +from samples.sample_tap_google_analytics.ga_tap import SampleTapGoogleAnalytics +from singer_sdk.exceptions import ConfigValidationError +from singer_sdk.testing import get_tap_test_class + +from .conftest import ga_config + +try: + TestSampleTapGoogleAnalytics = get_tap_test_class( + tap_class=SampleTapGoogleAnalytics, + config=ga_config(), + parse_env_config=True, + ) +except ConfigValidationError as e: + warnings.warn( + UserWarning( + "Could not configure external gitlab tests. 
" + f"Config in CI is expected via env vars.\n{e}", + ), + stacklevel=2, + ) diff --git a/tests/external_snowflake/.secrets/snowflake-config.json.template b/tests/external_snowflake/.secrets/snowflake-config.json.template deleted file mode 100644 index 0967ef424..000000000 --- a/tests/external_snowflake/.secrets/snowflake-config.json.template +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/tests/external_snowflake/__init__.py b/tests/external_snowflake/__init__.py deleted file mode 100644 index e3a8c3cf5..000000000 --- a/tests/external_snowflake/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""SDK Snowflake sample tests.""" diff --git a/tests/external_snowflake/notest_snowflake.py b/tests/external_snowflake/notest_snowflake.py deleted file mode 100644 index 2e4ccb534..000000000 --- a/tests/external_snowflake/notest_snowflake.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Test class creation.""" - -import json -from pathlib import Path - -from samples.sample_tap_snowflake.snowflake_tap import SampleTapSnowflake - -CONFIG_FILE = "singer_sdk/tests/.secrets/snowflake-config.json" - -SAMPLE_CATALOG_FILEPATH = ( - "singer_sdk/samples/sample_tap_snowflake/snowflake-catalog.sample.json" -) - - -def test_snowflake_tap_init(): - """Test snowflake tap creation.""" - catalog_dict = json.loads(Path(SAMPLE_CATALOG_FILEPATH).read_text()) - _ = SampleTapSnowflake(config=CONFIG_FILE, state=None, catalog=catalog_dict) - - -def test_snowflake_sync_one(): - """Test snowflake discovery.""" - tap = SampleTapSnowflake(config=CONFIG_FILE, state=None) - tap.sync_one(tap.streams[tap.streams.keys()[0]]) - assert True - - -def test_snowflake_discovery(): - """Test snowflake discovery.""" - tap = SampleTapSnowflake(config=CONFIG_FILE, state=None) - catalog_json = tap.run_discovery() - assert catalog_json diff --git a/tests/samples/__init__.py b/tests/samples/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/samples/conftest.py b/tests/samples/conftest.py new file mode 100644 index 000000000..29560a330 --- /dev/null +++ b/tests/samples/conftest.py @@ -0,0 +1,88 @@ +"""Tap, target and stream test fixtures.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest +from sqlalchemy import text + +from samples.sample_tap_sqlite import SQLiteConnector, SQLiteTap +from singer_sdk._singerlib import Catalog +from singer_sdk.testing import _get_tap_catalog + + +@pytest.fixture +def csv_config(outdir: str) -> dict: + """Get configuration dictionary for target-csv.""" + return {"target_folder": outdir} + + +@pytest.fixture +def _sqlite_sample_db(sqlite_connector): + """Return a path to a newly constructed sample DB.""" + with sqlite_connector._connect() as conn, conn.begin(): + for t in range(3): + conn.execute(text(f"DROP TABLE IF EXISTS t{t}")) + conn.execute( + text(f"CREATE TABLE t{t} (c1 int PRIMARY KEY, c2 varchar(10))"), + ) + for x in range(100): + conn.execute( + text(f"INSERT INTO t{t} VALUES ({x}, 'x={x}')"), # noqa: S608 + ) + + +@pytest.fixture +def sqlite_sample_tap( + _sqlite_sample_db, + sqlite_sample_db_config, + sqlite_sample_db_state, +) -> SQLiteTap: + _ = _sqlite_sample_db + catalog_obj = Catalog.from_dict( + _get_tap_catalog(SQLiteTap, config=sqlite_sample_db_config, select_all=True), + ) + + # Set stream `t1` to use incremental replication. 
+ t0 = catalog_obj.get_stream("main-t0") + t0.replication_key = "c1" + t0.replication_method = "INCREMENTAL" + t1 = catalog_obj.get_stream("main-t1") + t1.key_properties = ["c1"] + t1.replication_method = "FULL_TABLE" + t2 = catalog_obj.get_stream("main-t2") + t2.key_properties = ["c1"] + t2.replication_key = "c1" + t2.replication_method = "INCREMENTAL" + return SQLiteTap( + config=sqlite_sample_db_config, + catalog=catalog_obj.to_dict(), + state=sqlite_sample_db_state, + ) + + +@pytest.fixture +def sqlite_connector(sqlite_sample_db_config) -> SQLiteConnector: + return SQLiteConnector(config=sqlite_sample_db_config) + + +@pytest.fixture +def path_to_sample_data_db(tmp_path: Path) -> Path: + return tmp_path / Path("foo.db") + + +@pytest.fixture +def sqlite_sample_db_config(path_to_sample_data_db: Path) -> dict: + """Get configuration dictionary for target-csv.""" + return {"path_to_db": str(path_to_sample_data_db)} + + +@pytest.fixture +def sqlite_sample_db_state() -> dict: + """Get configuration dictionary for target-csv.""" + return { + "bookmarks": { + "main-t0": {"replication_key": "c1", "replication_key_value": 55}, + }, + } diff --git a/tests/core/resources/messages.jsonl b/tests/samples/resources/messages.jsonl similarity index 100% rename from tests/core/resources/messages.jsonl rename to tests/samples/resources/messages.jsonl diff --git a/tests/core/test_countries_sync.py b/tests/samples/test_tap_countries.py similarity index 51% rename from tests/core/test_countries_sync.py rename to tests/samples/test_tap_countries.py index 39abc58fe..31e0b8666 100644 --- a/tests/core/test_countries_sync.py +++ b/tests/samples/test_tap_countries.py @@ -1,21 +1,39 @@ """Test sample sync.""" +from __future__ import annotations + import copy +import io +import json import logging +import typing as t +from contextlib import redirect_stdout + +import pytest +from click.testing import CliRunner from samples.sample_tap_countries.countries_tap import SampleTapCountries from singer_sdk.helpers._catalog import ( get_selected_schema, pop_deselected_record_properties, ) +from singer_sdk.testing import get_tap_test_class +from singer_sdk.testing.config import SuiteConfig -SAMPLE_CONFIG_BAD = {"not": "correct"} +if t.TYPE_CHECKING: + from pathlib import Path + from pytest_snapshot.plugin import Snapshot -def test_countries_sync_all(): - """Test sync_all() for countries sample.""" - tap = SampleTapCountries(config=None) - tap.sync_all() +SAMPLE_CONFIG = {} +SAMPLE_CONFIG_BAD = {"not": "correct"} + +# standard tap tests +TestSampleTapCountries = get_tap_test_class( + tap_class=SampleTapCountries, + config=SAMPLE_CONFIG, + suite_config=SuiteConfig(max_records_limit=5), +) def test_countries_primary_key(): @@ -24,12 +42,12 @@ def test_countries_primary_key(): metadata_root = countries_entry.metadata.root key_props_1 = metadata_root.table_key_properties key_props_2 = countries_entry.key_properties - assert key_props_1 == ["code"], ( + assert key_props_1 == ("code",), ( f"Incorrect 'table-key-properties' in catalog: ({key_props_1})\n\n" f"Root metadata was: {metadata_root}\n\n" f"Catalog entry was: {countries_entry}" ) - assert key_props_2 == ["code"], ( + assert key_props_2 == ("code",), ( f"Incorrect 'key_properties' in catalog: ({key_props_2})\n\n" "Catalog entry was: {countries_entry}" ) @@ -82,3 +100,61 @@ def test_with_catalog_entry(): logger=logging.getLogger(), ) assert new_schema == stream.schema + + +def test_batch_mode(outdir): + """Test batch mode.""" + tap = SampleTapCountries( + config={ + 
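+ # The batch encoding below emits gzip-compressed JSONL files under the test's outdir, prefixed "pytest-countries-".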
"batch_config": { + "encoding": { + "format": "jsonl", + "compression": "gzip", + }, + "storage": { + "root": outdir, + "prefix": "pytest-countries-", + }, + }, + }, + ) + + buf = io.StringIO() + with redirect_stdout(buf): + tap.sync_all() + + buf.seek(0) + lines = buf.read().splitlines() + messages = [json.loads(line) for line in lines] + + def tally_messages(messages: list) -> t.Counter: + """Tally messages.""" + return t.Counter( + (message["type"], message["stream"]) + if message["type"] != "STATE" + else (message["type"],) + for message in messages + ) + + counter = tally_messages(messages) + assert counter["SCHEMA", "continents"] == 1 + assert counter["BATCH", "continents"] == 1 + + assert counter["SCHEMA", "countries"] == 1 + assert counter["BATCH", "countries"] == 1 + + assert counter[("STATE",)] == 3 + + +@pytest.mark.snapshot() +def test_write_schema( + snapshot: Snapshot, + snapshot_dir: Path, +): + snapshot.snapshot_dir = snapshot_dir.joinpath("countries_write_schemas") + + runner = CliRunner(mix_stderr=False) + result = runner.invoke(SampleTapCountries.cli, ["--test", "schema"]) + + snapshot_name = "countries_write_schemas" + snapshot.assert_match(result.stdout, snapshot_name) diff --git a/tests/samples/test_tap_sqlite.py b/tests/samples/test_tap_sqlite.py new file mode 100644 index 000000000..e2b1940da --- /dev/null +++ b/tests/samples/test_tap_sqlite.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +import json +import typing as t + +from click.testing import CliRunner + +from samples.sample_tap_sqlite import SQLiteTap +from samples.sample_target_csv.csv_target import SampleTargetCSV +from singer_sdk import SQLStream +from singer_sdk._singerlib import MetadataMapping, StreamMetadata +from singer_sdk.testing import ( + get_standard_tap_tests, + tap_to_target_sync_test, +) + +if t.TYPE_CHECKING: + from pathlib import Path + + from singer_sdk.tap_base import SQLTap + + +def _discover_and_select_all(tap: SQLTap) -> None: + """Discover catalog and auto-select all streams.""" + for catalog_entry in tap.catalog_dict["streams"]: + md = MetadataMapping.from_iterable(catalog_entry["metadata"]) + md.root.selected = True + catalog_entry["metadata"] = md.to_list() + + +def test_tap_sqlite_cli(sqlite_sample_db_config: dict[str, t.Any], tmp_path: Path): + runner = CliRunner() + filepath = tmp_path / "config.json" + + with filepath.open("w") as f: + json.dump(sqlite_sample_db_config, f) + + result = runner.invoke( + SQLiteTap.cli, + ["--discover", "--config", str(filepath)], + ) + assert result.exit_code == 0 + + catalog = json.loads(result.stdout) + assert "streams" in catalog + + +def test_sql_metadata(sqlite_sample_tap: SQLTap): + stream = t.cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) + detected_metadata = stream.catalog_entry["metadata"] + detected_root_md = next(md for md in detected_metadata if md["breadcrumb"] == []) + detected_root_md = detected_root_md["metadata"] + translated_metadata = StreamMetadata.from_dict(detected_root_md) + assert detected_root_md["schema-name"] == translated_metadata.schema_name + assert detected_root_md == translated_metadata.to_dict() + md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) + assert md_map[()].schema_name == "main" + assert md_map[()].table_key_properties == ["c1"] + + +def test_sqlite_discovery(sqlite_sample_tap: SQLTap): + _discover_and_select_all(sqlite_sample_tap) + sqlite_sample_tap.sync_all() + stream = t.cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) + schema = stream.schema + assert 
len(schema["properties"]) == 2 + assert stream.name == stream.tap_stream_id == "main-t1" + + md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) + assert md_map[()] is not None + assert md_map[()] is md_map.root + assert md_map[()].schema_name == "main" + + assert stream.metadata.root.schema_name == "main" + assert stream.fully_qualified_name == "main.t1" + + assert stream.metadata.root.table_key_properties == ["c1"] + assert stream.primary_keys == ["c1"] + + +def test_sqlite_input_catalog(sqlite_sample_tap: SQLTap): + sqlite_sample_tap.sync_all() + stream = t.cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) + assert len(stream.schema["properties"]) == 2 + assert len(stream.stream_maps[0].transformed_schema["properties"]) == 2 + + for schema in [stream.schema, stream.stream_maps[0].transformed_schema]: + assert len(schema["properties"]) == 2 + assert schema["properties"]["c1"] == {"type": ["integer", "null"]} + assert schema["properties"]["c2"] == {"type": ["string", "null"]} + assert stream.name == stream.tap_stream_id == "main-t1" + + md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) + assert md_map[()] is not None + assert md_map[()] is md_map.root + assert md_map[()].schema_name == "main" + + # Fails here (schema is None): + assert stream.metadata.root.schema_name == "main" + assert stream.fully_qualified_name == "main.t1" + + +def test_sqlite_tap_standard_tests(sqlite_sample_tap: SQLTap): + """Run standard tap tests against Countries tap.""" + tests = get_standard_tap_tests( + type(sqlite_sample_tap), + dict(sqlite_sample_tap.config), + ) + for test in tests: + test() + + +def test_sync_sqlite_to_csv(sqlite_sample_tap: SQLTap, tmp_path: Path): + _discover_and_select_all(sqlite_sample_tap) + orig_stdout, _, _, _ = tap_to_target_sync_test( + sqlite_sample_tap, + SampleTargetCSV(config={"target_folder": f"{tmp_path}/"}), + ) diff --git a/tests/core/test_target_end_to_end.py b/tests/samples/test_target_csv.py similarity index 60% rename from tests/core/test_target_end_to_end.py rename to tests/samples/test_target_csv.py index 4803341e7..715edbb65 100644 --- a/tests/core/test_target_end_to_end.py +++ b/tests/samples/test_target_csv.py @@ -1,75 +1,56 @@ """Test tap-to-target sync.""" +from __future__ import annotations -from typing import Any, Dict, List, Optional +import json +import shutil +import typing as t +import uuid +from pathlib import Path +import pytest +from click.testing import CliRunner from freezegun import freeze_time from samples.sample_mapper.mapper import StreamTransform from samples.sample_tap_countries.countries_tap import SampleTapCountries from samples.sample_target_csv.csv_target import SampleTargetCSV -from singer_sdk import typing as th -from singer_sdk.sinks import BatchSink -from singer_sdk.target_base import Target from singer_sdk.testing import ( + get_target_test_class, sync_end_to_end, tap_sync_test, tap_to_target_sync_test, target_sync_test, ) +from tests.conftest import TargetMock -SAMPLE_FILENAME = "/tmp/testfile.countries" -SAMPLE_TAP_CONFIG: Dict[str, Any] = {} -COUNTRIES_STREAM_MAPS_CONFIG: Dict[str, Any] = { - "stream_maps": {"continents": {}, "__else__": None} -} - - -class BatchSinkMock(BatchSink): - """A mock Sink class.""" +TEST_OUTPUT_DIR = Path(f".output/test_{uuid.uuid4()}/") +SAMPLE_CONFIG = {"target_folder": f"{TEST_OUTPUT_DIR}/"} - name = "batch-sink-mock" - def __init__( - self, - target: "TargetMock", - stream_name: str, - schema: Dict, - key_properties: Optional[List[str]], - ): - """Create 
the Mock batch-based sink.""" - super().__init__(target, stream_name, schema, key_properties) - self.target = target - - def process_record(self, record: dict, context: dict) -> None: - """Tracks the count of processed records.""" - self.target.num_records_processed += 1 - super().process_record(record, context) +StandardTests = get_target_test_class( + target_class=SampleTargetCSV, + config=SAMPLE_CONFIG, +) - def process_batch(self, context: dict) -> None: - """Write to mock trackers.""" - self.target.records_written.extend(context["records"]) - self.target.num_batches_processed += 1 +class TestSampleTargetCSV(StandardTests): + """Standard Target Tests.""" -class TargetMock(Target): - """A mock Target class.""" + @pytest.fixture(scope="class") + def test_output_dir(self): + return TEST_OUTPUT_DIR - name = "target-mock" - config_jsonschema = th.PropertiesList().to_dict() - default_sink_class = BatchSinkMock + @pytest.fixture(scope="class") + def resource(self, test_output_dir): + test_output_dir.mkdir(parents=True, exist_ok=True) + yield test_output_dir + shutil.rmtree(test_output_dir) - def __init__(self): - """Create the Mock target sync.""" - super().__init__(config={}) - self.state_messages_written: List[dict] = [] - self.records_written: List[dict] = [] - self.num_records_processed: int = 0 - self.num_batches_processed: int = 0 - def _write_state_message(self, state: dict): - """Emit the stream's latest state.""" - super()._write_state_message(state) - self.state_messages_written.append(state) +SAMPLE_TAP_CONFIG: dict[str, t.Any] = {} +COUNTRIES_STREAM_MAPS_CONFIG: dict[str, t.Any] = { + "stream_maps": {"continents": {}, "__else__": None}, +} def test_countries_to_csv(csv_config: dict): @@ -102,7 +83,7 @@ def test_target_batching(): countries_record_count = 257 with freeze_time(mocked_starttime): - target = TargetMock() + target = TargetMock(config={}) target.max_parallelism = 1 # Limit unit test to 1 process assert target.num_records_processed == 0 assert len(target.records_written) == 0 @@ -139,5 +120,51 @@ def test_target_batching(): assert target.num_records_processed == countries_record_count * 3 assert len(target.state_messages_written) == 3 assert target.state_messages_written[-1] == { - "bookmarks": {"continents": {}, "countries": {}} + "bookmarks": {"continents": {}, "countries": {}}, } + + +SAMPLE_FILENAME = Path(__file__).parent / Path("./resources/messages.jsonl") +EXPECTED_OUTPUT = """"id" "name" +1 "Chris" +2 "Mike" +""" + + +@pytest.fixture +def target(csv_config: dict): + return SampleTargetCSV(config=csv_config) + + +@pytest.fixture +def cli_runner(): + return CliRunner() + + +@pytest.fixture +def config_file_path(target): + try: + path = Path(target.config["target_folder"]) / "./config.json" + with path.open("w") as f: + f.write(json.dumps(dict(target.config))) + yield path + finally: + path.unlink() + + +def test_input_arg(cli_runner, config_file_path, target): + result = cli_runner.invoke( + target.cli, + [ + "--config", + config_file_path, + "--input", + SAMPLE_FILENAME, + ], + ) + + assert result.exit_code == 0 + + output = Path(target.config["target_folder"]) / "./users.csv" + with output.open() as f: + assert f.read() == EXPECTED_OUTPUT diff --git a/tests/samples/test_target_parquet.py b/tests/samples/test_target_parquet.py new file mode 100644 index 000000000..7f53036a4 --- /dev/null +++ b/tests/samples/test_target_parquet.py @@ -0,0 +1,36 @@ +"""Test class creation.""" +from __future__ import annotations + +import shutil +import uuid +from pathlib import 
Path + +import pytest + +from samples.sample_target_parquet.parquet_target import SampleTargetParquet +from singer_sdk.testing import get_target_test_class + +SAMPLE_FILEPATH = Path(f".output/test_{uuid.uuid4()}/") +SAMPLE_FILENAME = SAMPLE_FILEPATH / "testfile.parquet" +SAMPLE_CONFIG = { + "filepath": str(SAMPLE_FILENAME), +} + +StandardTests = get_target_test_class( + target_class=SampleTargetParquet, + config=SAMPLE_CONFIG, +) + + +class TestSampleTargetParquet(StandardTests): + """Standard Target Tests.""" + + @pytest.fixture(scope="class") + def test_output_dir(self): + return SAMPLE_FILEPATH + + @pytest.fixture(scope="class") + def resource(self, test_output_dir): + test_output_dir.mkdir(parents=True, exist_ok=True) + yield test_output_dir + shutil.rmtree(test_output_dir) diff --git a/tests/core/test_sqlite.py b/tests/samples/test_target_sqlite.py similarity index 57% rename from tests/core/test_sqlite.py rename to tests/samples/test_target_sqlite.py index b85ff7d67..abf1bccaf 100644 --- a/tests/core/test_sqlite.py +++ b/tests/samples/test_target_sqlite.py @@ -1,86 +1,33 @@ """Typing tests.""" +from __future__ import annotations + import json +import sqlite3 +import typing as t from copy import deepcopy from io import StringIO from pathlib import Path from textwrap import dedent -from typing import Dict, cast from uuid import uuid4 import pytest +import sqlalchemy -from samples.sample_tap_sqlite import SQLiteConnector, SQLiteTap -from samples.sample_target_csv.csv_target import SampleTargetCSV +from samples.sample_tap_hostile import SampleTapHostile +from samples.sample_tap_sqlite import SQLiteTap from samples.sample_target_sqlite import SQLiteSink, SQLiteTarget -from singer_sdk import SQLStream from singer_sdk import typing as th -from singer_sdk.helpers._singer import Catalog, MetadataMapping, StreamMetadata -from singer_sdk.tap_base import SQLTap -from singer_sdk.target_base import SQLTarget from singer_sdk.testing import ( _get_tap_catalog, - get_standard_tap_tests, tap_sync_test, tap_to_target_sync_test, target_sync_test, ) -# Sample DB Setup and Config - - -@pytest.fixture -def path_to_sample_data_db(tmp_path: Path) -> Path: - return tmp_path / Path("foo.db") - - -@pytest.fixture -def sqlite_sample_db_config(path_to_sample_data_db: str) -> dict: - """Get configuration dictionary for target-csv.""" - return {"path_to_db": str(path_to_sample_data_db)} - - -@pytest.fixture -def sqlite_connector(sqlite_sample_db_config) -> SQLiteConnector: - return SQLiteConnector(config=sqlite_sample_db_config) - - -@pytest.fixture -def sqlite_sample_db(sqlite_connector): - """Return a path to a newly constructed sample DB.""" - for t in range(3): - sqlite_connector.connection.execute(f"DROP TABLE IF EXISTS t{t}") - sqlite_connector.connection.execute( - f"CREATE TABLE t{t} (c1 int PRIMARY KEY, c2 varchar(10))" - ) - for x in range(100): - sqlite_connector.connection.execute( - f"INSERT INTO t{t} VALUES ({x}, 'x={x}')" - ) - - -@pytest.fixture -def sqlite_sample_tap(sqlite_sample_db, sqlite_sample_db_config) -> SQLiteTap: - _ = sqlite_sample_db - catalog_obj = Catalog.from_dict( - _get_tap_catalog(SQLiteTap, config=sqlite_sample_db_config, select_all=True) - ) - - # Set stream `t1` to use incremental replication. 
- t0 = catalog_obj.get_stream("main-t0") - t0.replication_key = "c1" - t0.replication_method = "INCREMENTAL" - t1 = catalog_obj.get_stream("main-t1") - t1.key_properties = ["c1"] - t1.replication_method = "FULL_TABLE" - t2 = catalog_obj.get_stream("main-t2") - t2.key_properties = ["c1"] - t2.replication_key = "c1" - t2.replication_method = "INCREMENTAL" - return SQLiteTap(config=sqlite_sample_db_config, catalog=catalog_obj.to_dict()) - - -# Target Test DB Setup and Config +if t.TYPE_CHECKING: + from singer_sdk.tap_base import SQLTap + from singer_sdk.target_base import SQLTarget @pytest.fixture @@ -97,105 +44,29 @@ def sqlite_target_test_config(path_to_target_db: str) -> dict: @pytest.fixture def sqlite_sample_target(sqlite_target_test_config): """Get a sample target object.""" - return SQLiteTarget(sqlite_target_test_config) + return SQLiteTarget(config=sqlite_target_test_config) @pytest.fixture def sqlite_sample_target_soft_delete(sqlite_target_test_config): """Get a sample target object with hard_delete disabled.""" - conf = sqlite_target_test_config - conf["hard_delete"] = False - - return SQLiteTarget(conf) - + return SQLiteTarget(config={**sqlite_target_test_config, "hard_delete": False}) -def _discover_and_select_all(tap: SQLTap) -> None: - """Discover catalog and auto-select all streams.""" - for catalog_entry in tap.catalog_dict["streams"]: - md = MetadataMapping.from_iterable(catalog_entry["metadata"]) - md.root.selected = True - catalog_entry["metadata"] = md.to_list() +@pytest.fixture +def sqlite_sample_target_batch(sqlite_target_test_config): + """Get a sample target object with hard_delete disabled.""" + conf = sqlite_target_test_config -# SQLite Tap Tests - - -def test_sql_metadata(sqlite_sample_tap: SQLTap): - stream = cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) - detected_metadata = stream.catalog_entry["metadata"] - detected_root_md = [md for md in detected_metadata if md["breadcrumb"] == []][0] - detected_root_md = detected_root_md["metadata"] - translated_metadata = StreamMetadata.from_dict(detected_root_md) - assert detected_root_md["schema-name"] == translated_metadata.schema_name - assert detected_root_md == translated_metadata.to_dict() - md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) - assert md_map[()].schema_name == "main" - assert md_map[()].table_key_properties == ["c1"] - - -def test_sqlite_discovery(sqlite_sample_tap: SQLTap): - _discover_and_select_all(sqlite_sample_tap) - sqlite_sample_tap.sync_all() - stream = cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) - schema = stream.schema - assert len(schema["properties"]) == 2 - assert stream.name == stream.tap_stream_id == "main-t1" - - md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) - assert md_map[()] is not None - assert md_map[()] is md_map.root - assert md_map[()].schema_name == "main" - - assert stream.metadata.root.schema_name == "main" - assert stream.fully_qualified_name == "main.t1" - - assert stream.metadata.root.table_key_properties == ["c1"] - assert stream.primary_keys == ["c1"] - - -def test_sqlite_input_catalog(sqlite_sample_tap: SQLTap): - sqlite_sample_tap.sync_all() - stream = cast(SQLStream, sqlite_sample_tap.streams["main-t1"]) - assert len(stream.schema["properties"]) == 2 - assert len(stream.stream_maps[0].transformed_schema["properties"]) == 2 - - for schema in [stream.schema, stream.stream_maps[0].transformed_schema]: - assert len(schema["properties"]) == 2 - assert schema["properties"]["c1"] == {"type": ["integer", 
"null"]} - assert schema["properties"]["c2"] == {"type": ["string", "null"]} - assert stream.name == stream.tap_stream_id == "main-t1" - - md_map = MetadataMapping.from_iterable(stream.catalog_entry["metadata"]) - assert md_map[()] is not None - assert md_map[()] is md_map.root - assert md_map[()].schema_name == "main" - - # Fails here (schema is None): - assert stream.metadata.root.schema_name == "main" - assert stream.fully_qualified_name == "main.t1" - - -def test_sqlite_tap_standard_tests(sqlite_sample_tap: SQLTap): - """Run standard tap tests against Countries tap.""" - tests = get_standard_tap_tests( - type(sqlite_sample_tap), dict(sqlite_sample_tap.config) - ) - for test in tests: - test() - - -def test_sync_sqlite_to_csv(sqlite_sample_tap: SQLTap, tmp_path: Path): - _discover_and_select_all(sqlite_sample_tap) - orig_stdout, _, _, _ = tap_to_target_sync_test( - sqlite_sample_tap, SampleTargetCSV(config={"target_folder": f"{tmp_path}/"}) - ) + return SQLiteTarget(config=conf) # SQLite Target Tests def test_sync_sqlite_to_sqlite( - sqlite_sample_tap: SQLTap, sqlite_sample_target: SQLTarget + sqlite_sample_tap: SQLTap, + sqlite_sample_target: SQLTarget, ): """End-to-end-to-end test for SQLite tap and target. @@ -208,7 +79,8 @@ def test_sync_sqlite_to_sqlite( STDOUT from the re-tapped target DB. """ orig_stdout, _, _, _ = tap_to_target_sync_test( - sqlite_sample_tap, sqlite_sample_target + sqlite_sample_tap, + sqlite_sample_target, ) orig_stdout.seek(0) tapped_config = dict(sqlite_sample_target.config) @@ -225,21 +97,21 @@ def test_sync_sqlite_to_sqlite( line_num = 0 for line_num, orig_out, new_out in zip( - range(len(orig_lines)), orig_lines, new_lines + range(len(orig_lines)), + orig_lines, + new_lines, ): try: orig_json = json.loads(orig_out) - except json.JSONDecodeError: - raise RuntimeError( - f"Could not parse JSON in orig line {line_num}: {orig_out}" - ) + except json.JSONDecodeError as e: + msg = f"Could not parse JSON in orig line {line_num}: {orig_out}" + raise RuntimeError(msg) from e try: tapped_json = json.loads(new_out) - except json.JSONDecodeError: - raise RuntimeError( - f"Could not parse JSON in new line {line_num}: {new_out}" - ) + except json.JSONDecodeError as e: + msg = f"Could not parse JSON in new line {line_num}: {new_out}" + raise RuntimeError(msg) from e assert ( tapped_json["type"] == orig_json["type"] @@ -256,6 +128,45 @@ def test_sync_sqlite_to_sqlite( assert line_num > 0, "No lines read." +def test_sqlite_schema_addition(sqlite_sample_target: SQLTarget): + """Test that SQL-based targets attempt to create new schema. + + It should attempt to create a schema if one is included in stream name, + e.g. "schema_name-table_name". 
+ """ + schema_name = f"test_schema_{str(uuid4()).split('-')[-1]}" + table_name = f"zzz_tmp_{str(uuid4()).split('-')[-1]}" + test_stream_name = f"{schema_name}-{table_name}" + schema_message = { + "type": "SCHEMA", + "stream": test_stream_name, + "schema": { + "type": "object", + "properties": {"col_a": th.StringType().to_dict()}, + }, + } + tap_output = "\n".join( + json.dumps(msg) + for msg in [ + schema_message, + { + "type": "RECORD", + "stream": test_stream_name, + "record": {"col_a": "samplerow1"}, + }, + ] + ) + # sqlite doesn't support schema creation + with pytest.raises(sqlalchemy.exc.OperationalError) as excinfo: + target_sync_test( + sqlite_sample_target, + input=StringIO(tap_output), + finalize=True, + ) + # check the target at least tried to create the schema + assert excinfo.value.statement == f"CREATE SCHEMA {schema_name}" + + def test_sqlite_column_addition(sqlite_sample_target: SQLTarget): """End-to-end-to-end test for SQLite tap and target. @@ -265,7 +176,7 @@ def test_sqlite_column_addition(sqlite_sample_target: SQLTarget): - Load a dataset with 2 columns. """ test_tbl = f"zzz_tmp_{str(uuid4()).split('-')[-1]}" - props_a: Dict[str, dict] = {"col_a": th.StringType().to_dict()} + props_a: dict[str, dict] = {"col_a": th.StringType().to_dict()} props_b = deepcopy(props_a) props_b["col_b"] = th.IntegerType().to_dict() schema_msg_a, schema_msg_b = ( @@ -302,7 +213,8 @@ def test_sqlite_column_addition(sqlite_sample_target: SQLTarget): def test_sqlite_activate_version( - sqlite_sample_target: SQLTarget, sqlite_sample_target_soft_delete: SQLTarget + sqlite_sample_target: SQLTarget, + sqlite_sample_target_soft_delete: SQLTarget, ): """Test handling the activate_version message for the SQLite target. @@ -334,7 +246,9 @@ def test_sqlite_activate_version( target_sync_test(sqlite_sample_target, input=StringIO(tap_output), finalize=True) target_sync_test( - sqlite_sample_target_soft_delete, input=StringIO(tap_output), finalize=True + sqlite_sample_target_soft_delete, + input=StringIO(tap_output), + finalize=True, ) @@ -349,8 +263,8 @@ def test_sqlite_column_morph(sqlite_sample_target: SQLTarget): supported by SQLite. """ test_tbl = f"zzz_tmp_{str(uuid4()).split('-')[-1]}" - props_a: Dict[str, dict] = {"col_a": th.IntegerType().to_dict()} - props_b: Dict[str, dict] = {"col_a": th.StringType().to_dict()} + props_a: dict[str, dict] = {"col_a": th.IntegerType().to_dict()} + props_b: dict[str, dict] = {"col_a": th.StringType().to_dict()} schema_msg_a, schema_msg_b = ( { "type": "SCHEMA", @@ -384,10 +298,58 @@ def test_sqlite_column_morph(sqlite_sample_target: SQLTarget): with pytest.raises(NotImplementedError): # SQLite does not support altering column types. target_sync_test( - sqlite_sample_target, input=StringIO(tap_output_b), finalize=True + sqlite_sample_target, + input=StringIO(tap_output_b), + finalize=True, ) +def test_sqlite_process_batch_message( + sqlite_target_test_config: dict, + sqlite_sample_target_batch: SQLiteTarget, +): + """Test handling the batch message for the SQLite target. 
+ + Test performs the following actions: + + - Sends a batch message for a table that doesn't exist (which should + have no effect) + """ + schema_message = { + "type": "SCHEMA", + "stream": "users", + "key_properties": ["id"], + "schema": { + "required": ["id"], + "type": "object", + "properties": { + "id": {"type": "integer"}, + "name": {"type": ["null", "string"]}, + }, + }, + } + batch_message = { + "type": "BATCH", + "stream": "users", + "encoding": {"format": "jsonl", "compression": "gzip"}, + "manifest": [ + "file://tests/core/resources/batch.1.jsonl.gz", + "file://tests/core/resources/batch.2.jsonl.gz", + ], + } + tap_output = "\n".join([json.dumps(schema_message), json.dumps(batch_message)]) + + target_sync_test( + sqlite_sample_target_batch, + input=StringIO(tap_output), + finalize=True, + ) + db = sqlite3.connect(sqlite_target_test_config["path_to_db"]) + cursor = db.cursor() + cursor.execute("SELECT COUNT(*) as count FROM users") + assert cursor.fetchone()[0] == 4 + + def test_sqlite_column_no_morph(sqlite_sample_target: SQLTarget): """End-to-end-to-end test for SQLite tap and target. @@ -398,8 +360,8 @@ def test_sqlite_column_no_morph(sqlite_sample_target: SQLTarget): - Ensure int value can still insert. """ test_tbl = f"zzz_tmp_{str(uuid4()).split('-')[-1]}" - props_a: Dict[str, dict] = {"col_a": th.StringType().to_dict()} - props_b: Dict[str, dict] = {"col_a": th.IntegerType().to_dict()} + props_a: dict[str, dict] = {"col_a": th.StringType().to_dict()} + props_b: dict[str, dict] = {"col_a": th.IntegerType().to_dict()} schema_msg_a, schema_msg_b = ( { "type": "SCHEMA", @@ -434,6 +396,35 @@ def test_sqlite_column_no_morph(sqlite_sample_target: SQLTarget): target_sync_test(sqlite_sample_target, input=StringIO(tap_output_b), finalize=True) +def test_record_with_missing_properties( + sqlite_sample_target: SQLTarget, +): + """Test handling of records with missing properties.""" + tap_output = "\n".join( + json.dumps(msg) + for msg in [ + { + "type": "SCHEMA", + "stream": "test_stream", + "schema": { + "type": "object", + "properties": { + "id": {"type": "integer"}, + "name": {"type": "string"}, + }, + }, + "key_properties": ["id"], + }, + { + "type": "RECORD", + "stream": "test_stream", + "record": {"id": 1}, + }, + ] + ) + target_sync_test(sqlite_sample_target, input=StringIO(tap_output), finalize=True) + + @pytest.mark.parametrize( "stream_name,schema,key_properties,expected_dml", [ @@ -451,7 +442,7 @@ def test_sqlite_column_no_morph(sqlite_sample_target: SQLTarget): """\ INSERT INTO test_stream (id, name) - VALUES (:id, :name)""" + VALUES (:id, :name)""", ), ), ], @@ -478,3 +469,42 @@ def test_sqlite_generate_insert_statement( sink.schema, ) assert dml == expected_dml + + +def test_hostile_to_sqlite( + sqlite_sample_target: SQLTarget, + sqlite_target_test_config: dict, +): + tap = SampleTapHostile() + tap_to_target_sync_test(tap, sqlite_sample_target) + # check if stream table was created + db = sqlite3.connect(sqlite_target_test_config["path_to_db"]) + cursor = db.cursor() + cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") + tables = [res[0] for res in cursor.fetchall()] + assert "hostile_property_names_stream" in tables + # check if columns were conformed + cursor.execute( + dedent( + """ + SELECT + p.name as columnName + FROM sqlite_master m + left outer join pragma_table_info((m.name)) p + on m.name <> p.name + where m.name = 'hostile_property_names_stream' + ; + """, + ), + ) + columns = {res[0] for res in cursor.fetchall()} + assert columns == { + 
"name_with_spaces", + "nameiscamelcase", + "name_with_dashes", + "name_with_dashes_and_mixed_cases", + "gname_starts_with_number", + "fname_starts_with_number", + "hname_starts_with_number", + "name_with_emoji_", + } diff --git a/tests/snapshots/about_format/json.snap.json b/tests/snapshots/about_format/json.snap.json new file mode 100644 index 000000000..e34e3a408 --- /dev/null +++ b/tests/snapshots/about_format/json.snap.json @@ -0,0 +1,32 @@ +{ + "name": "tap-example", + "description": "Example tap for Singer SDK", + "version": "0.1.1", + "sdk_version": "1.0.0", + "supported_python_versions": [ + "3.6", + "3.7", + "3.8" + ], + "capabilities": [ + "catalog", + "discover", + "state" + ], + "settings": { + "properties": { + "start_date": { + "type": "string", + "format": "date-time", + "description": "Start date for the tap to extract data from." + }, + "api_key": { + "type": "string", + "description": "API key for the tap to use." + } + }, + "required": [ + "api_key" + ] + } +} \ No newline at end of file diff --git a/tests/snapshots/about_format/markdown.snap.md b/tests/snapshots/about_format/markdown.snap.md new file mode 100644 index 000000000..75dd4e358 --- /dev/null +++ b/tests/snapshots/about_format/markdown.snap.md @@ -0,0 +1,26 @@ +# `tap-example` + +Example tap for Singer SDK + +Built with the [Meltano Singer SDK](https://sdk.meltano.com). + +## Capabilities + +* `catalog` +* `discover` +* `state` + +## Settings + +| Setting | Required | Default | Description | +|:----------|:--------:|:-------:|:------------| +| start_date| False | None | Start date for the tap to extract data from. | +| api_key | True | None | API key for the tap to use. | + +A full list of supported settings and capabilities is available by running: `tap-example --about` + +## Supported Python Versions + +* 3.6 +* 3.7 +* 3.8 diff --git a/tests/snapshots/about_format/text.snap.txt b/tests/snapshots/about_format/text.snap.txt new file mode 100644 index 000000000..4bac11b66 --- /dev/null +++ b/tests/snapshots/about_format/text.snap.txt @@ -0,0 +1,7 @@ +Name: tap-example +Description: Example tap for Singer SDK +Version: 0.1.1 +SDK Version: 1.0.0 +Supported Python Versions: ['3.6', '3.7', '3.8'] +Capabilities: [catalog, discover, state] +Settings: {'properties': {'start_date': {'type': 'string', 'format': 'date-time', 'description': 'Start date for the tap to extract data from.'}, 'api_key': {'type': 'string', 'description': 'API key for the tap to use.'}}, 'required': ['api_key']} \ No newline at end of file diff --git a/tests/snapshots/countries_write_schemas/countries_write_schemas b/tests/snapshots/countries_write_schemas/countries_write_schemas new file mode 100644 index 000000000..b0808ce23 --- /dev/null +++ b/tests/snapshots/countries_write_schemas/countries_write_schemas @@ -0,0 +1,2 @@ +{"type": "SCHEMA", "stream": "continents", "schema": {"properties": {"code": {"type": ["null", "string"]}, "name": {"type": ["null", "string"]}}, "type": "object"}, "key_properties": ["code"]} +{"type": "SCHEMA", "stream": "countries", "schema": {"properties": {"code": {"type": ["string", "null"]}, "name": {"type": ["string", "null"]}, "native": {"type": ["string", "null"]}, "phone": {"type": ["string", "null"]}, "capital": {"type": ["string", "null"]}, "currency": {"type": ["string", "null"]}, "emoji": {"type": ["string", "null"]}, "continent": {"properties": {"code": {"type": ["string", "null"]}, "name": {"type": ["string", "null"]}}, "type": ["object", "null"]}, "languages": {"items": {"properties": {"code": {"type": 
["string", "null"]}, "name": {"type": ["string", "null"]}}, "type": "object"}, "type": ["array", "null"]}}, "type": "object"}, "key_properties": ["code"]} diff --git a/tests/snapshots/jsonschema/additional_properties.json b/tests/snapshots/jsonschema/additional_properties.json new file mode 100644 index 000000000..a5e7aa5d3 --- /dev/null +++ b/tests/snapshots/jsonschema/additional_properties.json @@ -0,0 +1,34 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "additionalProperties": { + "type": [ + "string" + ] + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/base.json b/tests/snapshots/jsonschema/base.json new file mode 100644 index 000000000..771725769 --- /dev/null +++ b/tests/snapshots/jsonschema/base.json @@ -0,0 +1,29 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/duplicates.json b/tests/snapshots/jsonschema/duplicates.json new file mode 100644 index 000000000..771725769 --- /dev/null +++ b/tests/snapshots/jsonschema/duplicates.json @@ -0,0 +1,29 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/duplicates_additional_properties.json b/tests/snapshots/jsonschema/duplicates_additional_properties.json new file mode 100644 index 000000000..a5e7aa5d3 --- /dev/null +++ b/tests/snapshots/jsonschema/duplicates_additional_properties.json @@ -0,0 +1,34 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "additionalProperties": { + "type": [ + "string" + ] + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/duplicates_no_additional_properties.json b/tests/snapshots/jsonschema/duplicates_no_additional_properties.json new file mode 100644 index 000000000..cc04f2a37 --- /dev/null +++ b/tests/snapshots/jsonschema/duplicates_no_additional_properties.json @@ -0,0 +1,30 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/no_additional_properties.json b/tests/snapshots/jsonschema/no_additional_properties.json new file mode 100644 index 000000000..cc04f2a37 --- /dev/null +++ b/tests/snapshots/jsonschema/no_additional_properties.json @@ -0,0 +1,30 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { 
+ "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/pattern_properties.json b/tests/snapshots/jsonschema/pattern_properties.json new file mode 100644 index 000000000..aab4147b4 --- /dev/null +++ b/tests/snapshots/jsonschema/pattern_properties.json @@ -0,0 +1,36 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string", + "null" + ] + }, + "username": { + "type": [ + "string", + "null" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "patternProperties": { + "^attr_[a-z]+$": { + "type": [ + "string" + ] + } + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required.json b/tests/snapshots/jsonschema/required.json new file mode 100644 index 000000000..5484f5247 --- /dev/null +++ b/tests/snapshots/jsonschema/required.json @@ -0,0 +1,31 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ] +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required_additional_properties.json b/tests/snapshots/jsonschema/required_additional_properties.json new file mode 100644 index 000000000..17a773ded --- /dev/null +++ b/tests/snapshots/jsonschema/required_additional_properties.json @@ -0,0 +1,36 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ], + "additionalProperties": { + "type": [ + "string" + ] + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required_duplicates.json b/tests/snapshots/jsonschema/required_duplicates.json new file mode 100644 index 000000000..5484f5247 --- /dev/null +++ b/tests/snapshots/jsonschema/required_duplicates.json @@ -0,0 +1,31 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ] +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required_duplicates_additional_properties.json b/tests/snapshots/jsonschema/required_duplicates_additional_properties.json new file mode 100644 index 000000000..17a773ded --- /dev/null +++ b/tests/snapshots/jsonschema/required_duplicates_additional_properties.json @@ -0,0 +1,36 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ], + "additionalProperties": { + "type": [ + "string" + ] + } +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required_duplicates_no_additional_properties.json b/tests/snapshots/jsonschema/required_duplicates_no_additional_properties.json new file mode 100644 index 000000000..dcd44ca07 --- 
/dev/null +++ b/tests/snapshots/jsonschema/required_duplicates_no_additional_properties.json @@ -0,0 +1,32 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ], + "additionalProperties": false +} \ No newline at end of file diff --git a/tests/snapshots/jsonschema/required_no_additional_properties.json b/tests/snapshots/jsonschema/required_no_additional_properties.json new file mode 100644 index 000000000..dcd44ca07 --- /dev/null +++ b/tests/snapshots/jsonschema/required_no_additional_properties.json @@ -0,0 +1,32 @@ +{ + "type": "object", + "properties": { + "id": { + "type": [ + "string", + "null" + ] + }, + "email": { + "type": [ + "string" + ] + }, + "username": { + "type": [ + "string" + ] + }, + "phone_number": { + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "email", + "username" + ], + "additionalProperties": false +} \ No newline at end of file diff --git a/tests/snapshots/mapped_stream/aliased_stream.jsonl b/tests/snapshots/mapped_stream/aliased_stream.jsonl new file mode 100644 index 000000000..46d5daffe --- /dev/null +++ b/tests/snapshots/mapped_stream/aliased_stream.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "aliased_stream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "aliased_stream", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "aliased_stream", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "aliased_stream", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/changed_key_properties.jsonl b/tests/snapshots/mapped_stream/changed_key_properties.jsonl new file mode 100644 index 000000000..c5168a45b --- /dev/null +++ b/tests/snapshots/mapped_stream/changed_key_properties.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"type": "object", "properties": {"email_hash": {"type": ["string", "null"]}}}, "key_properties": ["email_hash"]} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "c160f8cc69a4f0bf2b0362752353d060"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "4b9bb80620f03eb3719e0a061c14283d"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "426b189df1e2f359efe6ee90f2d2030f"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/drop_property.jsonl b/tests/snapshots/mapped_stream/drop_property.jsonl new file mode 100644 index 
000000000..8694f4736 --- /dev/null +++ b/tests/snapshots/mapped_stream/drop_property.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/drop_property_null_string.jsonl b/tests/snapshots/mapped_stream/drop_property_null_string.jsonl new file mode 100644 index 000000000..8694f4736 --- /dev/null +++ b/tests/snapshots/mapped_stream/drop_property_null_string.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/flatten_all.jsonl b/tests/snapshots/mapped_stream/flatten_all.jsonl new file mode 100644 index 000000000..c54db1563 --- /dev/null +++ b/tests/snapshots/mapped_stream/flatten_all.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user__id": {"type": ["integer", "null"]}, "user__sub__num": {"type": ["integer", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email": "alice@example.com", "count": 21, "user__id": 1, "user__sub__num": 1}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "bob@example.com", "count": 13, "user__id": 2, "user__sub__num": 2}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "charlie@example.com", "count": 19, "user__id": 3, "user__sub__num": 3}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/flatten_depth_1.jsonl b/tests/snapshots/mapped_stream/flatten_depth_1.jsonl new file mode 100644 index 000000000..275e3295c --- /dev/null +++ b/tests/snapshots/mapped_stream/flatten_depth_1.jsonl @@ 
-0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user__id": {"type": ["integer", "null"]}, "user__sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email": "alice@example.com", "count": 21, "user__id": 1, "user__sub": "{\"num\": 1}"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "bob@example.com", "count": 13, "user__id": 2, "user__sub": "{\"num\": 2}"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "charlie@example.com", "count": 19, "user__id": 3, "user__sub": "{\"num\": 3}"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/keep_all_fields.jsonl b/tests/snapshots/mapped_stream/keep_all_fields.jsonl new file mode 100644 index 000000000..13ddce438 --- /dev/null +++ b/tests/snapshots/mapped_stream/keep_all_fields.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}, "email_hash": {"type": ["string", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}, "email_hash": "c160f8cc69a4f0bf2b0362752353d060"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}, "email_hash": "4b9bb80620f03eb3719e0a061c14283d"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}, "email_hash": "426b189df1e2f359efe6ee90f2d2030f"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/map_and_flatten.jsonl b/tests/snapshots/mapped_stream/map_and_flatten.jsonl new file mode 100644 index 000000000..bf2620184 --- /dev/null +++ b/tests/snapshots/mapped_stream/map_and_flatten.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user__id": {"type": ["integer", "null"]}, "user__sub__num": {"type": ["integer", "null"]}, "email_hash": {"type": ["string", "null"]}}, "type": "object"}, "key_properties": ["email_hash"]} +{"type": "RECORD", "stream": "mystream", "record": {"email": "alice@example.com", "count": 21, "user__id": 1, "user__sub__num": 1, "email_hash": "c160f8cc69a4f0bf2b0362752353d060"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "bob@example.com", "count": 13, "user__id": 2, "user__sub__num": 2, "email_hash": "4b9bb80620f03eb3719e0a061c14283d"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": 
"RECORD", "stream": "mystream", "record": {"email": "charlie@example.com", "count": 19, "user__id": 3, "user__sub__num": 3, "email_hash": "426b189df1e2f359efe6ee90f2d2030f"}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/no_map.jsonl b/tests/snapshots/mapped_stream/no_map.jsonl new file mode 100644 index 000000000..019b1f9d9 --- /dev/null +++ b/tests/snapshots/mapped_stream/no_map.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/non_pk_passthrough.jsonl b/tests/snapshots/mapped_stream/non_pk_passthrough.jsonl new file mode 100644 index 000000000..0cbbf451a --- /dev/null +++ b/tests/snapshots/mapped_stream/non_pk_passthrough.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"type": "object", "properties": {"count": {"type": ["integer", "null"]}}}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"count": 21}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 13}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"count": 19}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/only_mapped_fields.jsonl b/tests/snapshots/mapped_stream/only_mapped_fields.jsonl new file mode 100644 index 000000000..e53042958 --- /dev/null +++ b/tests/snapshots/mapped_stream/only_mapped_fields.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"type": "object", "properties": {"email_hash": {"type": ["string", "null"]}, "fixed_count": {"type": ["integer", "null"]}}}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "c160f8cc69a4f0bf2b0362752353d060", "fixed_count": 20}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "4b9bb80620f03eb3719e0a061c14283d", "fixed_count": 12}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "426b189df1e2f359efe6ee90f2d2030f", "fixed_count": 18}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/only_mapped_fields_null_string.jsonl 
b/tests/snapshots/mapped_stream/only_mapped_fields_null_string.jsonl new file mode 100644 index 000000000..e53042958 --- /dev/null +++ b/tests/snapshots/mapped_stream/only_mapped_fields_null_string.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "mystream", "schema": {"type": "object", "properties": {"email_hash": {"type": ["string", "null"]}, "fixed_count": {"type": ["integer", "null"]}}}, "key_properties": []} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "c160f8cc69a4f0bf2b0362752353d060", "fixed_count": 20}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "4b9bb80620f03eb3719e0a061c14283d", "fixed_count": 12}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "mystream", "record": {"email_hash": "426b189df1e2f359efe6ee90f2d2030f", "fixed_count": 18}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/sourced_stream_1.jsonl b/tests/snapshots/mapped_stream/sourced_stream_1.jsonl new file mode 100644 index 000000000..e63d03815 --- /dev/null +++ b/tests/snapshots/mapped_stream/sourced_stream_1.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "sourced_stream_1", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/sourced_stream_1_null_string.jsonl b/tests/snapshots/mapped_stream/sourced_stream_1_null_string.jsonl new file mode 100644 index 000000000..e63d03815 --- /dev/null +++ b/tests/snapshots/mapped_stream/sourced_stream_1_null_string.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "sourced_stream_1", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_1", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}}, 
"time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}} diff --git a/tests/snapshots/mapped_stream/sourced_stream_2.jsonl b/tests/snapshots/mapped_stream/sourced_stream_2.jsonl new file mode 100644 index 000000000..41cce23d7 --- /dev/null +++ b/tests/snapshots/mapped_stream/sourced_stream_2.jsonl @@ -0,0 +1,6 @@ +{"type": "STATE", "value": {}} +{"type": "SCHEMA", "stream": "sourced_stream_2", "schema": {"properties": {"email": {"type": ["string", "null"]}, "count": {"type": ["integer", "null"]}, "user": {"properties": {"id": {"type": ["integer", "null"]}, "sub": {"properties": {"num": {"type": ["integer", "null"]}}, "type": ["object", "null"]}}, "type": ["object", "null"]}}, "type": "object"}, "key_properties": []} +{"type": "RECORD", "stream": "sourced_stream_2", "record": {"email": "alice@example.com", "count": 21, "user": {"id": 1, "sub": {"num": 1}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_2", "record": {"email": "bob@example.com", "count": 13, "user": {"id": 2, "sub": {"num": 2}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "RECORD", "stream": "sourced_stream_2", "record": {"email": "charlie@example.com", "count": 19, "user": {"id": 3, "sub": {"num": 3}}}, "time_extracted": "2022-01-01T00:00:00+00:00"} +{"type": "STATE", "value": {"bookmarks": {"mystream": {}}}}