diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7fdb857095..de9ba3afbe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,12 +1,21 @@ name: Build on: + # Run on pushes to master and pushed tags, and on pull requests against master, but ignore the docs folder push: branches: [ master ] tags: - 'v*' + paths: + - '**' + - '!docs/**' + - '.github/**' pull_request: branches: [ master ] + paths: + - '**' + - '!docs/**' + - '.github/**' jobs: build-client: @@ -101,24 +110,23 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - with: - repository: 'PhotonVision/photonvision-docs.git' - ref: master - uses: actions/setup-python@v5 with: - python-version: '3.9' + python-version: '3.11' - name: Install dependencies + working-directory: docs run: | python -m pip install --upgrade pip pip install sphinx sphinx_rtd_theme sphinx-tabs sphinxext-opengraph doc8 pip install -r requirements.txt - name: Build the docs + working-directory: docs run: | make html - uses: actions/upload-artifact@v4 with: name: built-docs - path: build/html + path: docs/build/html build-photonlib-host: env: MACOSX_DEPLOYMENT_TARGET: 13 diff --git a/.github/workflows/lint-format.yml b/.github/workflows/lint-format.yml index 0fd220a02e..fb97eb77af 100644 --- a/.github/workflows/lint-format.yml +++ b/.github/workflows/lint-format.yml @@ -1,12 +1,21 @@ name: Lint and Format on: + # Run on pushes to master and pushed tags, and on pull requests against master, but ignore the docs folder push: branches: [ master ] tags: - 'v*' + paths: + - '**' + - '!docs/**' + - '.github/**' pull_request: branches: [ master ] + paths: + - '**' + - '!docs/**' + - '.github/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} @@ -26,9 +35,9 @@ jobs: - name: Set up Python 3.8 uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: 3.11 - name: Install wpiformat - run: pip3 install wpiformat + run: pip3 install wpiformat==2024.37 - name: Run run: wpiformat - name: Check output diff --git a/.github/workflows/documentation.yml b/.github/workflows/photon-code-docs.yml similarity index 80% rename from .github/workflows/documentation.yml rename to .github/workflows/photon-code-docs.yml index 67d9cd203a..0eb7c50664 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/photon-code-docs.yml @@ -1,12 +1,21 @@ -name: Documentation +name: Photon Code Documentation on: + # Run on pushes to master and pushed tags, and on pull requests against master, but ignore the docs folder push: - # For now, run on all commits to master branches: [ master ] - # and also all tags starting with v tags: - 'v*' + paths: + - '**' + - '!docs/**' + - '.github/**' + pull_request: + branches: [ master ] + paths: + - '**' + - '!docs/**' + - '.github/**' # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: @@ -58,12 +67,12 @@ jobs: - name: Build javadocs/doxygen run: | chmod +x gradlew - ./gradlew docs:generateJavaDocs docs:doxygen + ./gradlew photon-docs:generateJavaDocs photon-docs:doxygen - uses: actions/upload-artifact@v4 with: name: built-docs - path: docs/build/docs + path: photon-docs/build/docs release: needs: [build-client, run_docs] @@ -76,6 +85,7 @@ jobs: - run: find . 
      - name: copy file via ssh password
+        if: github.ref == 'refs/heads/master'
        uses: appleboy/scp-action@v0.1.7
        with:
          host: ${{ secrets.WEBMASTER_SSH_HOST }}
diff --git a/.github/workflows/photonvision-docs.yml b/.github/workflows/photonvision-docs.yml
new file mode 100644
index 0000000000..e6cfd80bfb
--- /dev/null
+++ b/.github/workflows/photonvision-docs.yml
@@ -0,0 +1,46 @@
+name: PhotonVision Sphinx Documentation Checks
+
+on:
+  push:
+    branches: [ master ]
+    paths:
+      - 'docs/**'
+      - '.github/**'
+  pull_request:
+    branches: [ master ]
+    paths:
+      - 'docs/**'
+      - '.github/**'
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - uses: actions/setup-python@v4
+      with:
+        python-version: '3.11'
+
+    - name: Install and upgrade pip
+      run: python -m pip install --upgrade pip
+
+    - name: Install Python dependencies
+      working-directory: docs
+      run: |
+        pip install sphinx sphinx_rtd_theme sphinx-tabs sphinxext-opengraph doc8
+        pip install -r requirements.txt
+
+    - name: Check links
+      working-directory: docs
+      run: make linkcheck
+      continue-on-error: true
+
+    - name: Check lint
+      working-directory: docs
+      run: make lint
+
+    - name: Compile HTML
+      working-directory: docs
+      run: make html
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 2fdbda352c..525824cfdb 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -8,8 +8,16 @@ on:
     branches: [ master ]
     tags:
     - 'v*'
+    paths:
+      - '**'
+      - '!docs/**'
+      - '.github/**'
   pull_request:
     branches: [ master ]
+    paths:
+      - '**'
+      - '!docs/**'
+      - '.github/**'

 jobs:
   buildAndDeploy:
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000..f79cf99890
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,10 @@
+build/*
+.DS_Store
+.vscode/*
+.idea/*
+source/_build
+photon-docs/build
+source/docs/_build
+
+venv/*
+.venv/*
diff --git a/docs/.readthedocs.yml b/docs/.readthedocs.yml
new file mode 100644
index 0000000000..3b80b77b96
--- /dev/null
+++ b/docs/.readthedocs.yml
@@ -0,0 +1,15 @@
+version: 2
+
+sphinx:
+  builder: html
+  configuration: source/conf.py
+  fail_on_warning: true
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
+python:
+  install:
+    - requirements: requirements.txt
diff --git a/docs/.styleguide b/docs/.styleguide
new file mode 100644
index 0000000000..5d98301f88
--- /dev/null
+++ b/docs/.styleguide
@@ -0,0 +1,16 @@
+
+modifiableFileExclude {
+  \.jpg$
+  \.jpeg$
+  \.png$
+  \.gif$
+  \.so$
+  \.pdf$
+  \.mp4$
+  \.dll$
+  \.webp$
+  \.ico$
+  \.rknn$
+  \.svg$
+  gradlew
+}
diff --git a/docs/LICENSE b/docs/LICENSE
new file mode 100644
index 0000000000..2f244ac814
--- /dev/null
+++ b/docs/LICENSE
@@ -0,0 +1,395 @@
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+ +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. 
Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. 
If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. 
For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..f2ae9eb31a --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,24 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = -W --keep-going +SPHINXBUILD = sphinx-build +SOURCEDIR = source +LINTER = doc8 +LINTEROPTS = --ignore D001 # D001 is linelength +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". 
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+lint:
+	@$(LINTER) $(LINTEROPTS) $(SOURCEDIR)
+
+.PHONY: help lint Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/README.MD b/docs/README.MD
new file mode 100644
index 0000000000..1e7a067ea9
--- /dev/null
+++ b/docs/README.MD
@@ -0,0 +1,9 @@
+# PhotonVision ReadTheDocs
+
+[![Documentation Status](https://readthedocs.org/projects/photonvision-docs/badge/?version=latest)](https://docs.photonvision.org/en/latest/?badge=latest)
+
+PhotonVision is free, open-source vision processing software for FRC teams.
+
+This repository is the source code for our ReadTheDocs documentation, which can be found [here](https://docs.photonvision.org).
+
+[Contribution and formatting guidelines for this project](https://docs.photonvision.org/en/latest/docs/contributing/photonvision-docs/index.html)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000000..770cb6bc6c
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,36 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+set SPHINXOPTS=-W --keep-going
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000000..582e43a6c3
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,42 @@
+alabaster==0.7.13
+Babel==2.13.1
+beautifulsoup4==4.12.2
+certifi==2023.11.17
+charset-normalizer==3.3.2
+colorama==0.4.6
+doc8==0.11.2
+docopt==0.6.2
+docutils==0.18.1
+furo==2023.9.10
+idna==3.4
+imagesize==1.4.1
+Jinja2==3.0.3
+MarkupSafe==2.1.3
+packaging==23.2
+pbr==6.0.0
+pipreqs==0.4.13
+Pygments==2.17.1
+requests==2.31.0
+restructuredtext-lint==1.4.0
+six==1.16.0
+snowballstemmer==2.2.0
+soupsieve==2.5
+Sphinx==7.2.6
+sphinx-basic-ng==1.0.0b2
+sphinx-notfound-page==1.0.0
+sphinx-rtd-theme==1.3.0
+sphinx-tabs==3.4.4
+sphinx_design==0.5.0
+sphinxcontrib-applehelp==1.0.7
+sphinxcontrib-devhelp==1.0.5
+sphinxcontrib-ghcontributors==0.2.3
+sphinxcontrib-htmlhelp==2.0.4
+sphinxcontrib-jquery==4.1
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==1.0.6
+sphinxcontrib-serializinghtml==1.1.9
+sphinxext-opengraph==0.9.0
+sphinxext-remoteliteralinclude==0.4.0
+stevedore==5.1.0
+urllib3==2.1.0
+yarg==0.1.9
diff --git a/docs/source/404.rst b/docs/source/404.rst
new file mode 100644
index 0000000000..ab9b20f84e
--- /dev/null
+++ b/docs/source/404.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+Requested Page Not Found
+========================
+
+The page you were looking for was not found. If you think this is a mistake, `file an issue on our GitHub.
`__ diff --git a/docs/source/Makefile b/docs/source/Makefile new file mode 100644 index 0000000000..d0c3cbf102 --- /dev/null +++ b/docs/source/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/_static/assets/3d2019.mp4 b/docs/source/_static/assets/3d2019.mp4 new file mode 100644 index 0000000000..a41a9d720e Binary files /dev/null and b/docs/source/_static/assets/3d2019.mp4 differ diff --git a/docs/source/_static/assets/3d2020.mp4 b/docs/source/_static/assets/3d2020.mp4 new file mode 100644 index 0000000000..1033dc165e Binary files /dev/null and b/docs/source/_static/assets/3d2020.mp4 differ diff --git a/docs/source/_static/assets/AreaRatioFullness.mp4 b/docs/source/_static/assets/AreaRatioFullness.mp4 new file mode 100644 index 0000000000..01585b5464 Binary files /dev/null and b/docs/source/_static/assets/AreaRatioFullness.mp4 differ diff --git a/docs/source/_static/assets/PhotonVision-Header-noBG.png b/docs/source/_static/assets/PhotonVision-Header-noBG.png new file mode 100644 index 0000000000..64a7f267d0 Binary files /dev/null and b/docs/source/_static/assets/PhotonVision-Header-noBG.png differ diff --git a/docs/source/_static/assets/PhotonVision-Header-onWhite.png b/docs/source/_static/assets/PhotonVision-Header-onWhite.png new file mode 100644 index 0000000000..d35c18c419 Binary files /dev/null and b/docs/source/_static/assets/PhotonVision-Header-onWhite.png differ diff --git a/docs/source/_static/assets/RoundLogo.png b/docs/source/_static/assets/RoundLogo.png new file mode 100644 index 0000000000..e6be308650 Binary files /dev/null and b/docs/source/_static/assets/RoundLogo.png differ diff --git a/docs/source/_static/assets/RoundLogoLight.png b/docs/source/_static/assets/RoundLogoLight.png new file mode 100644 index 0000000000..ee460b28ff Binary files /dev/null and b/docs/source/_static/assets/RoundLogoLight.png differ diff --git a/docs/source/_static/assets/calibration_small.mp4 b/docs/source/_static/assets/calibration_small.mp4 new file mode 100644 index 0000000000..eed1054479 Binary files /dev/null and b/docs/source/_static/assets/calibration_small.mp4 differ diff --git a/docs/source/_static/assets/colorPicker.mp4 b/docs/source/_static/assets/colorPicker.mp4 new file mode 100644 index 0000000000..500b657d9e Binary files /dev/null and b/docs/source/_static/assets/colorPicker.mp4 differ diff --git a/docs/source/_static/assets/groupingSorting.mp4 b/docs/source/_static/assets/groupingSorting.mp4 new file mode 100644 index 0000000000..e8f3b1d61a Binary files /dev/null and b/docs/source/_static/assets/groupingSorting.mp4 differ diff --git a/docs/source/_static/assets/import-export-settings.mp4 b/docs/source/_static/assets/import-export-settings.mp4 new file mode 100644 index 0000000000..d020f989db Binary files /dev/null and b/docs/source/_static/assets/import-export-settings.mp4 differ diff --git a/docs/source/_static/assets/logGui.mp4 
b/docs/source/_static/assets/logGui.mp4 new file mode 100644 index 0000000000..333b04fe6b Binary files /dev/null and b/docs/source/_static/assets/logGui.mp4 differ diff --git a/docs/source/_static/assets/objdetectFiltering.mp4 b/docs/source/_static/assets/objdetectFiltering.mp4 new file mode 100644 index 0000000000..4a328df9a8 Binary files /dev/null and b/docs/source/_static/assets/objdetectFiltering.mp4 differ diff --git a/docs/source/_static/assets/offsetandmultiple.mp4 b/docs/source/_static/assets/offsetandmultiple.mp4 new file mode 100644 index 0000000000..555c094573 Binary files /dev/null and b/docs/source/_static/assets/offsetandmultiple.mp4 differ diff --git a/docs/source/_static/assets/simaimandrange.mp4 b/docs/source/_static/assets/simaimandrange.mp4 new file mode 100644 index 0000000000..a4815408e0 Binary files /dev/null and b/docs/source/_static/assets/simaimandrange.mp4 differ diff --git a/docs/source/_static/assets/tuningHueSatVal.mp4 b/docs/source/_static/assets/tuningHueSatVal.mp4 new file mode 100644 index 0000000000..23bf6009f8 Binary files /dev/null and b/docs/source/_static/assets/tuningHueSatVal.mp4 differ diff --git a/docs/source/_static/css/pv-icons.css b/docs/source/_static/css/pv-icons.css new file mode 100644 index 0000000000..0dd7bbcbf0 --- /dev/null +++ b/docs/source/_static/css/pv-icons.css @@ -0,0 +1,17 @@ +/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */ + +@font-face { + font-family: FontAwesome; + src: url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713); + src: url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"), url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"), url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"), url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"), url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg"); + font-weight: 400; + font-style:normal +} + +.code-block-caption>.headerlink, dl dt>.headerlink, h1>.headerlink, h2>.headerlink, h3>.headerlink, h4>.headerlink, h5>.headerlink, h6>.headerlink, p.caption>.headerlink, table>caption>.headerlink { + font-family: FontAwesome; + font-size: 0.75em; +} diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html new file mode 100644 index 0000000000..0a48581e46 --- /dev/null +++ b/docs/source/_templates/layout.html @@ -0,0 +1,74 @@ +{# Import the theme's layout. #} +{% extends '!layout.html' %} + +{%- block extrahead %} + + +{# Call the parent block #} +{{ super() }} +{% endblock %} + +{%- block extrafooter %} +{# Add custom things to the head HTML tag #} + +
+ + +
+ +
+ + +
+ + + +{# Call the parent block #} +{{ super() }} +{%- endblock %} diff --git a/docs/source/assets/PhotonVision-Header-noBG.png b/docs/source/assets/PhotonVision-Header-noBG.png new file mode 100644 index 0000000000..64a7f267d0 Binary files /dev/null and b/docs/source/assets/PhotonVision-Header-noBG.png differ diff --git a/docs/source/assets/PhotonVision-Header-onWhite.png b/docs/source/assets/PhotonVision-Header-onWhite.png new file mode 100644 index 0000000000..d35c18c419 Binary files /dev/null and b/docs/source/assets/PhotonVision-Header-onWhite.png differ diff --git a/docs/source/assets/RectLogo.png b/docs/source/assets/RectLogo.png new file mode 100644 index 0000000000..340f6de5b8 Binary files /dev/null and b/docs/source/assets/RectLogo.png differ diff --git a/docs/source/assets/RoundLogo.png b/docs/source/assets/RoundLogo.png new file mode 100644 index 0000000000..e6be308650 Binary files /dev/null and b/docs/source/assets/RoundLogo.png differ diff --git a/docs/source/assets/RoundLogoWhite.png b/docs/source/assets/RoundLogoWhite.png new file mode 100644 index 0000000000..ee460b28ff Binary files /dev/null and b/docs/source/assets/RoundLogoWhite.png differ diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000000..cfc08effe9 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,126 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +# -- Project information ----------------------------------------------------- + +project = "PhotonVision" +copyright = "2024, PhotonVision" +author = "Banks Troutman, Matt Morley" + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx_rtd_theme", + "sphinx.ext.autosectionlabel", + "sphinx.ext.todo", + "sphinx_tabs.tabs", + "notfound.extension", + "sphinxext.remoteliteralinclude", + "sphinxext.opengraph", + "sphinxcontrib.ghcontributors", + "sphinx_design", +] + +# Configure OpenGraph support + +ogp_site_url = "https://docs.photonvision.org/en/latest/" +ogp_site_name = "PhotonVision Documentation" +ogp_image = "https://raw.githubusercontent.com/PhotonVision/photonvision-docs/master/source/assets/RectLogo.png" + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = []
+
+# Enable hover content on glossary term
+hoverxref_roles = ["term"]
+
+# Autosection labels prefix document path and filename
+autosectionlabel_prefix_document = True
+
+# -- Options for HTML output -------------------------------------------------
+
+html_title = "PhotonVision Docs"
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "furo"
+html_favicon = "assets/RoundLogo.png"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+
+def setup(app):
+    app.add_css_file("css/pv-icons.css")
+
+
+pygments_style = "sphinx"
+
+html_theme_options = {
+    "sidebar_hide_name": True,
+    "light_logo": "assets/PhotonVision-Header-onWhite.png",
+    "dark_logo": "assets/PhotonVision-Header-noBG.png",
+    "light_css_variables": {
+        "font-stack": "-apple-system, BlinkMacSystemFont, avenir next, avenir, segoe ui, helvetica neue, helvetica, Ubuntu, roboto, noto, arial, sans-serif;",
+        "admonition-font-size": "1rem",
+        "admonition-title-font-size": "1rem",
+        "color-background-primary": "#ffffff",
+        "color-background-secondary": "#f7f7f7",
+        "color-background-hover": "#efeff400",
+        "color-background-hover--transparent": "#efeff400",
+        "color-brand-primary": "#006492",
+        "color-brand-content": "#006492",
+        "color-foreground-primary": "#2d2d2d",
+        "color-foreground-secondary": "#39a4d5",
+        "color-foreground-muted": "#2d2d2d",
+        "color-foreground-border": "#ffffff",
+        "color-background-border": "#ffffff",
+        "color-api-overall": "#101010",
+    },
+    "dark_css_variables": {
+        "color-background-primary": "#242c37",
+        "color-background-secondary": "#006492",
+        "color-background-hover": "#efeff400",
+        "color-background-hover--transparent": "#efeff400",
+        "color-brand-primary": "#ffd843",
+        "color-brand-secondary": "#39a4d5",
+        "color-brand-content": "#ffd843",
+        "color-foreground-primary": "#ffffff",
+        "color-foreground-secondary": "#ffffff",
+        "color-foreground-muted": "#ffffff",
+        "color-foreground-border": "transparent",
+        "color-background-border": "transparent",
+        "color-api-overall": "#101010",
+        "color-inline-code-background": "#0d0d0d",
+    },
+}
+
+suppress_warnings = ["epub.unknown_project_files"]
+
+sphinx_tabs_valid_builders = ["epub", "linkcheck"]
+
+# Excluded links for linkcheck
+# These should be periodically checked by hand to ensure that they are still functional
+linkcheck_ignore = ["https://www.raspberrypi.com/software/"]
diff --git a/docs/source/docs/additional-resources/best-practices.rst b/docs/source/docs/additional-resources/best-practices.rst
new file mode 100644
index 0000000000..075d083649
--- /dev/null
+++ b/docs/source/docs/additional-resources/best-practices.rst
@@ -0,0 +1,32 @@
+Best Practices For Competition
+==============================
+
+Before Competition
+------------------
+* Ensure you have spares of the relevant electronics if you can afford it (switch, coprocessor, cameras, etc.).
+* Download the latest release .jar onto your computer and update your Pi if necessary (only update if the release is labeled "critical" or similar; we do not recommend updating right before an event in case there are unforeseen bugs).
+* Test out PhotonVision on your home setup.
+* Ensure that you have set up SmartDashboard / Shuffleboard to view your camera streams during matches.
+* Follow all the recommendations under the Networking section in installation (network switch and static IP).
+* Use high-quality ethernet cables that have been rigorously tested.
+* Set up port forwarding using the guide in the Networking section in installation (see the sketch after this list).
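+
+As a rough sketch of that last step, port forwarding can be set up in robot code with WPILib's ``PortForwarder``. This assumes a coprocessor reachable at the default ``photonvision.local`` hostname with the web UI on port 5800; adjust both to match your setup.
+
+.. code-block:: java
+
+   import edu.wpi.first.net.PortForwarder;
+
+   // In robotInit() of your TimedRobot-based robot class:
+   @Override
+   public void robotInit() {
+       // Forward port 5800 on the roboRIO to the coprocessor's web UI, so the
+       // stream stays reachable over USB behind the field firewall.
+       PortForwarder.add(5800, "photonvision.local", 5800);
+   }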
+
+During the Competition
+----------------------
+* Make sure you take advantage of the field calibration time given at the start of the event:
+    * Bring your robot to the field at the allotted time.
+    * Turn on your robot and pull up the dashboard on your driver station.
+    * Point your robot at the target(s) and ensure you get consistent tracking (you track one target consistently, the ceiling lights aren't detected, etc.).
+    * If you have problems with your pipeline, go to the pipeline tuning section and retune the pipeline using the guide there. You want to make your exposure as low as possible with a tight hue value to ensure no extra targets are detected.
+    * Move the robot close, far, angled, and around the field to ensure no extra targets are found anywhere when looking for a target.
+    * Go to a practice match to ensure everything is working correctly.
+
+* After field calibration, use the "Export Settings" button in the "Settings" page to create a backup.
+    * Do this for each coprocessor on your robot that runs PhotonVision, and name your exports with meaningful names.
+    * This will contain camera information/calibration, pipeline information, network settings, etc.
+    * In the event of software/hardware failures (e.g. a lost SD card or broken device), you can then use the "Import Settings" button and select "All Settings" to restore your settings.
+    * This effectively works as a snapshot of your PhotonVision data that can be restored at any point.
+
+* Before every match, check that the ethernet connection going into your coprocessor is fully seated.
+* To reduce bandwidth, ensure that exposure is as low as possible and that you don't have the dashboard up when you don't need it.
+* Stream at as low a resolution as possible while still detecting targets to stay within bandwidth limits.
diff --git a/docs/source/docs/additional-resources/config.rst b/docs/source/docs/additional-resources/config.rst
new file mode 100644
index 0000000000..480b9a04bf
--- /dev/null
+++ b/docs/source/docs/additional-resources/config.rst
@@ -0,0 +1,54 @@
+Filesystem Directory
+====================
+
+PhotonVision stores and loads settings in the :code:`photonvision_config` directory, in the same folder where the PhotonVision JAR is stored. On the Pi image as well as the Gloworm, this is in the :code:`/opt/photonvision` directory. The contents of this directory can be exported as a zip archive from the settings page of the interface, under "export settings". This export will contain everything detailed below. These settings can later be uploaded using "import settings", to restore configurations from previous backups.
+
+
+Directory Structure
+-------------------
+
+The directory structure is outlined below.
+
+.. image:: images/configDir.png
+   :width: 600
+   :alt: Config directory structure
+
+* calibImgs
+    - Images saved from the last run of the calibration routine
+* cameras
+    - Contains a subfolder for each camera. This folder contains the following files:
+        + pipelines folder, which contains a :code:`json` file for each user-created pipeline.
+        + config.json, which contains all camera-specific configuration. This includes FOV, pitch, current pipeline index, and calibration data.
+        + drivermode.json, which contains settings for the driver mode pipeline
+* imgSaves
+    - Contains images saved with the input/output save commands.
+* logs
+    - Contains timestamped logs in the format :code:`photonvision-YYYY-MM-D_HH-MM-SS.log`. Note that on a Pi or Gloworm, these timestamps will likely be significantly behind the real time.
+* hardwareSettings.json
+    - Contains hardware settings. Currently, this includes only the LED brightness.
+* networkSettings.json
+    - Contains network settings, including team number (or remote NetworkTables address), static/dynamic settings, and hostname.
+
+Importing and Exporting Settings
+--------------------------------
+
+The entire settings directory can be exported as a ZIP archive from the settings page.
+
+
+.. raw:: html
+
+
+A variety of files can be imported back into PhotonVision:
+
+- ZIP Archive (:code:`.zip`)
+    - Useful for restoring a full configuration from a different PhotonVision instance.
+- Single Config File
+    - Currently-supported files:
+        - :code:`hardwareConfig.json`
+        - :code:`hardwareSettings.json`
+        - :code:`networkSettings.json`
+    - Useful for simple hardware or network configuration tasks without overwriting all settings.
diff --git a/docs/source/docs/additional-resources/images/configDir.png b/docs/source/docs/additional-resources/images/configDir.png
new file mode 100644
index 0000000000..491179420b
Binary files /dev/null and b/docs/source/docs/additional-resources/images/configDir.png differ
diff --git a/docs/source/docs/additional-resources/nt-api.rst b/docs/source/docs/additional-resources/nt-api.rst
new file mode 100644
index 0000000000..c52ac70d83
--- /dev/null
+++ b/docs/source/docs/additional-resources/nt-api.rst
@@ -0,0 +1,86 @@
+NetworkTables API
+=================
+About
+^^^^^
+
+.. warning::
+   PhotonVision interfaces with PhotonLib, our vendor dependency, using NetworkTables. If you are running PhotonVision on a robot (i.e. with a roboRIO), you should **turn the NetworkTables server switch (in the settings tab) off** in order to get PhotonLib to work. Also ensure that you set your team number. The NetworkTables server should only be enabled if you know what you're doing!
+
+API
+^^^
+
+.. warning:: Using these NetworkTables entries directly is not a supported or viable option, as PhotonVision only sends one target at a time this way (this is problematic when using AprilTags, which will return data from multiple tags at once). We recommend using PhotonLib.
+
+The tables below contain the name of the key for each entry that PhotonVision sends over the network and a short description of the key. The entries should be extracted from a subtable with your camera's nickname (visible in the PhotonVision UI) under the main ``photonvision`` table.
+
+Getting Target Information
+--------------------------
++-------------------+--------------+--------------------------------------------------------------------------+
+| Key               | Type         | Description                                                              |
++===================+==============+==========================================================================+
+| ``rawBytes``      | ``byte[]``   | A byte-packed string that contains target info from the same timestamp. |
++-------------------+--------------+--------------------------------------------------------------------------+
+| ``latencyMillis`` | ``double``   | The latency of the pipeline in milliseconds.
| ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``hasTarget`` | ``boolean`` | Whether the pipeline is detecting targets or not. | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetPitch`` | ``double`` | The pitch of the target in degrees (positive up). | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetYaw`` | ``double`` | The yaw of the target in degrees (positive right). | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetArea`` | ``double`` | The area (percent of bounding box in screen) as a percent (0-100). | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetSkew`` | ``double`` | The skew of the target in degrees (counter-clockwise positive). | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetPose`` | ``double[]`` | The pose of the target relative to the robot (x, y, z, qw, qx, qy, qz) | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetPixelsX`` | ``double`` | The target crosshair location horizontally, in pixels (origin top-right) | ++-------------------+--------------+--------------------------------------------------------------------------+ +| ``targetPixelsY`` | ``double`` | The target crosshair location vertically, in pixels (origin top-right) | ++-------------------+--------------+--------------------------------------------------------------------------+ + +Changing Settings +----------------- ++-------------------+-------------+-----------------------------+ +| Key | Type | Description | ++===================+=============+=============================+ +| ``pipelineIndex`` | ``int`` | Changes the pipeline index. | ++-------------------+-------------+-----------------------------+ +| ``driverMode`` | ``boolean`` | Toggles driver mode. | ++-------------------+-------------+-----------------------------+ + + +Saving Images +----------------- +PhotonVision can save images to file on command. The image is saved when PhotonVision detects the command went from ``false`` to ``true``. + +PhotonVision will automatically set these back to ``false`` after 500ms. + +Be careful saving images rapidly - it will slow vision processing performance and take up disk space very quickly. + +Images are returned as part of the .zip package from the "Export" operation in the Settings tab. + ++----------------------+-------------+----------------------------------------------------+ +| Key | Type | Description | ++======================+=============+====================================================+ +| ``inputSaveImgCmd`` | ``boolean`` | Triggers saving the current input image to file. | ++----------------------+-------------+----------------------------------------------------+ +| ``outputSaveImgCmd`` | ``boolean`` | Triggers saving the current output image to file. | ++----------------------+-------------+----------------------------------------------------+ + +.. warning:: If you manage to make calls to these commands faster than 500ms (between calls), additional photos will not be captured. 
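+
+For illustration only (PhotonLib remains the recommended interface), the sketch below pulses ``inputSaveImgCmd`` and reads a target entry through the raw NetworkTables API from Java robot code. ``YOUR_CAMERA`` is a placeholder for the camera nickname shown in the UI.
+
+.. code-block:: java
+
+   import edu.wpi.first.networktables.NetworkTable;
+   import edu.wpi.first.networktables.NetworkTableInstance;
+
+   public class SnapshotHelper {
+       // Subtable named after the camera nickname, under the main "photonvision" table.
+       private final NetworkTable table =
+               NetworkTableInstance.getDefault().getTable("photonvision").getSubTable("YOUR_CAMERA");
+
+       /** Asks PhotonVision to save one input frame to disk. */
+       public void saveInputImage() {
+           // PhotonVision latches on the false -> true edge and resets the
+           // entry back to false itself after roughly 500 ms.
+           table.getEntry("inputSaveImgCmd").setBoolean(true);
+       }
+
+       /** Example of reading 2D target data from the same subtable. */
+       public double getTargetYawDegrees() {
+           return table.getEntry("targetYaw").getDouble(0.0);
+       }
+   }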
+
+Global Entries
+--------------
+These entries are global, meaning that they should be called on the main ``photonvision`` table.
+
++-------------+---------+----------------------------------------------------------+
+| Key         | Type    | Description                                              |
++=============+=========+==========================================================+
+| ``ledMode`` | ``int`` | Sets the LED Mode (-1: default, 0: off, 1: on, 2: blink) |
++-------------+---------+----------------------------------------------------------+
+
+.. warning::
+   Setting the LED mode to -1 (default) when `multiple` cameras are connected may result in unexpected behavior. :ref:`This is a known limitation of PhotonVision. `
+
+   Single camera operation should work without issue.
diff --git a/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst b/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst
new file mode 100644
index 0000000000..b8ee6441f9
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/2D-tracking-tuning.rst
@@ -0,0 +1,66 @@
+2D AprilTag Tuning / Tracking
+=============================
+
+Tracking AprilTags
+------------------
+
+Before you get started tracking AprilTags, ensure that you have followed the previous sections on installation, wiring, and networking. Next, open the Web UI, go to the top right card, and switch to the "AprilTag" or "Aruco" type. You should see a screen similar to the one below.
+
+.. image:: images/apriltag.png
+   :align: center
+
+|
+
+You are now able to detect and track AprilTags in 2D (yaw, pitch, roll, etc.). In order to get 3D data from your AprilTags, please see :ref:`here. `
+
+Tuning AprilTags
+----------------
+
+AprilTag pipelines come with reasonable defaults to get you up and running with tracking. However, in order to optimize your performance and accuracy, you must tune your AprilTag pipeline using the settings below. Note that the settings below are different between the AprilTag and Aruco detectors, but the concepts are the same.
+
+.. image:: images/apriltag-tune.png
+   :scale: 45 %
+   :align: center
+
+|
+
+Target Family
+^^^^^^^^^^^^^
+
+Target families are defined by two numbers (before and after the "h"). The first number is the number of bits the tag is able to encode (which means more tags are available in the respective family) and the second is the Hamming distance. Hamming distance describes the ability for error correction while identifying tag IDs. A high Hamming distance generally means that it will be easier for a tag to be identified even if there are errors. However, as Hamming distance increases, the number of available tags decreases. The 2024 FRC game will be using 36h11 tags, which can be found `here `_.
+
+Decimate
+^^^^^^^^
+
+Decimation (also known as down-sampling) is the process of reducing the sampling frequency of a signal (in our case, the image). Increasing decimate will lead to an increased detection rate while decreasing detection distance. We recommend keeping this at the default value.
+
+Blur
+^^^^
+This controls the sigma of the Gaussian blur used during tag detection. In clearer terms, increasing blur will make the image blurrier, while decreasing it will keep it closer to the original image. We strongly recommend that you keep blur to a minimum (0) due to its high performance cost, unless you have an extremely noisy image.
+
+
+Threads
+^^^^^^^
+
+Threads refers to the threads within your coprocessor's CPU. The theoretical maximum is device dependent, but we recommend sticking to one less than the number of CPU threads your coprocessor has. Increasing threads will increase performance at the cost of increased CPU load, higher temperatures, etc. It may take some experimentation to find the optimal value for your system.
+
+Refine Edges
+^^^^^^^^^^^^
+
+The edges of each polygon are adjusted to "snap to" strong color differences surrounding them. It is recommended to use this in tandem with decimate, as it can increase the quality of the initial estimate.
+
+Pose Iterations
+^^^^^^^^^^^^^^^
+
+Pose iterations is the number of iterations the AprilTag algorithm runs in order to converge on its pose solution(s). A smaller number between 0-100 is recommended. Fewer iterations produce a noisier set of poses when looking at the tag straight on, while higher values stick much more consistently to a (potentially wrong) pair of poses. WPILib contains many useful filter classes to account for noisy tag readings.
+
+Max Error Bits
+^^^^^^^^^^^^^^
+
+Max error bits, also known as the Hamming distance, is the number of positions at which corresponding pieces of data / tag are different. Put more generally, this is the number of bits (think of these as squares in the tag) that need to be changed / corrected in the tag to correctly detect it. A higher value means that more tags will be detected, while a lower value cuts out tags that could be "questionable" in terms of detection.
+
+We recommend a value of 0 for the 16h5 family and 7+ for the 36h11 family.
+
+Decision Margin Cutoff
+^^^^^^^^^^^^^^^^^^^^^^
+The decision margin cutoff is how much "margin" the detector has left before it rejects a tag; increasing this rejects poorer tags. We recommend you keep this value around 30.
diff --git a/docs/source/docs/apriltag-pipelines/3D-tracking.rst b/docs/source/docs/apriltag-pipelines/3D-tracking.rst
new file mode 100644
index 0000000000..4e06dd0005
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/3D-tracking.rst
@@ -0,0 +1,15 @@
+3D Tracking
+===========
+
+3D AprilTag tracking will allow you to track the real-world position and rotation of a tag relative to the camera's image sensor. This is useful for robot pose estimation and other applications like autonomous scoring. In order to use 3D tracking, you must first :ref:`calibrate your camera `. Once you have, enable 3D mode in the UI and you will be able to get 3D pose information from the tag! For information on getting and using this information in your code, see :ref:`the programming reference `.
+
+Ambiguity
+---------
+
+Translating from 2D to 3D using data from the calibration and the four tag corners can lead to "pose ambiguity", where it appears that the AprilTag pose is flipping between two different poses. You can read more about this issue `here. ` Ambiguity is calculated as the ratio of reprojection errors between the two pose solutions (if they exist), where reprojection error is the image distance between where the AprilTag's corners are detected and where we expect to see them based on the tag's estimated camera-relative pose.
+
+There are a few steps you can take to resolve/mitigate this issue:
+
+1. Mount cameras at oblique angles so it is less likely that the tag will be seen straight on.
+2. Use the :ref:`MultiTag system ` in order to combine the corners from multiple tags to get a more accurate and unambiguous pose.
+3. Reject all tag poses where the ambiguity ratio (available via PhotonLib) is greater than 0.2, as sketched below.
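+
+As a minimal sketch of that last option in PhotonLib (Java), where ``"YOUR_CAMERA"`` is a placeholder nickname:
+
+.. code-block:: java
+
+   import org.photonvision.PhotonCamera;
+   import org.photonvision.targeting.PhotonTrackedTarget;
+
+   PhotonCamera camera = new PhotonCamera("YOUR_CAMERA");
+
+   // In a periodic method:
+   var result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       PhotonTrackedTarget target = result.getBestTarget();
+       // getPoseAmbiguity() returns -1 if ambiguity could not be calculated.
+       double ambiguity = target.getPoseAmbiguity();
+       if (ambiguity >= 0 && ambiguity < 0.2) {
+           var cameraToTarget = target.getBestCameraToTarget();
+           // ... feed cameraToTarget into your pose estimator
+       }
+   }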
diff --git a/docs/source/docs/apriltag-pipelines/about-apriltags.rst b/docs/source/docs/apriltag-pipelines/about-apriltags.rst
new file mode 100644
index 0000000000..5f39c32d23
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/about-apriltags.rst
@@ -0,0 +1,12 @@
About AprilTags
===============

.. image:: images/pv-apriltag.png
   :align: center
   :scale: 20 %

AprilTags are a common type of visual fiducial marker. Visual fiducial markers are artificial landmarks added to a scene to allow "localization" (finding your current position) via images. In simpler terms, tags mark known points of reference that you can use to find your current location. They are similar to QR codes in that they encode information; however, they hold only a single number. By placing AprilTags in known locations around the field and detecting them using PhotonVision, you can easily get full field localization / pose estimation. Alternatively, you can use AprilTags the same way you used retroreflective tape, simply using them to turn to the goal without any pose estimation.

A more technical explanation can be found in the `WPILib documentation `_.

.. note:: You can get FIRST's `official PDF of the targets used in 2024 here `_.

diff --git a/docs/source/docs/apriltag-pipelines/coordinate-systems.rst b/docs/source/docs/apriltag-pipelines/coordinate-systems.rst
new file mode 100644
index 0000000000..21dbb8f01a
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/coordinate-systems.rst
@@ -0,0 +1,49 @@
Coordinate Systems
==================

Field and Robot Coordinate Frame
--------------------------------

PhotonVision follows the WPILib conventions for the robot and field coordinate systems, as defined `here `_.

You define the camera to robot transform in the robot coordinate frame.

Camera Coordinate Frame
-----------------------

OpenCV by default uses x-right/y-down/z-out for camera transforms. PhotonVision applies a base rotation to this transformation to make robot-to-tag transforms more in line with the WPILib coordinate system. The x, y, and z axes are also shown in red, green, and blue in the 3D mini-map and targeting overlay in the UI.

* The origin is the focal point of the camera lens
* The x-axis points out of the camera
* The y-axis points to the left
* The z-axis points upwards


.. image:: images/camera-coord.png
   :scale: 45 %
   :align: center

|

.. image:: images/multiple-tags.png
   :scale: 45 %
   :align: center

|

AprilTag Coordinate Frame
-------------------------

The AprilTag coordinate system is defined as follows, relative to the center of the AprilTag itself, and when viewing the tag as a robot would. Again, PhotonVision changes this coordinate system to be more in line with WPILib. This means that a robot facing a tag head-on would see a robot-to-tag transform with a translation only in x and a rotation of 180 degrees about z; a sketch of this case follows the list below. The tag coordinate system is also shown with x/y/z in red/green/blue in the UI target overlay and mini-map.

* The origin is the center of the tag
* The x-axis is normal to the plane the tag is printed on, pointing outward from the visible side of the tag.
* The y-axis points to the right
* The z-axis points upwards
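To make the head-on example above concrete, here it is in WPILib geometry types -- a minimal sketch in Java, with the 2 meter distance purely illustrative:

.. code-block:: java

   import edu.wpi.first.math.geometry.Rotation3d;
   import edu.wpi.first.math.geometry.Transform3d;
   import edu.wpi.first.math.geometry.Translation3d;

   public class TagFrameExample {
       public static void main(String[] args) {
           // A robot 2 m straight out from a tag, facing it head-on:
           // translation only in x, plus a 180 degree rotation about z.
           Transform3d robotToTag = new Transform3d(
               new Translation3d(2.0, 0.0, 0.0),
               new Rotation3d(0.0, 0.0, Math.PI));
           System.out.println(robotToTag);
       }
   }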
.. image:: images/apriltag-coords.png
   :scale: 45 %
   :align: center

|
diff --git a/docs/source/docs/apriltag-pipelines/detector-types.rst b/docs/source/docs/apriltag-pipelines/detector-types.rst
new file mode 100644
index 0000000000..3e596b6aa9
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/detector-types.rst
@@ -0,0 +1,15 @@
AprilTag Pipeline Types
=======================

PhotonVision offers two different AprilTag pipeline types based on different implementations of the underlying algorithm. Each one has its advantages / disadvantages, which are detailed below.

.. note:: Both of these pipeline types detect AprilTag markers; they are just two different algorithms for doing so.

AprilTag
--------

The AprilTag pipeline type is based on the `AprilTag `_ library from the University of Michigan, and we recommend it for most use cases. It is (to our understanding) the most accurate pipeline type, but it is also roughly 2x slower than AruCo. This was the pipeline type used by teams in the 2023 season and is well tested.

AruCo
-----
The AruCo pipeline is based on the `AruCo `_ implementation from OpenCV. It runs at roughly 2x the framerate and half the latency of the AprilTag pipeline type, but is less accurate. We recommend this pipeline type for teams that need to run at a higher framerate or have a lower-powered device. This pipeline type is new for the 2024 season and is not as well tested as AprilTag.

diff --git a/docs/source/docs/apriltag-pipelines/images/apriltag-coords.png b/docs/source/docs/apriltag-pipelines/images/apriltag-coords.png
new file mode 100644
index 0000000000..6e4d1b9795
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/apriltag-coords.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/apriltag-tune.png b/docs/source/docs/apriltag-pipelines/images/apriltag-tune.png
new file mode 100644
index 0000000000..bd704d75fb
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/apriltag-tune.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/apriltag.png b/docs/source/docs/apriltag-pipelines/images/apriltag.png
new file mode 100644
index 0000000000..dceda3482b
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/apriltag.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/camera-coord.png b/docs/source/docs/apriltag-pipelines/images/camera-coord.png
new file mode 100644
index 0000000000..7bf3322eac
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/camera-coord.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/field-layout.png b/docs/source/docs/apriltag-pipelines/images/field-layout.png
new file mode 100644
index 0000000000..1e1ec0a6d1
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/field-layout.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/multiple-tags.png b/docs/source/docs/apriltag-pipelines/images/multiple-tags.png
new file mode 100644
index 0000000000..a9e23b473e
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/multiple-tags.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/multitag-ui.png b/docs/source/docs/apriltag-pipelines/images/multitag-ui.png
new file mode 100644
index 0000000000..0eab8f7589
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/multitag-ui.png differ
diff --git a/docs/source/docs/apriltag-pipelines/images/pv-apriltag.png b/docs/source/docs/apriltag-pipelines/images/pv-apriltag.png
new file mode 100644
index 0000000000..5cbb06bf72
Binary files /dev/null and b/docs/source/docs/apriltag-pipelines/images/pv-apriltag.png differ
diff --git a/docs/source/docs/apriltag-pipelines/index.rst b/docs/source/docs/apriltag-pipelines/index.rst
new file mode 100644
index 0000000000..920b4cdb1e
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/index.rst
@@ -0,0 +1,11 @@
AprilTag Detection
==================

.. toctree::

   about-apriltags
   detector-types
   2D-tracking-tuning
   3D-tracking
   multitag
   coordinate-systems

diff --git a/docs/source/docs/apriltag-pipelines/multitag.rst b/docs/source/docs/apriltag-pipelines/multitag.rst
new file mode 100644
index 0000000000..1b03a66477
--- /dev/null
+++ b/docs/source/docs/apriltag-pipelines/multitag.rst
@@ -0,0 +1,53 @@
MultiTag Localization
=====================

PhotonVision can combine detections from multiple simultaneously observed AprilTags from a particular camera with information about where tags are expected to be located on the field to produce a better estimate of where the camera (and therefore robot) is located on the field. PhotonVision can calculate this multi-target result on your coprocessor, reducing CPU usage on your roboRIO. This result is sent over NetworkTables along with other detected targets as part of the ``PhotonPipelineResult`` provided by PhotonLib.

.. warning:: MultiTag requires an accurate field layout JSON to be uploaded! Differences between this layout and the tags' physical locations will drive error in the estimated pose output.

Enabling MultiTag
^^^^^^^^^^^^^^^^^

Ensure that your camera is calibrated and 3D mode is enabled. Navigate to the Output tab and enable "Do Multi-Target Estimation". This enables MultiTag to use the uploaded field layout JSON to calculate your camera's pose in the field. This 3D transform will be shown as an additional table in the "targets" tab, along with the IDs of the AprilTags used to compute it.

.. image:: images/multitag-ui.png
   :width: 600
   :alt: Multitarget enabled and running in the PhotonVision UI

.. note:: By default, enabling multi-target will disable calculating camera-to-target transforms for each observed AprilTag target to increase performance; the X/Y/angle numbers shown in the target table of the UI are instead calculated using the tag's expected location (per the field layout JSON) and the field-to-camera transform calculated using MultiTag. If you additionally want the individual camera-to-target transform calculated using SolvePNP for each target, enable "Always Do Single-Target Estimation".

This multi-target pose estimate can be accessed using PhotonLib. We suggest using :ref:`the PhotonPoseEstimator class ` with the ``MULTI_TAG_PNP_ON_COPROCESSOR`` strategy to simplify code, but the transform can be directly accessed using ``getMultiTagResult``/``MultiTagResult()`` (Java/C++).

.. tab-set-code::

   .. code-block:: java

      var result = camera.getLatestResult();
      if (result.getMultiTagResult().estimatedPose.isPresent) {
          Transform3d fieldToCamera = result.getMultiTagResult().estimatedPose.best;
      }

   .. code-block:: C++

      auto result = camera.GetLatestResult();
      if (result.MultiTagResult().result.isPresent) {
          frc::Transform3d fieldToCamera = result.MultiTagResult().result.best;
      }

.. note:: The returned field-to-camera transform is a transform from the fixed field origin to the camera's coordinate system. This does not change based on alliance color, and by convention the origin is on the BLUE ALLIANCE wall.
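For reference, a minimal sketch of consuming this result through ``PhotonPoseEstimator`` in Java follows (assuming the relevant PhotonLib/WPILib imports); ``camera`` is the ``PhotonCamera`` from above, and ``kRobotToCam`` is a hypothetical ``Transform3d`` describing your camera mounting that you must supply yourself.

.. code-block:: java

   // Sketch: use the coprocessor MultiTag result for robot pose estimation.
   AprilTagFieldLayout layout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField();
   PhotonPoseEstimator estimator = new PhotonPoseEstimator(
       layout,
       PhotonPoseEstimator.PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR,
       camera,
       kRobotToCam); // hypothetical robot-origin-to-camera Transform3d
   // Call periodically; empty when no usable result is available.
   Optional<EstimatedRobotPose> maybePose = estimator.update();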
Updating the Field Layout
^^^^^^^^^^^^^^^^^^^^^^^^^

PhotonVision ships by default with the `2024 field layout JSON `_. The layout can be inspected by navigating to the settings tab and scrolling down to the "AprilTag Field Layout" card, as shown below.

.. image:: images/field-layout.png
   :width: 600
   :alt: The currently saved field layout in the Photon UI

An updated field layout can be uploaded by navigating to the "Device Control" card of the Settings tab and clicking "Import Settings". In the pop-up dialog, select the "AprilTag Layout" type, choose an updated layout JSON (in the same format as the WPILib field layout JSON linked above) using the paperclip icon, and select "Import Settings". The AprilTag layout in the "AprilTag Field Layout" card below should update to reflect the new layout.

.. note:: Currently, there is no way to update this layout using PhotonLib, although this feature is under consideration.

diff --git a/docs/source/docs/assets/AprilTag16h5.pdf b/docs/source/docs/assets/AprilTag16h5.pdf
new file mode 100644
index 0000000000..b1f7fe2846
Binary files /dev/null and b/docs/source/docs/assets/AprilTag16h5.pdf differ
diff --git a/docs/source/docs/assets/settings.png b/docs/source/docs/assets/settings.png
new file mode 100644
index 0000000000..486a839dd3
Binary files /dev/null and b/docs/source/docs/assets/settings.png differ
diff --git a/docs/source/docs/calibration/calibration.rst b/docs/source/docs/calibration/calibration.rst
new file mode 100644
index 0000000000..1fbbff12a6
--- /dev/null
+++ b/docs/source/docs/calibration/calibration.rst
@@ -0,0 +1,159 @@
Calibrating Your Camera
=======================

.. important:: In order to detect AprilTags and use 3D mode, your camera must be calibrated at the desired resolution! Inaccurate calibration will lead to poor performance.

To calibrate a camera, images of a chessboard (or grid of dots, or other target) are taken. By comparing where the grid corners (or dots) should be in object space (for example, a dot once every inch in an 8x6 grid) with where they appear in the camera image, we can find a least-squares estimate for intrinsic camera properties like focal lengths, center point, and distortion coefficients. For more on camera calibration, please review the `OpenCV documentation `_.

.. warning:: While any resolution can be calibrated, resolutions lower than 960x720 are often too low to provide accurate results. Additionally, high resolutions may be too performance-intensive for a coprocessor like a Raspberry Pi to handle (solutions to this are being looked into). Thus, we recommend 960x720 when using 3D mode.

.. note:: The calibration data collected during calibration is specific to each physical camera, as well as each individual resolution.


Calibration Tips
----------------
Accurate camera calibration is required in order to get accurate pose measurements when using AprilTags and 3D mode. The tips below should help ensure success:

1. Practice calibration using your laptop webcam and https://www.calibdb.net/. The target can be found on the website and should be printed out if possible. Once you print it out, try to line up your target with the overlay on the screen as best as possible. The point of this practice is to notice how you are prompted to place targets in certain positions on the screen that ensure you account for all regions of the sensor.
The chessboard should (in general) not face the camera straight on, nor should it be aligned with any of the camera axes (i.e., rotated only about an axis going left/right, up/down, or out of the camera).

2. Ensure the images you take have the target in different positions and angles, with as big a difference between angles as possible. It is important to make sure the target overlay still lines up with the board while doing this. Tilt no more than 45 degrees.

3. Use as big a calibration target as your printer can print.

4. Ensure that your printed pattern has enough white border around it.

5. Ensure your camera stays in one position for the duration of the calibration.

6. Make sure you get all 12 images from varying distances and angles.

7. Take at least one image that covers the total image area, and generally ensure that you get even coverage of the lens with your image set.

8. Have good lighting; a diffusely lit target is best (light shining on the target without shadows).

9. Ensure the calibration target is completely flat and does not bend or fold in any way. It should be mounted/taped down to something flat and then used for calibration; do not just hold it up.

10. Avoid targets that are parallel to the lens of the camera / straight on towards the camera as much as possible. You want angles and variations within your calibration images.

Following the ideas above should help in getting an accurate calibration.

Calibration Steps
-----------------

Your camera can be calibrated using either the utility built into PhotonVision, which performs all the calculations on your coprocessor, or using a website such as `calibdb `_, which uses a USB webcam connected to your laptop. The integrated calibration utility is currently the only one that works with ribbon-cable CSI cameras or Limelights, but for USB webcams, calibdb is the preferred option.

Calibrating using calibdb
-------------------------

Calibdb uses a modified chessboard/AruCo marker combination target called `ChArUco targets. `_ The website currently only supports the Chrome browser.

Download and print out (or display on a monitor) the calibration target by clicking Show Pattern. Click "Calibrate" and align your camera with the ghost overlay of the calibration board. The website automatically calculates the next position and displays it for you. When complete, download the calibration (do **not** use the OpenCV format). Reconnect your camera to your coprocessor and navigate to the PhotonVision web interface's camera tab. Ensure the correct camera is selected, and click the "Import from CalibDB" button. Your calibration data will be automatically saved and applied!

Calibrating using PhotonVision
------------------------------

1. Navigate to the calibration section in the UI.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Cameras tab of the UI houses PhotonVision's camera calibration tooling. It assists users with calibrating their cameras, as well as allowing them to view previously calibrated resolutions. We support both dot and chessboard calibrations.

2. Print out the calibration target.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the Camera Calibration tab, we'll print out the calibration target using the "Download" button. This should be printed on 8.5x11 printer paper. This page shows an example using an 8x8 chessboard.
.. warning:: Ensure that there is no scaling applied during printing (it should be at 100%) and that the PDF is printed as-is on regular printer paper. Check the square size with calipers or another accurate measuring device after printing, and enter the true size of a square in the UI text box. For optimal results, various resources are available online to calibrate your specific printer if needed.

3. Select calibration resolution and fill in appropriate target data.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We'll next select a resolution to calibrate and populate our pattern spacing and board size. The provided chessboard is 8 squares in width and height, and each square should be about 1in across. Mine, measured with a caliper, was 0.96in, but this will vary per printer. Finally, once our entered data is correct, we'll click "start calibration."

4. Take calibration images from various angles.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Now, we'll capture images of our chessboard from various angles. The most important part of this step is to make sure that the chessboard overlay matches the chessboard in your image. The further the overdrawn points are from the true positions of the chessboard corners, the less accurate the final calibration will be. We'll want to capture at least 12 images, trying to take one in each region of the camera sensor. Once we've got our images, we'll click "Finish calibration" and wait for the calibration process to complete. If all goes well, the mean error and standard deviation will be shown in the table on the right.

.. raw:: html

Accessing Calibration Images
----------------------------

Details about a particular calibration can be viewed by clicking on that resolution in the calibrations tab. This tab allows you to download raw calibration data, upload a previous calibration, and inspect details about the calculated camera intrinsics.

.. image:: images/cal-details.png
   :width: 600
   :alt: Captured calibration images

.. note:: More info on what these parameters mean can be found in `OpenCV's docs `_

- Fx/Fy: Estimated camera focal length, in mm
- Cx/Cy: Estimated camera optical center, in pixels. This should be at about the center of the image
- Distortion: OpenCV camera model distortion coefficients
- FOV: Calculated using the estimated focal length and image size. Useful for gut-checking calibration results
- Mean Err: Mean reprojection error, or the distance between the expected and observed chessboard corners across the full calibration dataset

Below these outputs are the snapshots collected for calibration, along with a per-snapshot mean reprojection error. A snapshot with a larger reprojection error might indicate a bad snapshot, due to effects such as motion blur or misidentified chessboard corners.

Calibration images can also be extracted from the downloaded JSON file using `this Python script `_. This script will unpack the calibration images and also generate a VNL file for use `with mrcal `_.

::

    python3 /path/to/calibrationUtils.py path/to/photon_calibration.json /path/to/output/folder

.. image:: images/unpacked-json.png
   :width: 600
   :alt: Captured calibration images


Investigating Calibration Data with mrcal
-----------------------------------------

`mrcal `_ is a command-line tool for camera calibration and visualization. PhotonVision has the option to use the mrcal backend during camera calibration to estimate intrinsics.
mrcal can also be used post-calibration to inspect snapshots and provide feedback. These steps closely follow the `mrcal tour `_ -- I'm aggregating commands and notes here, but the mrcal documentation is much more thorough.

Start by `installing mrcal `_. Note that while mrcal *calibration* through PhotonVision is supported on all platforms, this post-calibration investigation currently only works on Linux. Some users have also reported luck using `WSL 2 on Windows `_ as well. You may also need to install ``feedgnuplot``. On Ubuntu systems, these commands should be run from a standalone terminal and *not* the one `built into vscode `_.

Let's run ``calibrationUtils.py`` as described above, and then cd into the output folder. From here, you can follow the mrcal tour, just replacing the VNL filename and camera imager size as necessary. My camera calibration was at 1280x720, so I've set the XY limits to that below.

::

    $ cd /path/to/output/folder
    $ ls
    corners.vnl  img0.png  img10.png  img11.png  img12.png  img13.png  img1.png
    img2.png  img3.png  img4.png  img5.png  img6.png  img7.png  img8.png
    img9.png  cameramodel_0.cameramodel

    $ < corners.vnl \
      vnl-filter -p x,y | \
      feedgnuplot --domain --square --set 'xrange [0:1280] noextend' --set 'yrange [720:0] noextend'

.. image:: images/mrcal-coverage.svg
   :alt: A diagram showing the locations of all detected chessboard corners.

As you can see, we didn't do a fantastic job of covering our whole camera sensor -- there's a big gap across the whole right side, for example, and we only have 14 calibration images. We've also got our "cameramodel" file, which can be used by mrcal to display additional debug info.

Let's inspect our reprojection error residuals. We expect their magnitudes and directions to be random -- if there are patterns in the colors shown, then our calibration probably doesn't fully explain our physical camera sensor.

::

    $ mrcal-show-residuals --magnitudes --set 'cbrange [0:1.5]' ./cameramodel_0.cameramodel
    $ mrcal-show-residuals --directions --unset key ./cameramodel_0.cameramodel

.. image:: images/residual-magnitudes.svg
   :alt: A diagram showing residual magnitudes

.. image:: images/residual-directions.svg
   :alt: A diagram showing residual directions

Clearly we don't have anywhere near enough data to draw any meaningful conclusions (yet). But for fun, let's dig into `camera uncertainty estimation `_. This diagram shows how expected projection error changes due to noise in the calibration inputs. Lower projection error across a larger area of the sensor implies a better calibration that more fully covers the whole sensor. For my calibration data, you can tell the projection error isolines (lines of constant expected projection error) are skewed to the left, following my dataset (which was also skewed left).

::

    $ mrcal-show-projection-uncertainty --unset key ./cameramodel_0.cameramodel
.. image:: images/camera-uncertainty.svg
   :alt: A diagram showing camera uncertainty

diff --git a/docs/source/docs/calibration/images/cal-details.png b/docs/source/docs/calibration/images/cal-details.png
new file mode 100644
index 0000000000..ad9ab635c9
Binary files /dev/null and b/docs/source/docs/calibration/images/cal-details.png differ
diff --git a/docs/source/docs/calibration/images/camera-uncertainty.svg b/docs/source/docs/calibration/images/camera-uncertainty.svg
new file mode 100644
index 0000000000..547c5368d5
@@ -0,0 +1,6201 @@
[new SVG plot; markup omitted. Title: "Projection uncertainty (in pixels) based on calibration input noise. Looking out to infinity"]
diff --git a/docs/source/docs/calibration/images/mrcal-coverage.svg b/docs/source/docs/calibration/images/mrcal-coverage.svg
new file mode 100644
index 0000000000..f4cb0868d5
@@ -0,0 +1,10773 @@
[new SVG plot; markup omitted. Shows the locations of all detected chessboard corners across the imager]
diff --git a/docs/source/docs/calibration/images/residual-directions.svg b/docs/source/docs/calibration/images/residual-directions.svg
new file mode 100644
index 0000000000..899badbe33
@@ -0,0 +1,10306 @@
[new SVG plot; markup omitted. Title: "Fitted residuals. Directions shown as colors. Magnitudes ignored"; axes "Imager x" / "Imager y"]
diff --git a/docs/source/docs/calibration/images/residual-magnitudes.svg b/docs/source/docs/calibration/images/residual-magnitudes.svg
new file mode 100644
index 0000000000..10fdf823b8
@@ -0,0 +1,10360 @@
[new SVG plot; markup omitted. Shows fitted residual magnitudes; axes "Imager x" / "Imager y"]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Fitted residuals. 
Errors shown as colors + + + + + + + + + diff --git a/docs/source/docs/calibration/images/unpacked-json.png b/docs/source/docs/calibration/images/unpacked-json.png new file mode 100644 index 0000000000..045c56354b Binary files /dev/null and b/docs/source/docs/calibration/images/unpacked-json.png differ diff --git a/docs/source/docs/contributing/index.rst b/docs/source/docs/contributing/index.rst new file mode 100644 index 0000000000..14b1dceb56 --- /dev/null +++ b/docs/source/docs/contributing/index.rst @@ -0,0 +1,7 @@ +Contributing to PhotonVision Projects +===================================== + +.. toctree:: + + photonvision/index + photonvision-docs/index diff --git a/docs/source/docs/contributing/photonvision-docs/building-docs.rst b/docs/source/docs/contributing/photonvision-docs/building-docs.rst new file mode 100644 index 0000000000..93f63d6cd0 --- /dev/null +++ b/docs/source/docs/contributing/photonvision-docs/building-docs.rst @@ -0,0 +1,32 @@ +Building the PhotonVision Documentation +======================================= +To build the PhotonVision documentation, you will require `Git `_ and `Python 3.6 or greater `_. + +Cloning the Documentation Repository +------------------------------------ +If you are planning on contributing, it is recommended to create a fork of the `main docs repository `_. To clone this fork, run the following command in a terminal window: + +``git clone https://github.com/[your username]/photonvision-docs`` + +Installing Python Dependencies +------------------------------ +You must install a set of Python dependencies in order to build the documentation. To do so, you can run the following command in the root project directory: + +``python -m pip install -r requirements.txt`` + +Building the Documentation +-------------------------- +In order to build the documentation, you can run the following command in the root project directory: + +``make html`` + +.. note:: You may have to run ``./make html`` on Windows. + +Opening the Documentation +------------------------- +The built documentation is located at ``build/html/index.html``. + +Docs Builds on Pull Requests +---------------------------- + +Pre-merge builds of docs can be found at: ``https://photonvision-docs--PRNUMBER.org.readthedocs.build/en/PRNUMBER/index.html``. These docs are republished on every commit to a pull request made to PhotonVision/photonvision-docs. For example, PR 325 would have pre-merge documentation published to ``https://photonvision-docs--325.org.readthedocs.build/en/325/index.html`` diff --git a/docs/source/docs/contributing/photonvision-docs/index.rst b/docs/source/docs/contributing/photonvision-docs/index.rst new file mode 100644 index 0000000000..d869754ac1 --- /dev/null +++ b/docs/source/docs/contributing/photonvision-docs/index.rst @@ -0,0 +1,8 @@ +Contributing to PhotonVision Documentation +========================================== + +.. toctree:: + + building-docs + style-guide + top-contributors diff --git a/docs/source/docs/contributing/photonvision-docs/style-guide.rst b/docs/source/docs/contributing/photonvision-docs/style-guide.rst new file mode 100644 index 0000000000..6b130c76e8 --- /dev/null +++ b/docs/source/docs/contributing/photonvision-docs/style-guide.rst @@ -0,0 +1,3 @@ +Style Guide +=========== +PhotonVision follows the frc-docs style guide which can be found `here `_. In order to run the linter locally (which builds on doc8 and checks for compliance with the style guide), follow the instructions `on GitHub `_. 
diff --git a/docs/source/docs/contributing/photonvision-docs/top-contributors.rst b/docs/source/docs/contributing/photonvision-docs/top-contributors.rst
new file mode 100644
index 0000000000..bc1fe933ea
--- /dev/null
+++ b/docs/source/docs/contributing/photonvision-docs/top-contributors.rst
@@ -0,0 +1,5 @@
+Top Contributors
+================
+
+.. ghcontributors:: PhotonVision/photonvision-docs
+   :limit: 10
diff --git a/docs/source/docs/contributing/photonvision/assets/git-download.png b/docs/source/docs/contributing/photonvision/assets/git-download.png
new file mode 100644
index 0000000000..5a19355a1c
Binary files /dev/null and b/docs/source/docs/contributing/photonvision/assets/git-download.png differ
diff --git a/docs/source/docs/contributing/photonvision/build-instructions.rst b/docs/source/docs/contributing/photonvision/build-instructions.rst
new file mode 100644
index 0000000000..85d8d63672
--- /dev/null
+++ b/docs/source/docs/contributing/photonvision/build-instructions.rst
@@ -0,0 +1,267 @@
+Build Instructions
+==================
+
+This section contains instructions for building PhotonVision from the source code available at `our GitHub page `_.
+
+Development Setup
+-----------------
+
+Prerequisites
+~~~~~~~~~~~~~
+
+| **Java Development Kit:** This project requires Java Development Kit (JDK) 17 to compile. This is the same Java version that ships with WPILib for 2025 and later. If you don't have this JDK from WPILib, you can follow the instructions to install JDK 17 for your platform `here `_.
+| **Node JS:** The UI is written in Node JS. To compile the UI, Node 14.18.0 to Node 16.0.0 is required. To install Node JS, follow the instructions for your platform `on the official Node JS website `_. However, modify this line
+
+.. code-block:: bash
+
+   nvm install 20
+
+so that it instead reads
+
+.. code-block:: bash
+
+   nvm install 14.18.0
+
+Compiling Instructions
+----------------------
+
+Getting the Source Code
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Get the source code from git:
+
+.. code-block:: bash
+
+   git clone https://github.com/PhotonVision/photonvision
+
+or alternatively, download the source code from GitHub and extract the zip:
+
+.. image:: assets/git-download.png
+   :width: 600
+   :alt: Download source code from GitHub
+
+Install Necessary Node JS Dependencies
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the photon-client directory:
+
+.. code-block:: bash
+
+   npm install
+
+Build and Copy UI to Java Source
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the root directory:
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew buildAndCopyUI``
+
+   .. tab-item:: macOS
+
+      ``./gradlew buildAndCopyUI``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew buildAndCopyUI``
+
+Build and Run PhotonVision
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To compile and run the project, issue the following command in the root directory:
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew run``
+
+   .. tab-item:: macOS
+
+      ``./gradlew run``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew run``
+
+Running the following command in the root directory will build the jar under ``photon-server/build/libs``:
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew shadowJar``
+
+   .. tab-item:: macOS
+
+      ``./gradlew shadowJar``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew shadowJar``
+
+Build and Run PhotonVision on a Raspberry Pi Coprocessor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As a convenience, the build has a built-in ``deploy`` command which builds, deploys, and starts the current source code on a coprocessor.
+
+An architecture override is required to specify the deploy target's architecture.
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew clean``
+
+      ``./gradlew deploy -PArchOverride=linuxarm64``
+
+   .. tab-item:: macOS
+
+      ``./gradlew clean``
+
+      ``./gradlew deploy -PArchOverride=linuxarm64``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew clean``
+
+      ``gradlew deploy -PArchOverride=linuxarm64``
+
+The ``deploy`` command is tested against Raspberry Pi coprocessors. Other, similar coprocessors may work too.
+
+Using PhotonLib Builds
+~~~~~~~~~~~~~~~~~~~~~~
+
+The build process includes the following task:
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew generateVendorJson``
+
+   .. tab-item:: macOS
+
+      ``./gradlew generateVendorJson``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew generateVendorJson``
+
+This generates a vendordep JSON for your local build at ``photon-lib/build/generated/vendordeps/photonlib.json``.
+
+The photonlib source can be published to your local Maven repository after building:
+
+.. tab-set::
+
+   .. tab-item:: Linux
+
+      ``./gradlew publishToMavenLocal``
+
+   .. tab-item:: macOS
+
+      ``./gradlew publishToMavenLocal``
+
+   .. tab-item:: Windows (cmd)
+
+      ``gradlew publishToMavenLocal``
+
+After adding the generated vendordep to your project, add the following to your project's ``build.gradle`` below the ``plugins {}`` block:
+
+.. code-block:: groovy
+
+   repositories {
+       mavenLocal()
+   }
+
+Debugging PhotonVision Running Locally
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One way to debug PhotonVision is to run it through Gradle with the :code:`--debug-jvm` flag. Run the program with :code:`./gradlew run --debug-jvm`, and attach to it with VSCode by adding the following to :code:`launch.json`. Note that program arguments can be passed with :code:`--args="foobar"`.
+
+.. code-block::
+
+   {
+       // Use IntelliSense to learn about possible attributes.
+       // Hover to view descriptions of existing attributes.
+       // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+       "version": "0.2.0",
+       "configurations": [
+           {
+               "type": "java",
+               "name": "Attach to Remote Program",
+               "request": "attach",
+               "hostName": "localhost",
+               "port": "5005",
+               "projectName": "photon-core"
+           }
+       ]
+   }
+
+PhotonVision can also be run using the Gradle Tasks plugin with :code:`"args": "--debug-jvm"` added to :code:`launch.json`.
+
+Debugging PhotonVision Running on a Coprocessor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up a VSCode configuration in :code:`launch.json`:
+
+.. code-block::
+
+   {
+       // Use IntelliSense to learn about possible attributes.
+       // Hover to view descriptions of existing attributes.
+       // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+       "version": "0.2.0",
+       "configurations": [
+           {
+               "type": "java",
+               "name": "Attach to CoProcessor",
+               "request": "attach",
+               "hostName": "photonvision.local",
+               "port": "5801",
+               "projectName": "photon-core"
+           }
+       ]
+   }
+
+Stop any existing instance of PhotonVision.
+
+Launch the program with the following additional argument to the JVM: :code:`java -jar -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5801 photonvision.jar`
+
+Once the program says it is listening on port 5801, launch the debug configuration in VSCode.
+
+The program will wait for the VSCode debugger to attach before proceeding.
+
+Running examples
+~~~~~~~~~~~~~~~~
+
+You can run one of the many built-in examples straight from the command line, too! They contain a fully featured robot project, and some include simulation support.
+The projects can be found inside the photonlib-java-examples and photonlib-cpp-examples subdirectories, respectively. The projects currently available include:
+
+- photonlib-java-examples:
+
+  - aimandrange:simulateJava
+  - aimattarget:simulateJava
+  - getinrange:simulateJava
+  - simaimandrange:simulateJava
+  - simposeest:simulateJava
+
+- photonlib-cpp-examples:
+
+  - aimandrange:simulateNative
+  - getinrange:simulateNative
+
+To run them, use the commands listed below. PhotonLib must first be published to your local Maven repository; the ``copyPhotonlib`` task will then copy the generated vendordep JSON file into each example. After that, the ``simulateJava``/``simulateNative`` task can be used like in a normal robot project. Robot simulation with an attached debugger is technically possible by using ``simulateExternalJava`` and modifying the launch script it exports, though this is unsupported.
+
+.. code-block::
+
+   ~/photonvision$ ./gradlew publishToMavenLocal
+
+   ~/photonvision$ cd photonlib-java-examples
+   ~/photonvision/photonlib-java-examples$ ./gradlew copyPhotonlib
+   ~/photonvision/photonlib-java-examples$ ./gradlew :simulateJava
+
+   ~/photonvision$ cd photonlib-cpp-examples
+   ~/photonvision/photonlib-cpp-examples$ ./gradlew copyPhotonlib
+   ~/photonvision/photonlib-cpp-examples$ ./gradlew :simulateNative
diff --git a/docs/source/docs/contributing/photonvision/index.rst b/docs/source/docs/contributing/photonvision/index.rst
new file mode 100644
index 0000000000..93185e0641
--- /dev/null
+++ b/docs/source/docs/contributing/photonvision/index.rst
@@ -0,0 +1,7 @@
+Contributing to PhotonVision
+============================
+
+.. toctree::
+
+   build-instructions
+   top-contributors
diff --git a/docs/source/docs/contributing/photonvision/top-contributors.rst b/docs/source/docs/contributing/photonvision/top-contributors.rst
new file mode 100644
index 0000000000..9c27a60562
--- /dev/null
+++ b/docs/source/docs/contributing/photonvision/top-contributors.rst
@@ -0,0 +1,5 @@
+Top Contributors
+================
+
+.. ghcontributors:: PhotonVision/photonvision
+   :limit: 10
diff --git a/docs/source/docs/description.rst b/docs/source/docs/description.rst
new file mode 100644
index 0000000000..a75ab7b71e
--- /dev/null
+++ b/docs/source/docs/description.rst
@@ -0,0 +1,47 @@
+About PhotonVision
+==================
+
+Description
+^^^^^^^^^^^
+PhotonVision is a free, fast, and easy-to-use vision processing solution for the *FIRST*\ Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions.
+Using PhotonVision, teams can go from setting up a camera and coprocessor to detecting and tracking targets simply by tuning sliders. With an easy-to-use interface, comprehensive documentation, and a feature-rich vendor dependency, no experience is necessary to use PhotonVision. No matter your resources, using PhotonVision is easy compared to its alternatives.
+
+Advantages
+^^^^^^^^^^
+PhotonVision has a myriad of advantages over similar solutions, including:
+
+Affordable
+----------
+PhotonVision is much cheaper to use than alternatives that cost $400; its only cost is your coprocessor and camera. This allows your team to save money while still being competitive.
+
+Easy to Use User Interface
+--------------------------
+The PhotonVision user interface is simple and modular, making things easier for the user. With a simpler interface, you can focus on what matters most (tracking targets) rather than on how to use our UI. A major unique quality is that the PhotonVision UI includes an offline copy of our documentation for your ease of access at competitions.
+
+PhotonLib Vendor Dependency
+---------------------------
+The PhotonLib vendor dependency allows you to easily get the target data you need (without having to work directly with NetworkTables) while also providing utility methods to get distance and position on the field. This helps your team focus less on getting data and more on using it to do cool things. It also has the benefit of a structure that ensures all data in a result comes from the same timestamp, which is helpful for latency compensation.
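+For a sense of what that looks like in robot code, here is a minimal sketch using the PhotonLib API (the camera name ``"photonvision"`` is a placeholder; use the name your camera has in the UI):
+
+.. code-block:: java
+
+   import org.photonvision.PhotonCamera;
+
+   PhotonCamera camera = new PhotonCamera("photonvision");
+
+   // One pipeline result bundles every target seen in a single,
+   // consistently-timestamped frame.
+   var result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       double yawDegrees = result.getBestTarget().getYaw();
+   }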
+
+User Calibration
+----------------
+PhotonVision allows the user to calibrate for their specific camera, which will get you the best tracking results. This is extremely important, as every camera (even of the same model) will have its own quirks, and user calibration allows those quirks to be accounted for.
+
+High FPS Processing
+-------------------
+Compared to alternative solutions, PhotonVision boasts a higher frame rate, which allows for a smoother video stream and detection of targets to ensure you aren't losing out on any performance.
+
+Low Latency
+-----------
+PhotonVision provides low-latency processing to make sure you get vision measurements as fast as possible, which makes complex vision tasks easier. All measurements in a result share the same timestamp, making life easier for your programmers.
+
+Fully Open Source and Active Developer Community
+------------------------------------------------
+You can find all of our code on `GitHub `_, including code for our main program, documentation, vendor dependency (PhotonLib), and more. This lets you see everything working behind the scenes and increases transparency. It also allows users to make pull requests for features they want added to PhotonVision, which will be reviewed by the development team. PhotonVision is licensed under the GNU General Public License (GPLv3), which you can learn more about `here `_.
+
+Multi-Camera Support
+--------------------
+You can use multiple cameras within PhotonVision, allowing you to see multiple angles without the need to buy multiple coprocessors. This makes vision processing more affordable and simpler for your team.
+
+Comprehensive Documentation
+---------------------------
+Using our comprehensive documentation, you will be able to easily start vision processing by following a series of simple steps.
diff --git a/docs/source/docs/examples/aimandrange.rst b/docs/source/docs/examples/aimandrange.rst
new file mode 100644
index 0000000000..9d3924f3a0
--- /dev/null
+++ b/docs/source/docs/examples/aimandrange.rst
@@ -0,0 +1,41 @@
+Combining Aiming and Getting in Range
+=====================================
+
+The following example is from the PhotonLib example repository (`Java `_/`C++ `_).
+
+Knowledge and Equipment Needed
+------------------------------
+
+- Everything required in :ref:`Aiming at a Target ` and :ref:`Getting in Range of the Target `.
+
+Code
+----
+
+Now that you know how to both aim and get in range of the target, it is time to combine the two. This example takes the previous two code examples and merges them into one function using the same tools as before. With this example, you have all the knowledge you need to use PhotonVision on your robot in any game. A trimmed sketch of the combined logic is shown first, followed by the full example code.
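+The sketch below is illustrative rather than a copy of the example: ``turnController`` and ``forwardController`` are the two P controllers from the earlier examples, and the geometry constants must match your own robot.
+
+.. code-block:: java
+
+   // Assumed members: PhotonCamera camera; PIDController turnController, forwardController;
+   // DifferentialDrive drive; constants for camera/target geometry and goal range.
+   var result = camera.getLatestResult();
+   double forwardSpeed = 0.0;
+   double rotationSpeed = 0.0;
+   if (result.hasTargets()) {
+       var target = result.getBestTarget();
+       double range =
+           PhotonUtils.calculateDistanceToTargetMeters(
+               CAMERA_HEIGHT_METERS,
+               TARGET_HEIGHT_METERS,
+               CAMERA_PITCH_RADIANS,
+               Units.degreesToRadians(target.getPitch()));
+       // Drive toward the goal range while turning the yaw error to zero.
+       forwardSpeed = -forwardController.calculate(range, GOAL_RANGE_METERS);
+       rotationSpeed = -turnController.calculate(target.getYaw(), 0);
+   }
+   drive.arcadeDrive(forwardSpeed, rotationSpeed);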
+
+.. tab-set::
+
+   .. tab-item:: Java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/aimandrange/src/main/java/frc/robot/Robot.java
+         :language: java
+         :lines: 42-111
+         :linenos:
+         :lineno-start: 42
+
+   .. tab-item:: C++ (Header)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimandrange/src/main/include/Robot.h
+         :language: cpp
+         :lines: 27-71
+         :linenos:
+         :lineno-start: 27
+
+   .. tab-item:: C++ (Source)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimandrange/src/main/cpp/Robot.cpp
+         :language: cpp
+         :lines: 25-67
+         :linenos:
+         :lineno-start: 25
diff --git a/docs/source/docs/examples/aimingatatarget.rst b/docs/source/docs/examples/aimingatatarget.rst
new file mode 100644
index 0000000000..53c077f9d9
--- /dev/null
+++ b/docs/source/docs/examples/aimingatatarget.rst
@@ -0,0 +1,46 @@
+Aiming at a Target
+==================
+
+The following example is from the PhotonLib example repository (`Java `_/`C++ `_).
+
+Knowledge and Equipment Needed
+------------------------------
+
+- Robot with a vision system running PhotonVision
+- Target
+- Ability to track a target by properly tuning a pipeline
+
+Code
+----
+
+Now that you have properly set up your vision system and tuned a pipeline, you can aim your robot/turret at the target using the data from PhotonVision. This data is reported over NetworkTables and includes: latency, whether a target is detected or not, pitch, yaw, area, skew, and target pose relative to the robot. This data will be used/manipulated by our vendor dependency, PhotonLib. The documentation for the NetworkTables API can be found :ref:`here ` and the documentation for PhotonLib :ref:`here `.
+
+For this simple example, only yaw is needed.
+
+In this example, while the operator holds a button down, the robot will turn towards the goal using the P term of a PID loop. To learn more about how PID loops work, how WPILib implements them, and more, visit `Advanced Controls (PID) `_ and `PID Control in WPILib `_.
+
+.. tab-set::
+
+   .. tab-item:: Java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/aimattarget/src/main/java/frc/robot/Robot.java
+         :language: java
+         :lines: 41-98
+         :linenos:
+         :lineno-start: 41
+
+   .. tab-item:: C++ (Header)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimattarget/src/main/include/Robot.h
+         :language: cpp
+         :lines: 27-53
+         :linenos:
+         :lineno-start: 27
+
+   .. tab-item:: C++ (Source)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/aimattarget/src/main/cpp/Robot.cpp
+         :language: cpp
+         :lines: 25-52
+         :linenos:
+         :lineno-start: 25
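+The core of the aiming logic is only a few lines. As a simplified sketch (``turnController`` is an assumed :code:`PIDController` with only a P gain, and ``drive`` an assumed :code:`DifferentialDrive`):
+
+.. code-block:: java
+
+   var result = camera.getLatestResult();
+   double rotationSpeed = 0.0;
+   if (result.hasTargets()) {
+       // Drive the yaw error toward zero; the negation matches the sign
+       // convention used in the full example above.
+       rotationSpeed = -turnController.calculate(result.getBestTarget().getYaw(), 0);
+   }
+   // forwardSpeed comes from the driver's throttle input in the full example.
+   drive.arcadeDrive(forwardSpeed, rotationSpeed);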
diff --git a/docs/source/docs/examples/gettinginrangeofthetarget.rst b/docs/source/docs/examples/gettinginrangeofthetarget.rst
new file mode 100644
index 0000000000..2e79455958
--- /dev/null
+++ b/docs/source/docs/examples/gettinginrangeofthetarget.rst
@@ -0,0 +1,54 @@
+Getting in Range of the Target
+==============================
+
+The following example is from the PhotonLib example repository (`Java `_/`C++ `_).
+
+Knowledge and Equipment Needed
+------------------------------
+
+- Everything required in :ref:`Aiming at a Target `.
+- Large space where your robot can move around freely
+
+Code
+----
+
+In FRC, a mechanism usually has to be a certain distance away from its target in order to be effective and score. In the previous example, we showed how to aim your robot at the target. Now we will show how to move to a certain distance from the target.
+
+For this example to work correctly, ensure that your robot is pointed towards the target.
+
+While the operator holds down a button, the robot will drive towards the target and get in range.
+
+This example uses the P term of a PID loop together with PhotonLib and the distance function of PhotonUtils.
+
+.. warning:: The PhotonLib utility to calculate distance depends on the camera being at a different vertical height than the target. If this is not the case, a different method for estimating distance, such as target width or area, should be used. In general, this method becomes more accurate as range decreases and as the height difference increases.
+
+.. note:: There is no strict minimum delta-height necessary for this method to be applicable, just a requirement that a delta exists.
+
+.. tab-set::
+
+   .. tab-item:: Java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/getinrange/src/main/java/frc/robot/Robot.java
+         :language: java
+         :lines: 42-107
+         :linenos:
+         :lineno-start: 42
+
+   .. tab-item:: C++ (Header)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/getinrange/src/main/include/Robot.h
+         :language: cpp
+         :lines: 27-67
+         :linenos:
+         :lineno-start: 27
+
+   .. tab-item:: C++ (Source)
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-cpp-examples/getinrange/src/main/cpp/Robot.cpp
+         :language: cpp
+         :lines: 25-58
+         :linenos:
+         :lineno-start: 25
+
+.. hint:: The accuracy of the measurement of the camera's pitch (:code:`CAMERA_PITCH_RADIANS` in the above example), as well as the camera's FOV, will determine the overall accuracy of this method.
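+The distance estimate itself is a single call to :code:`PhotonUtils.calculateDistanceToTargetMeters`. Here is a sketch of that step in isolation (the geometry constants are illustrative and must be measured on your own robot and field):
+
+.. code-block:: java
+
+   // Camera and target geometry, measured on the physical setup.
+   final double CAMERA_HEIGHT_METERS = Units.inchesToMeters(24);
+   final double TARGET_HEIGHT_METERS = Units.feetToMeters(5);
+   final double CAMERA_PITCH_RADIANS = Units.degreesToRadians(0);
+
+   var result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       double range =
+           PhotonUtils.calculateDistanceToTargetMeters(
+               CAMERA_HEIGHT_METERS,
+               TARGET_HEIGHT_METERS,
+               CAMERA_PITCH_RADIANS,
+               Units.degreesToRadians(result.getBestTarget().getPitch()));
+       // A P controller then drives the range error to zero.
+       double forwardSpeed = -forwardController.calculate(range, GOAL_RANGE_METERS);
+   }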
diff --git a/docs/source/docs/examples/simaimandrange.rst b/docs/source/docs/examples/simaimandrange.rst
new file mode 100644
index 0000000000..db20413a16
--- /dev/null
+++ b/docs/source/docs/examples/simaimandrange.rst
@@ -0,0 +1,94 @@
+Simulating Aiming and Getting in Range
+======================================
+
+The following example comes from the PhotonLib example repository (`Java `_/`C++ `_). Full code is available at those links.
+
+Knowledge and Equipment Needed
+------------------------------
+
+- Everything required in :ref:`Combining Aiming and Getting in Range `.
+
+Background
+----------
+
+The previous examples show how to run PhotonVision on a real robot, with a physical robot drivetrain moving around and interacting with the software.
+
+This example builds upon that, adding support for simulating robot motion and incorporating that motion into a :code:`SimVisionSystem`. This allows you to test control algorithms on your development computer, without requiring access to a real robot.
+
+.. raw:: html
+
+Walkthrough
+-----------
+
+First, in the main :code:`Robot` source file, we add support to periodically update a new simulation-specific object. This logic only runs in simulation:
+
+.. tab-set-code::
+
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/Robot.java
+      :language: java
+      :lines: 118-128
+      :linenos:
+      :lineno-start: 118
+
+Then, we add the implementation of our new `DrivetrainSim` class. Please reference the `WPILib documentation on physics simulation `_.
+
+Simulated vision support is added with the following steps:
+
+Creating the Simulated Vision System
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+First, we create a new :code:`SimVisionSystem` to represent our camera and coprocessor running PhotonVision.
+
+.. tab-set-code::
+
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+      :language: java
+      :lines: 73-93
+      :linenos:
+      :lineno-start: 73
+
+Next, we create objects to represent the physical location and size of the vision targets we are calibrated to detect. This example models the down-field high goal vision target from the 2020 and 2021 games.
+
+.. tab-set-code::
+
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+      :language: java
+      :lines: 95-111
+      :linenos:
+      :lineno-start: 95
+
+Finally, we add our target to the simulated vision system.
+
+.. tab-set-code::
+
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+      :language: java
+      :lines: 116-117
+      :linenos:
+      :lineno-start: 116
+
+If you have additional targets you want to detect, you can add them in the same way as the first one.
+
+Updating the Simulated Vision System
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once we have all the properties of our simulated vision system defined, the work to do at runtime becomes very minimal. Simply pass the robot's pose to the simulated vision system periodically.
+
+.. tab-set-code::
+
+   .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/ebef19af3d926cf87292177c9a16d01b71219306/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java
+      :language: java
+      :lines: 124-142
+      :linenos:
+      :lineno-start: 124
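+In practice, that periodic hook boils down to one call per simulation step. As a sketch (``simVision`` is the :code:`SimVisionSystem` created above, and ``getPose()`` stands in for however your drivetrain simulation exposes its current pose; the exact API may differ between PhotonLib versions):
+
+.. code-block:: java
+
+   public void update() {
+       // ... step the drivetrain physics model first ...
+       // Then tell the simulated vision system where the robot now is.
+       simVision.processFrame(getPose());
+   }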
+
+The rest is done behind the scenes.
diff --git a/docs/source/docs/examples/simposeest.rst b/docs/source/docs/examples/simposeest.rst
new file mode 100644
index 0000000000..b1d0a2ec84
--- /dev/null
+++ b/docs/source/docs/examples/simposeest.rst
@@ -0,0 +1,129 @@
+Using WPILib Pose Estimation, Simulation, and PhotonVision Together
+===================================================================
+
+The following example comes from the PhotonLib example repository (`Java `_). Full code is available at that link.
+
+Knowledge and Equipment Needed
+------------------------------
+
+- Everything required in :ref:`Combining Aiming and Getting in Range `, plus some familiarity with WPILib pose estimation functionality.
+
+Background
+----------
+
+This example builds upon WPILib's `Differential Drive Pose Estimator `_. It adds a :code:`PhotonCamera` to gather estimates of the robot's position on the field. These in turn can be used for aligning with vision targets and increasing the accuracy of autonomous routines.
+
+To support simulation, a :code:`SimVisionSystem` is used to drive data into the :code:`PhotonCamera`. The far high goal target from 2020 is modeled.
+
+Walkthrough
+-----------
+
+WPILib's :code:`Pose2d` class is used to represent robot positions on the field.
+
+Three different :code:`Pose2d` positions are relevant for this example:
+
+1) Desired Pose: The location some autonomous routine wants the robot to be in.
+2) Estimated Pose: The location the software `believes` the robot to be in, based on physics models and sensor feedback.
+3) Actual Pose: The location the robot is actually at. The physics simulation generates this in simulation, but it cannot be directly measured on the real robot.
+
+Estimating Pose
+^^^^^^^^^^^^^^^
+
+The :code:`DrivetrainPoseEstimator` class is responsible for generating an estimated robot pose using sensor readings (including PhotonVision).
+
+Please reference the `WPILib documentation `_ on using the :code:`DifferentialDrivePoseEstimator` class.
+
+For both simulation and on-robot code, we create objects to represent the physical location and size of the vision targets we are calibrated to detect. This example models the down-field high goal vision target from the 2020 and 2021 games.
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/Constants.java
+         :language: java
+         :lines: 83-106
+         :linenos:
+         :lineno-start: 83
+
+To incorporate PhotonVision, we need to create a :code:`PhotonCamera`:
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainPoseEstimator.java
+         :language: java
+         :lines: 46
+         :linenos:
+         :lineno-start: 46
+
+During periodic execution, we read back camera results. If we see a target in the image, we pass the camera-measured pose of the robot to the :code:`DifferentialDrivePoseEstimator`.
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainPoseEstimator.java
+         :language: java
+         :lines: 81-92
+         :linenos:
+         :lineno-start: 81
+
+That's it!
+
+Simulating the Camera
+^^^^^^^^^^^^^^^^^^^^^
+
+First, we create a new :code:`SimVisionSystem` to represent our camera and coprocessor running PhotonVision.
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java
+         :language: java
+         :lines: 76-95
+         :linenos:
+         :lineno-start: 76
+
+Then, we add our target to the simulated vision system.
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java
+         :language: java
+         :lines: 97-99
+         :linenos:
+         :lineno-start: 97
+
+If you have additional targets you want to detect, you can add them in the same way as the first one.
+
+Updating the Simulated Vision System
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once we have all the properties of our simulated vision system defined, the remaining work is minimal. Periodically, pass the robot's pose in to the simulated vision system.
+
+.. tab-set::
+
+   .. tab-item:: Java
+      :sync: java
+
+      .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simposeest/src/main/java/frc/robot/DrivetrainSim.java
+         :language: java
+         :lines: 138-139
+         :linenos:
+         :lineno-start: 138
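+Conceptually, the on-robot half of this example reduces to folding each vision-derived pose into the drivetrain estimator at the frame's capture time. A sketch of that step (``kFarTargetPose`` and ``kCameraToRobot`` are constants like those defined above; method names follow recent PhotonLib and may differ slightly in the pinned example commit):
+
+.. code-block:: java
+
+   var res = camera.getLatestResult();
+   if (res.hasTargets()) {
+       // Invert the camera-to-target transform to find the camera's field pose,
+       // then shift by the camera's mounting offset to get the robot's pose.
+       var camToTarget = res.getBestTarget().getBestCameraToTarget();
+       var camPose = kFarTargetPose.transformBy(camToTarget.inverse());
+       var robotPose = camPose.transformBy(kCameraToRobot).toPose2d();
+       m_poseEstimator.addVisionMeasurement(robotPose, res.getTimestampSeconds());
+   }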
+
+The rest is done behind the scenes.
diff --git a/docs/source/docs/hardware/customhardware.rst b/docs/source/docs/hardware/customhardware.rst
new file mode 100644
index 0000000000..b43d8a0753
--- /dev/null
+++ b/docs/source/docs/hardware/customhardware.rst
@@ -0,0 +1,112 @@
+Deploying on Custom Hardware
+============================
+
+Configuration
+-------------
+
+By default, PhotonVision attempts to make minimal assumptions about the hardware it runs on. However, it may be configured to enable custom LED control, branding, and other functionality.
+
+``hardwareConfig.json`` is the location for this configuration. It is included when settings are exported, and can be uploaded as part of a .zip or on its own.
+
+LED Support
+-----------
+
+For Raspberry Pi-based hardware, PhotonVision can use `PiGPIO `_ to control IO pins. The mapping of which pins control which LEDs is part of the hardware config. The pins are active-high: set high when LEDs are commanded on, and set low when commanded off.
+
+.. tab-set-code::
+
+   .. code-block:: json
+
+      {
+         "ledPins" : [ 13 ],
+         "ledSetCommand" : "",
+         "ledsCanDim" : true,
+         "ledPWMRange" : [ 0, 100 ],
+         "ledPWMSetRange" : "",
+         "ledPWMFrequency" : 0,
+         "ledDimCommand" : "",
+         "ledBlinkCommand" : "",
+         "statusRGBPins" : [ ]
+      }
+
+.. note:: No hardware boards with status RGB LED pins or non-dimming LEDs have been tested yet. Please reach out to the development team if these features are desired; they can assist with configuration and testing.
+
+Hardware Interaction Commands
+-----------------------------
+
+For non-Raspberry Pi hardware, users must provide valid hardware-specific commands for some parts of the UI interaction (including performance metrics and executing system restarts).
+
+Leaving a command blank will disable the associated functionality.
+
+.. tab-set-code::
+
+   .. code-block:: json
+
+      {
+         "cpuTempCommand" : "",
+         "cpuMemoryCommand" : "",
+         "cpuUtilCommand" : "",
+         "gpuMemoryCommand" : "",
+         "gpuTempCommand" : "",
+         "ramUtilCommand" : "",
+         "restartHardwareCommand" : ""
+      }
+
+.. note:: These settings have no effect if PhotonVision detects it is running on a Raspberry Pi. See `the MetricsBase class `_ for the commands utilized.
+
+Known Camera FOV
+----------------
+
+If your hardware contains a camera with a known field of view, it can be entered into the hardware configuration. This will prevent users from editing it in the GUI.
+
+.. tab-set-code::
+
+   .. code-block:: json
+
+      {
+         "vendorFOV" : 98.9
+      }
+
+Cosmetic & Branding
+-------------------
+
+To help differentiate your hardware from other solutions, some customization is allowed.
+
+.. tab-set-code::
+
+   .. code-block:: json
+
+      {
+         "deviceName" : "Super Cool Custom Hardware",
+         "deviceLogoPath" : "",
+         "supportURL" : "https://cat-bounce.com/"
+      }
+
+.. note:: Not all configuration is currently presented in the User Interface. Additional file uploads may be needed to support custom images.
+
+Example
+-------
+
+Here is a complete example ``hardwareConfig.json``:
+
+.. tab-set-code::
+
+   .. code-block:: json
+
+      {
+         "deviceName" : "Blinky McBlinkface",
+         "deviceLogoPath" : "",
+         "supportURL" : "https://www.youtube.com/watch?v=b-CvLWbfZhU",
+         "ledPins" : [2, 13],
+         "ledSetCommand" : "",
+         "ledsCanDim" : true,
+         "ledPWMRange" : [ 0, 100 ],
+         "ledPWMSetRange" : "",
+         "ledPWMFrequency" : 0,
+         "ledDimCommand" : "",
+         "ledBlinkCommand" : "",
+         "statusRGBPins" : [ ],
+         "cpuTempCommand" : "",
+         "cpuMemoryCommand" : "",
+         "cpuUtilCommand" : "",
+         "gpuMemoryCommand" : "",
+         "gpuTempCommand" : "",
+         "ramUtilCommand" : "",
+         "restartHardwareCommand" : "",
+         "vendorFOV" : 72.5
+      }
diff --git a/docs/source/docs/hardware/images/bootConfigTxt.png b/docs/source/docs/hardware/images/bootConfigTxt.png
new file mode 100644
index 0000000000..1c3f5b845a
Binary files /dev/null and b/docs/source/docs/hardware/images/bootConfigTxt.png differ
diff --git a/docs/source/docs/hardware/images/motionblur.gif b/docs/source/docs/hardware/images/motionblur.gif
new file mode 100644
index 0000000000..e1a6f01177
Binary files /dev/null and b/docs/source/docs/hardware/images/motionblur.gif differ
diff --git a/docs/source/docs/hardware/index.rst b/docs/source/docs/hardware/index.rst
new file mode 100644
index 0000000000..1e4a2f3bec
--- /dev/null
+++ b/docs/source/docs/hardware/index.rst
@@ -0,0 +1,9 @@
+Hardware Selection
+==================
+
+.. toctree::
+   :maxdepth: 2
+
+   selecting-hardware
+   picamconfig
+   customhardware
diff --git a/docs/source/docs/hardware/picamconfig.rst b/docs/source/docs/hardware/picamconfig.rst
new file mode 100644
index 0000000000..085d795f1b
--- /dev/null
+++ b/docs/source/docs/hardware/picamconfig.rst
@@ -0,0 +1,55 @@
+Pi Camera Configuration
+=======================
+
+Background
+----------
+
+The Raspberry Pi CSI camera port is routed through and processed by the GPU. Since the GPU boots before the CPU, it must be configured properly for the attached camera. Additionally, this configuration cannot be changed without rebooting.
+
+The GPU is not always capable of detecting other cameras automatically. The file ``/boot/config.txt`` is parsed by the GPU at boot time to determine what camera, if any, is expected to be attached. This file must be updated for some cameras.
+
+.. warning:: Incorrect camera configuration will cause the camera to not be detected. It looks exactly the same as if the camera were unplugged.
+
+Updating ``config.txt``
+-----------------------
+
+After flashing the Pi image onto an SD card, open the ``boot`` partition in a file browser.
+
+.. note:: Windows may report "There is a problem with this drive". This should be ignored.
+
+Locate ``config.txt`` in the folder, and open it with your favorite text editor.
+
+.. image:: images/bootConfigTxt.png
+
+Within the file, find this block of text:
+
+.. code-block::
+
+   ##############################################################
+   ### PHOTONVISION CAM CONFIG
+   ### Comment/Uncomment to change which camera is supported
+   ### Picam V1, V2 or HQ: uncomment (remove leading # ) from camera_auto_detect=1,
+   ### and comment out all following lines
+   ### IMX290/327/OV9281/Any other cameras that require additional overlays:
+   ### Comment out (add a # ) to camera_auto_detect=1, and uncomment the line for
+   ### the sensor you're trying to use
+
+   camera_auto_detect=1
+
+   # dtoverlay=imx290,clock-frequency=74250000
+   # dtoverlay=imx290,clock-frequency=37125000
+   # dtoverlay=imx378
+   # dtoverlay=ov9281
+
+   ##############################################################
+
+Remove the leading ``#`` character to uncomment the line associated with your camera. Add a ``#`` in front of the lines for other cameras.
+
+.. warning:: Leave lines outside the PhotonVision Camera Config block untouched. They are necessary for proper Raspberry Pi functionality.
+
+Save the file, close the editor, and eject the drive. The boot configuration should now be ready for your selected camera.
+
+Additional Information
+----------------------
+
+See `the libcamera documentation `_ for more details on configuring cameras.
diff --git a/docs/source/docs/hardware/selecting-hardware.rst b/docs/source/docs/hardware/selecting-hardware.rst
new file mode 100644
index 0000000000..82a59bcf10
--- /dev/null
+++ b/docs/source/docs/hardware/selecting-hardware.rst
@@ -0,0 +1,101 @@
+Selecting Hardware
+==================
+
+In order to use PhotonVision, you need a coprocessor and a camera. This page will help you select the right hardware for your team depending on your budget, needs, and experience.
+
+Choosing a Coprocessor
+----------------------
+
+Minimum System Requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Ubuntu 22.04 LTS or Windows 10/11
+
+  * We don't recommend using Windows for anything except testing out the system on a local machine.
+
+* CPU: ARM Cortex-A53 (the CPU on the Raspberry Pi 3) or better
+* At least 8GB of storage
+* 2GB of RAM
+
+  * PhotonVision isn't very RAM intensive, but you'll need at least 2GB to run the OS and PhotonVision.
+
+* The following IO:
+
+  * At least 1 USB or MIPI-CSI port for the camera
+
+    * Note that we only support using the Raspberry Pi's MIPI-CSI port; MIPI-CSI ports on other coprocessors may not work.
+
+  * Ethernet port for networking
+
+Coprocessor Recommendations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When selecting a coprocessor, it is important to consider various factors, particularly when it comes to AprilTag detection. Opting for a coprocessor with a more powerful CPU can generally result in higher-FPS AprilTag detection, leading to more accurate pose estimation. However, there is a point of diminishing returns, where the benefits of a more powerful CPU may not outweigh the additional cost. Below is a list of supported hardware, along with some notes on each.
+
+* Orange Pi 5 ($99)
+
+  * This is the recommended coprocessor for most teams. It has a powerful CPU that can handle AprilTag detection at high FPS, and is relatively cheap compared to processors of similar power.
+
+* Raspberry Pi 4/5 ($55-$80)
+
+  * This is the recommended coprocessor for teams on a budget. It has a less powerful CPU than the Orange Pi 5, but is still capable of running PhotonVision at a reasonable FPS.
+
+.. warning:: Leave lines outside the PhotonVision Camera Config block untouched. They are necessary for proper Raspberry Pi functionality.
+
+Save the file, close the editor, and eject the drive. The boot configuration should now be ready for your selected camera.
+
+Additional Information
+----------------------
+
+See `the libcamera documentation `_ for more details on configuring cameras.
diff --git a/docs/source/docs/hardware/selecting-hardware.rst b/docs/source/docs/hardware/selecting-hardware.rst
new file mode 100644
index 0000000000..82a59bcf10
--- /dev/null
+++ b/docs/source/docs/hardware/selecting-hardware.rst
+Selecting Hardware
+==================
+
+In order to use PhotonVision, you need a coprocessor and a camera. This page will help you select the right hardware for your team depending on your budget, needs, and experience.
+
+Choosing a Coprocessor
+----------------------
+
+Minimum System Requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Ubuntu 22.04 LTS or Windows 10/11
+
+  * We don't recommend using Windows for anything except testing out the system on a local machine.
+
+* CPU: ARM Cortex-A53 (the CPU on the Raspberry Pi 3) or better
+* At least 8GB of storage
+* 2GB of RAM
+
+  * PhotonVision isn't very RAM intensive, but you'll need at least 2GB to run the OS and PhotonVision.
+
+* The following IO:
+
+  * At least 1 USB or MIPI-CSI port for the camera
+
+    * Note that we only support using the Raspberry Pi's MIPI-CSI port; MIPI-CSI ports on other coprocessors may not work.
+
+  * An Ethernet port for networking
+
+Coprocessor Recommendations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When selecting a coprocessor, it is important to consider various factors, particularly when it comes to AprilTag detection. Opting for a coprocessor with a more powerful CPU can generally result in higher-FPS AprilTag detection, leading to more accurate pose estimation. However, it is important to note that there is a point of diminishing returns, where the benefits of a more powerful CPU may not outweigh the additional cost. Below is a list of supported hardware, along with some notes on each.
+
+* Orange Pi 5 ($99)
+
+  * This is the recommended coprocessor for most teams. It has a powerful CPU that can handle AprilTag detection at high FPS, and is relatively cheap compared to processors of similar power.
+
+* Raspberry Pi 4/5 ($55-$80)
+
+  * This is the recommended coprocessor for teams on a budget. It has a less powerful CPU than the Orange Pi 5, but is still capable of running PhotonVision at a reasonable FPS.
+
+* Mini PCs (such as the Beelink N5095)
+
+  * These will likely have similar performance to the Orange Pi 5, but have a higher performance ceiling (when using more powerful CPUs). Do note that they require extra effort to wire to the robot and get set up. More information can be found in the set up guide `here. `_
+
+* Other coprocessors can be used, but may require some extra work / command line usage in order to get them working properly.
+
+Choosing a Camera
+-----------------
+
+PhotonVision works with Pi Cameras and most USB cameras; the recommendations below are known to be working and have been tested. Other cameras such as webcams, virtual cameras, etc. are not officially supported and may not work. It is important to note that fisheye cameras should only be used as a driver camera and not for detecting targets.
+
+PhotonVision relies on `CSCore `_ to detect and process cameras, so camera support is determined based on compatibility with CSCore along with native support for the camera within your OS (ex. `V4L compatibility `_ if using a Linux machine like a Raspberry Pi).
+
+.. note::
+   Logitech cameras and integrated laptop cameras will not work with PhotonVision due to oddities with their drivers. We recommend using a different camera.
+
+.. note::
+   We do not currently support the usage of two of the same camera on the same coprocessor. You can only use two or more cameras if they are of different models or they are from Arducam, which has a `tool that allows for cameras to be renamed `_.
+
+Recommended Cameras
+^^^^^^^^^^^^^^^^^^^
+For colored shape detection, any non-fisheye camera supported by PhotonVision will work. We recommend the Pi Camera V1 or a high-FPS USB camera.
+
+For a driver camera, we recommend a USB camera with a fisheye lens, so your driver can see more of the field.
+
+For AprilTag detection, we recommend you use a global shutter camera that has a ~100 degree diagonal FOV. This will allow you to see more AprilTags in frame, and will allow for more accurate pose estimation. You also want a camera that supports high FPS, as this will allow you to update your pose estimator at a higher frequency.
+
+* Recommendations For AprilTag Detection
+
+  * Arducam USB OV9281
+
+    * This is the recommended camera for AprilTag detection, as it is a high-FPS, global shutter USB camera that has a ~70 degree FOV.
+
+  * Innomaker OV9281
+  * Spinel AR0144
+  * Pi Camera Module V1
+
+    * The V1 is strongly preferred over the V2 due to the V2 having undesirable FOV choices.
+
+AprilTags and Motion Blur
+^^^^^^^^^^^^^^^^^^^^^^^^^
+When detecting AprilTags, you want to reduce "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include:
+
+1. Cranking your exposure as low as it goes and increasing your gain/brightness. This will decrease the effects of motion blur and increase FPS.
+2. Using a global shutter (as opposed to rolling shutter) camera. This should eliminate most, if not all, motion blur.
+3. Only relying on tags when not moving.
+
+.. image:: images/motionblur.gif
+   :align: center
+
+Using Multiple Cameras
+^^^^^^^^^^^^^^^^^^^^^^
+
+Using multiple cameras on your robot will help you detect more AprilTags at once and improve your pose estimation as a result. In order to use multiple cameras, you will need to create multiple PhotonPoseEstimators and add all of their measurements to a single drivetrain pose estimator, as sketched below. Please note that the accuracy of your robot-to-camera transform is especially important when using multiple cameras, as any error in the transform will cause your pose estimations to "fight" each other. For more information, see :ref:`the programming reference. `.
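+
+A minimal sketch of this setup in Java (assuming 2024-era PhotonLib/WPILib APIs; ``kRobotToFrontCam``, ``kRobotToRearCam``, and ``drivePoseEstimator`` are placeholders you must supply) might look like:
+
+.. code-block:: java
+
+   import java.util.List;
+   import java.util.Optional;
+
+   import org.photonvision.EstimatedRobotPose;
+   import org.photonvision.PhotonCamera;
+   import org.photonvision.PhotonPoseEstimator;
+   import org.photonvision.PhotonPoseEstimator.PoseStrategy;
+
+   import edu.wpi.first.apriltag.AprilTagFieldLayout;
+   import edu.wpi.first.apriltag.AprilTagFields;
+
+   // One PhotonPoseEstimator per camera, each with its own robot-to-camera transform
+   AprilTagFieldLayout layout = AprilTagFields.kDefaultField.loadAprilTagLayoutField();
+   PhotonPoseEstimator frontEstimator = new PhotonPoseEstimator(
+       layout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, new PhotonCamera("frontCam"), kRobotToFrontCam);
+   PhotonPoseEstimator rearEstimator = new PhotonPoseEstimator(
+       layout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, new PhotonCamera("rearCam"), kRobotToRearCam);
+
+   // In periodic code: feed every camera's estimate into the one drivetrain pose estimator
+   for (PhotonPoseEstimator estimator : List.of(frontEstimator, rearEstimator)) {
+       Optional<EstimatedRobotPose> visionEst = estimator.update();
+       visionEst.ifPresent(est -> drivePoseEstimator.addVisionMeasurement(
+           est.estimatedPose.toPose2d(), est.timestampSeconds));
+   }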
+
+
+Performance Matrix
+------------------
+
+.. raw:: html
+
+
+Please submit performance data to be added to the matrix here:
+
+.. raw:: html
+
+
diff --git a/docs/source/docs/installation/images/gh_actions_1.png b/docs/source/docs/installation/images/gh_actions_1.png
new file mode 100644
index 0000000000..babf9ea56e
Binary files /dev/null and b/docs/source/docs/installation/images/gh_actions_1.png differ
diff --git a/docs/source/docs/installation/images/gh_actions_2.png b/docs/source/docs/installation/images/gh_actions_2.png
new file mode 100644
index 0000000000..f2fcfe8233
Binary files /dev/null and b/docs/source/docs/installation/images/gh_actions_2.png differ
diff --git a/docs/source/docs/installation/images/gh_actions_3.png b/docs/source/docs/installation/images/gh_actions_3.png
new file mode 100644
index 0000000000..18ae952525
Binary files /dev/null and b/docs/source/docs/installation/images/gh_actions_3.png differ
diff --git a/docs/source/docs/installation/images/networking-diagram.png b/docs/source/docs/installation/images/networking-diagram.png
new file mode 100644
index 0000000000..dcc57fe080
Binary files /dev/null and b/docs/source/docs/installation/images/networking-diagram.png differ
diff --git a/docs/source/docs/installation/images/pololu-diagram.png b/docs/source/docs/installation/images/pololu-diagram.png
new file mode 100644
index 0000000000..74b7b6f07a
Binary files /dev/null and b/docs/source/docs/installation/images/pololu-diagram.png differ
diff --git a/docs/source/docs/installation/images/release-page.png b/docs/source/docs/installation/images/release-page.png
new file mode 100644
index 0000000000..478d9a8918
Binary files /dev/null and b/docs/source/docs/installation/images/release-page.png differ
diff --git a/docs/source/docs/installation/images/static.png b/docs/source/docs/installation/images/static.png
new file mode 100644
index 0000000000..b5e66e4ec2
Binary files /dev/null and b/docs/source/docs/installation/images/static.png differ
diff --git a/docs/source/docs/installation/index.rst b/docs/source/docs/installation/index.rst
new file mode 100644
index 0000000000..3039ceb37e
--- /dev/null
+++ b/docs/source/docs/installation/index.rst
+Installation & Setup
+====================
+
+This page will help you install PhotonVision on your coprocessor, wire it, and properly set up the networking in order to start tracking targets.
+
+
+Step 1: Software Install
+------------------------
+
+This section will walk you through how to install PhotonVision on your coprocessor. Your coprocessor is the device with the camera attached that you are using to detect targets (ex. if you are using a Limelight / Raspberry Pi, that is your coprocessor, and you should follow those instructions).
+
+.. warning:: You only need to install PhotonVision on the coprocessor/device that is being used to detect targets; you do NOT need to install it on the device you use to view the webdashboard. All you need to view the webdashboard is a device on the same network as your vision coprocessor and an internet browser.
+
+.. toctree::
+   :maxdepth: 3
+
+   sw_install/index
+   updating
+
+
+Step 2: Wiring
+--------------
+
+This section will walk you through how to wire your coprocessor to get power.
+
+.. toctree::
+   :maxdepth: 1
+
+   wiring
+
+
+Step 3: Networking
+------------------
+
+This section will walk you through how to connect your coprocessor to a network. This section is very important (and easy to get wrong), so we recommend you read it thoroughly.
+
+.. toctree::
+   :maxdepth: 1
+
+   networking
diff --git a/docs/source/docs/installation/networking.rst b/docs/source/docs/installation/networking.rst
new file mode 100644
index 0000000000..1d2ec305ba
--- /dev/null
+++ b/docs/source/docs/installation/networking.rst
+Networking
+==========
+
+Physical Networking
+-------------------
+.. note:: When using PhotonVision off robot, you *MUST* plug the coprocessor into a physical router/radio. You can then connect your laptop/device used to view the webdashboard to the same network. Any other networking setup will not work and will not be supported in any capacity.
+
+After imaging your coprocessor, run an ethernet cable from your coprocessor to a router/radio, and power on your coprocessor by plugging it into the wall. Then connect whatever device you're using to view the webdashboard to the same network and navigate to ``photonvision.local:5800``.
+
+PhotonVision *STRONGLY* recommends the usage of a network switch on your robot. This is because the second radio port on the current FRC radios is known to be buggy and cause frequent connection issues that are detrimental during competition. An in-depth guide on how to install a network switch can be found `on FRC 900's website `_.
+
+
+.. image:: images/networking-diagram.png
+   :alt: Correctly set static IP
+
+Digital Networking
+------------------
+PhotonVision *STRONGLY* recommends the usage of static IPs, as they increase reliability on the field and when using PhotonVision in general. To properly set up your static IP, follow the steps below:
+
+.. warning:: Only use a static IP when connected to the **robot radio**, and never when testing at home, unless you are well versed in networking or have the relevant "know how".
+
+1. Ensure your robot is on and you are connected to the robot network.
+2. Navigate to ``photonvision.local:5800`` (this may be different if you are using a Gloworm / Limelight) in your browser.
+3. Open the settings tab on the left pane.
+4. Under the Networking section, set your team number.
+5. Change your IP to Static.
+6. Set your coprocessor's IP address to "10.TE.AM.11". More information on IP format can be found `here `_.
+
+7. Click the "Save" button.
+8. Set your roboRIO to the following static IP address: "10.TE.AM.2". This can be done via the `roboRIO web dashboard `_.
+
+Power-cycle your robot, and you will now be able to access the PhotonVision dashboard at ``10.TE.AM.11:5800``.
+
+.. image:: images/static.png
+   :alt: Correctly set static IP
+
+Port Forwarding
+---------------
+
+If you would like to access your Ethernet-connected vision device from a computer when tethered to the USB port on the roboRIO, you can use `WPILib's `_ ``PortForwarder``.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      PortForwarder.add(5800, "photonvision.local", 5800);
+
+   .. code-block:: C++
+
+      wpi::PortForwarder::GetInstance().Add(5800, "photonvision.local", 5800);
+
+.. note:: The address in the code above (``photonvision.local``) is the hostname of the coprocessor. This can be different depending on your hardware, and can be checked in the settings tab under "hostname".
+
+Camera Stream Ports
+-------------------
+
+The camera streams begin at port 1181, with two ports for each camera (ex. 1181 and 1182 for camera one, 1183 and 1184 for camera two, etc.). The easiest way to identify the port of the camera that you want is by double clicking on the stream, which opens it in a separate page. The port will be listed below the stream.
diff --git a/docs/source/docs/installation/sw_install/advanced-cmd.rst b/docs/source/docs/installation/sw_install/advanced-cmd.rst
new file mode 100644
index 0000000000..a19371cf38
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/advanced-cmd.rst
+Advanced Command Line Usage
+===========================
+PhotonVision exposes some command line options which may be useful for customizing execution on Debian-based installations.
+
+Running a JAR File
+------------------
+Assuming ``java`` has been installed, and the appropriate environment variables have been set upon installation (a package manager like ``apt`` should automatically set these), you can use ``java -jar`` to run a JAR file. If you downloaded the latest stable JAR of PhotonVision from the `GitHub releases page `_, you can run the following to start the program:
+
+.. code-block:: bash
+
+   java -jar /path/to/photonvision/photonvision.jar
+
+Updating a JAR File
+-------------------
+When you need to update your JAR file, run the following:
+
+.. code-block:: bash
+
+   wget https://git.io/JqkQ9 -O update.sh
+   sudo chmod +x update.sh
+   sudo ./update.sh
+   sudo reboot now
+
+Creating a ``systemd`` Service
+------------------------------
+You can also create a ``systemd`` service that will automatically run on startup. To do so, first navigate to ``/lib/systemd/system``. Create a file called ``photonvision.service`` (or name it whatever you want) using ``touch photonvision.service``. Then open this file in the editor of your choice and paste the following text:
+
+.. code-block::
+
+   [Unit]
+   Description=Service that runs PhotonVision
+
+   [Service]
+   WorkingDirectory=/path/to/photonvision
+   # Optional: run photonvision at "nice" -10, which is higher priority than standard
+   # Nice=-10
+   ExecStart=/usr/bin/java -jar /path/to/photonvision/photonvision.jar
+
+   [Install]
+   WantedBy=multi-user.target
+
+Then copy the ``.service`` file to ``/etc/systemd/system/`` using ``cp photonvision.service /etc/systemd/system/photonvision.service``. Then modify the file to have ``644`` permissions using ``chmod 644 /etc/systemd/system/photonvision.service``.
+
+.. note::
+   Many ARM processors have a big.LITTLE architecture where some of the CPU cores are more powerful than others. On this type of architecture, you may get more consistent performance by limiting which cores PhotonVision can use. To do this, add the parameter ``AllowedCPUs`` to the systemd service file in the ``[Service]`` section.
+
+   For instance, for an Orange Pi 5, cores 4 through 7 are the fast ones, and you can target those cores with the line ``AllowedCPUs=4-7``.
+
+Installing the ``systemd`` Service
+----------------------------------
+To install the service, simply run ``systemctl enable photonvision.service``.
+
+.. note:: It is recommended to reload configurations by running ``systemctl daemon-reload``.
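+
+As a quick sanity check (these are standard ``systemctl`` commands, not anything PhotonVision-specific), you can reload, enable, start, and verify the service in one go:
+
+.. code-block:: bash
+
+   sudo systemctl daemon-reload                      # pick up the new unit file
+   sudo systemctl enable --now photonvision.service  # enable at boot and start immediately
+   systemctl status photonvision.service             # confirm it reports active (running)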
diff --git a/docs/source/docs/installation/sw_install/files/Limelight2+/hardwareConfig.json b/docs/source/docs/installation/sw_install/files/Limelight2+/hardwareConfig.json
new file mode 100644
index 0000000000..5465d0d060
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/files/Limelight2+/hardwareConfig.json
+{
+  "deviceName" : "Limelight 2+",
+  "supportURL" : "https://limelightvision.io",
+  "ledPins" : [ 13, 18 ],
+  "ledsCanDim" : true,
+  "ledPWMRange" : [ 0, 100 ],
+  "ledPWMFrequency" : 30000,
+  "vendorFOV" : 75.76079874010732
+}
diff --git a/docs/source/docs/installation/sw_install/files/Limelight2/hardwareConfig.json b/docs/source/docs/installation/sw_install/files/Limelight2/hardwareConfig.json
new file mode 100644
index 0000000000..b38176e119
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/files/Limelight2/hardwareConfig.json
+{
+  "deviceName" : "Limelight 2",
+  "supportURL" : "https://limelightvision.io",
+  "ledPins" : [ 17, 18 ],
+  "ledsCanDim" : false,
+  "vendorFOV" : 75.76079874010732
+}
diff --git a/docs/source/docs/installation/sw_install/gloworm.rst b/docs/source/docs/installation/sw_install/gloworm.rst
new file mode 100644
index 0000000000..2dfc3454ab
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/gloworm.rst
+Gloworm Installation
+====================
+While the Gloworm vision processing camera is no longer in production, PhotonVision still supports it.
+
+Downloading the Gloworm Image
+-----------------------------
+Download the latest `Gloworm/Limelight release of PhotonVision `_; the image will be suffixed with "image_limelight2.xz". You do not need to extract the downloaded archive.
+
+Flashing the Gloworm Image
+--------------------------
+Plug a USB C cable from your computer into the USB C port on the Gloworm labeled with a download icon.
+
+Use the 1.18.11 version of `Balena Etcher `_ to flash the image onto the coprocessor.
+
+Run BalenaEtcher as an administrator. Select the downloaded ``.xz`` file.
+
+Select the compute module. If it doesn't show up after 30s, try using another USB port; initialization may take a while. If prompted, install the recommended missing drivers.
+
+Hit flash. Wait for flashing to complete, then disconnect your USB C cable.
+
+.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Gloworm. Updating to 1.18.11 will fix this issue.
+
+Final Steps
+-----------
+Power your device per its documentation and connect it to a robot network.
+
+You should be able to locate the camera at ``http://photonvision.local:5800/`` in your browser on your computer when connected to the robot.
+
+Troubleshooting/Setting a Static IP
+-----------------------------------
+A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address.
+
+Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network.
+
+.. image:: images/angryIP.png
+
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+
+Updating PhotonVision
+---------------------
+Download the latest stable .jar from `the releases page `_, go to the settings tab, and upload the .jar using the Offline Update button.
+
+.. note:: If you are updating PhotonVision on a Gloworm/Limelight, download the LinuxArm64 .jar file.
+
+As an alternative, export your settings, reimage your coprocessor using the instructions above, and import your settings back in.
+
+Hardware Troubleshooting
+------------------------
+To turn the LED lights off or on, modify the ``ledMode`` NetworkTables entry or call ``camera.setLED`` in PhotonLib.
+
+
+Support Links
+-------------
+
+* `Website/Documentation `__ (Note: Gloworm is no longer in production)
+
+* `Image `__
+
+* `Discord `__
diff --git a/docs/source/docs/installation/sw_install/images/angryIP.png b/docs/source/docs/installation/sw_install/images/angryIP.png
new file mode 100644
index 0000000000..20247d2272
Binary files /dev/null and b/docs/source/docs/installation/sw_install/images/angryIP.png differ
diff --git a/docs/source/docs/installation/sw_install/images/nano.png b/docs/source/docs/installation/sw_install/images/nano.png
new file mode 100644
index 0000000000..b5ba54b1a3
Binary files /dev/null and b/docs/source/docs/installation/sw_install/images/nano.png differ
diff --git a/docs/source/docs/installation/sw_install/index.rst b/docs/source/docs/installation/sw_install/index.rst
new file mode 100644
index 0000000000..b9503adc41
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/index.rst
+Software Installation
+=====================
+
+Supported Coprocessors
+----------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   raspberry-pi
+   limelight
+   orange-pi
+   snakeyes
+
+Desktop Environments
+--------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   windows-pc
+   linux-pc
+   mac-os
+
+Other
+-----
+
+.. toctree::
+   :maxdepth: 1
+
+   other-coprocessors
+   advanced-cmd
+   romi
+   gloworm
diff --git a/docs/source/docs/installation/sw_install/limelight.rst b/docs/source/docs/installation/sw_install/limelight.rst
new file mode 100644
index 0000000000..686dc1044b
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/limelight.rst
+Limelight Installation
+======================
+
+Imaging
+-------
+Limelight imaging is a very similar process to Gloworm, but with extra steps.
+
+
+Base Install Steps
+^^^^^^^^^^^^^^^^^^
+Due to the similarities in hardware, follow the :ref:`Gloworm install instructions `.
+
+
+Hardware-Specific Steps
+-----------------------
+Download the hardwareConfig.json file for the version of your Limelight:
+
+- :download:`Limelight Version 2 `.
+- :download:`Limelight Version 2+ `.
+
+.. note:: No hardware config is provided for the Limelight 3, as AprilTags do not require the LEDs (meaning nobody has reverse-engineered what I/O pins drive the LEDs) and the camera FOV is determined as part of calibration.
+
+:ref:`Import the hardwareConfig.json file `. Again, this is **REQUIRED** or target measurements will be incorrect, and LEDs will not work.
+
+After installation you should be able to `locate the camera `_ at: ``http://photonvision.local:5800/`` (not ``gloworm.local``, as previously)
diff --git a/docs/source/docs/installation/sw_install/linux-pc.rst b/docs/source/docs/installation/sw_install/linux-pc.rst
new file mode 100644
index 0000000000..2899787547
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/linux-pc.rst
+Linux PC Installation
+=====================
+PhotonVision may be run on a Debian-based Linux desktop PC for basic testing and evaluation.
+
+.. note:: You do not need to install PhotonVision on a Linux PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi).
+
+Installing Java
+---------------
+PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). If you don't have JDK 11 already, run the following to install it:
+
+.. code-block::
+
+   $ sudo apt-get install openjdk-11-jdk
+
+.. warning:: Using a JDK other than JDK 11 will cause issues when running PhotonVision and is not supported.
+
+Downloading the Latest Stable Release of PhotonVision
+-----------------------------------------------------
+Go to the `GitHub releases page `_ and download the relevant .jar file for your coprocessor.
+
+.. note::
+   If your coprocessor has a 64-bit ARM-based CPU architecture (Orange Pi, Raspberry Pi, etc.), download the LinuxArm64.jar file.
+
+   If your coprocessor has a 64-bit x86-based CPU architecture (Mini PC, laptop, etc.), download the Linuxx64.jar file.
+
+
+.. warning:: Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs.
+
+Running PhotonVision
+--------------------
+To run PhotonVision, open a terminal window of your choice and run the following command:
+
+.. code-block::
+
+   $ java -jar /path/to/photonvision/photonvision-xxx.jar
+
+If your computer has a compatible webcam connected, PhotonVision should start up without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. If it is the latter, please open an issue on the `PhotonVision issues page `_.
+
+Accessing the PhotonVision Interface
+------------------------------------
+Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser.
diff --git a/docs/source/docs/installation/sw_install/mac-os.rst b/docs/source/docs/installation/sw_install/mac-os.rst
new file mode 100644
index 0000000000..cdb19e5b04
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/mac-os.rst
+Mac OS Installation
+===================
+
+.. warning:: Due to current `cscore `_ restrictions, the PhotonVision server backend may have issues running on macOS.
+
+.. note:: You do not need to install PhotonVision on a Mac in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi).
+
+Very limited macOS support is available.
+
+Installing Java
+---------------
+PhotonVision requires a JDK installed and on the system path. JDK 11 is needed (different versions will not work). You may already have this if you have installed WPILib. If not, `download and install it from here `_.
+
+.. warning:: Using a JDK other than JDK 11 will cause issues when running PhotonVision and is not supported.
+
+Downloading the Latest Stable Release of PhotonVision
+-----------------------------------------------------
+Go to the `GitHub releases page `_ and download the relevant .jar file for your Mac.
+
+.. note::
+   If you have an M1/M2 Mac, download the macarm64.jar file.
+
+   If you have an Intel-based Mac, download the macx64.jar file.
+
+.. warning:: Be careful to pick the latest stable release. "Draft" or "Pre-Release" versions are not stable and often have bugs.
+
+Running PhotonVision
+--------------------
+To run PhotonVision, open a terminal window of your choice and run the following command:
+
+.. code-block::
+
+   $ java -jar /path/to/photonvision/photonvision-xxx.jar
+
+.. warning:: Due to current `cscore `_ restrictions, running PhotonVision in test mode is all that is known to work currently.
+
+Accessing the PhotonVision Interface
+------------------------------------
+Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser.
+
+.. warning:: Due to current `cscore `_ restrictions, it is unlikely any streams will open from real webcams.
diff --git a/docs/source/docs/installation/sw_install/orange-pi.rst b/docs/source/docs/installation/sw_install/orange-pi.rst
new file mode 100644
index 0000000000..c99805c1f7
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/orange-pi.rst
+Orange Pi Installation
+======================
+
+Downloading Linux Image
+-----------------------
+
+Starting in 2024, PhotonVision provides pre-configured system images for Orange Pi 5 devices. Download the latest release of the PhotonVision Orange Pi 5 image (.xz file suffixed with ``orangepi5.xz``) from the `releases page `_. You do not need to extract the downloaded archive file. This image is configured with a ``pi`` user with password ``raspberry``.
+
+For an Orange Pi 4, download the latest release of the Armbian Bullseye CLI image from `here `_.
+
+Flashing the Pi Image
+---------------------
+An 8GB or larger SD card is recommended.
+
+Use the 1.18.11 version of `Balena Etcher `_ to flash an image onto an Orange Pi. Select the downloaded image file, select your microSD card, and flash.
+
+For more detailed instructions on using Etcher, please see the `Etcher website `_.
+
+.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Orange Pi. Updating to 1.18.11 will fix this issue.
+
+Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image.
+
+Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash.
+
+.. note:: If you are working on Linux, "dd" can be used in the command line to flash an image.
+
+If you're using an Orange Pi 5, that's it! Orange Pi 4 users will need to install PhotonVision (see below).
+
+Initial User Setup (Orange Pi 4 Only)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Insert the flashed microSD card into your Orange Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process.
+
+Plug your Orange Pi into a display via HDMI and plug in a keyboard via USB once it's powered up. For an Orange Pi 4, complete the initial setup, which involves creating a root password and adding a user, as well as setting the localization language. Additionally, choose "bash" when prompted.
+
+Installing PhotonVision (Orange Pi 4 Only)
+------------------------------------------
+From here, you can follow :ref:`this guide `.
diff --git a/docs/source/docs/installation/sw_install/other-coprocessors.rst b/docs/source/docs/installation/sw_install/other-coprocessors.rst
new file mode 100644
index 0000000000..27125343a4
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/other-coprocessors.rst
+Other Debian-Based Co-Processor Installation
+============================================
+
+.. warning:: Working with unsupported coprocessors requires some level of "know how" of your target system. The install script has only been tested on Debian/Raspberry Pi OS Buster and Ubuntu Bionic. If any issues arise with your specific OS, please open an issue on our `issues page `_.
+
+.. note:: We'd love to have your input! If you get PhotonVision working on another coprocessor, consider documenting your steps and submitting a `docs issue `_, `pull request `_, or `ping us on Discord `_. For example, the Limelight and Romi install instructions came about because someone spent the time to figure it out and did a writeup.
+
+Installing PhotonVision
+-----------------------
+
+We provide an `install script `_ for other Debian-based systems (with ``apt``) that will automatically install PhotonVision and make sure that it runs on startup.
+
+.. code-block:: bash
+
+   $ wget https://git.io/JJrEP -O install.sh
+   $ sudo chmod +x install.sh
+   $ sudo ./install.sh
+   $ sudo reboot now
+
+.. note:: Your co-processor will require an Internet connection for this process to work correctly.
+
+For installation on any other co-processors, we recommend reading the :ref:`advanced command line documentation `.
+
+Updating PhotonVision
+---------------------
+
+PhotonVision can be updated by downloading the latest jar file, copying it onto the processor, and restarting the service.
+
+For example, from another computer, run the following commands. Substitute the correct username for "[user]" (e.g. Raspberry Pi uses "pi", Orange Pi uses "orangepi").
+
+.. code-block:: bash
+
+   $ scp [jar name].jar [user]@photonvision.local:~/
+   $ ssh [user]@photonvision.local
+   $ sudo mv [jar name].jar /opt/photonvision/photonvision.jar
+   $ sudo systemctl restart photonvision.service
diff --git a/docs/source/docs/installation/sw_install/raspberry-pi.rst b/docs/source/docs/installation/sw_install/raspberry-pi.rst
new file mode 100644
index 0000000000..7dd617400c
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/raspberry-pi.rst
+Raspberry Pi Installation
+=========================
+A pre-built Raspberry Pi image is available for ease of installation.
+
+Downloading the Pi Image
+------------------------
+Download the latest release of the PhotonVision Raspberry Pi image (.xz file) from the `releases page `_. You do not need to extract the downloaded archive.
+
+.. note:: Make sure you download the image that ends in '-RaspberryPi.xz'.
+
+Flashing the Pi Image
+---------------------
+An 8GB or larger card is recommended.
+
+Use the 1.18.11 version of `Balena Etcher `_ to flash the image onto a Raspberry Pi. Select the downloaded ``.xz`` file, select your microSD card, and flash.
+
+For more detailed instructions on using Etcher, please see the `Etcher website `_.
+
+.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue.
+
+Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image.
+
+Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash.
+
+If you are using a non-standard Pi camera connected to the CSI port, :ref:`additional configuration may be required. `
+
+Final Steps
+-----------
+Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process.
+
+After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to ``http://photonvision.local:5800`` in your browser on your computer.
+
+Troubleshooting/Setting a Static IP
+-----------------------------------
+A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address.
+
+Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network.
+
+.. image:: images/angryIP.png
+
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+
+Updating PhotonVision
+---------------------
+
+To upgrade a Raspberry Pi device with PhotonVision already installed, follow the :ref:`Raspberry Pi update instructions`.
diff --git a/docs/source/docs/installation/sw_install/romi.rst b/docs/source/docs/installation/sw_install/romi.rst
new file mode 100644
index 0000000000..55e16c2f39
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/romi.rst
+Romi Installation
+=================
+
+The `Romi `_ is a small robot that can be controlled with the WPILib software. The main controller is a Raspberry Pi that must be imaged with `WPILibPi `_.
+
+Installation
+------------
+
+The WPILibPi image includes FRCVision, which reserves USB cameras; to use PhotonVision, we need to edit the ``/home/pi/runCamera`` script to disable it. First we will need to make the file system writable; the easiest way to do this is to go to ``10.0.0.2`` and choose "Writable" at the top.
+
+SSH into the Raspberry Pi (using the Windows command line, or a tool like `PuTTY `_) at the Romi's default address ``10.0.0.2``. The default user is ``pi``, and the password is ``raspberry``.
+
+Follow the process for installing PhotonVision on :ref:`"Other Debian-Based Co-Processor Installation" `. As mentioned there, this will require an internet connection, so plugging into the Ethernet jack on the Raspberry Pi will be the easiest solution. The Pi must remain writable!
+
+Next, from the SSH terminal, run ``sudo nano /home/pi/runCamera``, then arrow down to the start of the exec line and press "Enter" to add a new line. Then add ``#`` before the exec command to comment it out. Then, arrow up to the new line and type ``sleep 10000``. Hit "Ctrl + O" and then "Enter" to save the file. Finally, press "Ctrl + X" to exit nano. Now, reboot the Romi by typing ``sudo reboot``.
+
+.. image:: images/nano.png
+
+After it reboots, you should be able to `locate the PhotonVision UI `_ at: ``http://10.0.0.2:5800/``.
+
+.. warning:: In order for settings, logs, etc. to be saved / take effect, ensure that PhotonVision is in writable mode.
diff --git a/docs/source/docs/installation/sw_install/snakeyes.rst b/docs/source/docs/installation/sw_install/snakeyes.rst
new file mode 100644
index 0000000000..971a32f5d1
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/snakeyes.rst
+SnakeEyes Installation
+======================
+A pre-built Raspberry Pi image with configuration for `the SnakeEyes Raspberry Pi Hat `_ is available for ease of setup.
+
+Downloading the SnakeEyes Image
+-------------------------------
+Download the latest release of the SnakeEyes-specific PhotonVision Pi image from the `releases page `_. You do not need to extract the downloaded ZIP file.
+
+Flashing the SnakeEyes Image
+----------------------------
+An 8GB or larger card is recommended.
+
+Use the 1.18.11 version of `Balena Etcher `_ to flash the image onto a Raspberry Pi. Select the downloaded ``.zip`` file, select your microSD card, and flash.
+
+For more detailed instructions on using Etcher, please see the `Etcher website `_.
+
+.. warning:: Using a version of Balena Etcher older than 1.18.11 may cause bootlooping (the system will repeatedly boot and restart) when imaging your Raspberry Pi. Updating to 1.18.11 will fix this issue.
+
+Alternatively, you can use the `Raspberry Pi Imager `_ to flash the image.
+
+Select "Choose OS" and then "Use custom" to select the downloaded image file. Select your microSD card and flash.
+
+Final Steps
+-----------
+Insert the flashed microSD card into your Raspberry Pi and boot it up. The first boot may take a few minutes as the Pi expands the filesystem. Be sure not to unplug during this process.
+
+After the initial setup process, your Raspberry Pi should be configured for PhotonVision. You can verify this by making sure your Raspberry Pi and computer are connected to the same network and navigating to ``http://photonvision.local:5800`` in your browser on your computer.
+
+Troubleshooting/Setting a Static IP
+-----------------------------------
+A static IP address may be used as an alternative to the mDNS ``photonvision.local`` address.
+
+Download and run `Angry IP Scanner `_ to find PhotonVision/your coprocessor on your network.
+
+.. image:: images/angryIP.png
+
+Once you find it, set the IP to a desired :ref:`static IP in PhotonVision. `
+
+Updating PhotonVision
+---------------------
+Download the latest xxxxx-LinuxArm64.jar from `our releases page `_, go to the settings tab, and upload the .jar using the Offline Update button.
+
+As an alternative, export your settings, reimage your coprocessor using the instructions above, and import your settings back in.
+
+Hardware Troubleshooting
+------------------------
+To turn the LED lights off or on, modify the ``ledMode`` NetworkTables entry or call ``camera.setLED`` in PhotonLib.
+
+Support Links
+-------------
+
+* `Website `__
+
+* `Image `__
+
+* `Documentation `__
diff --git a/docs/source/docs/installation/sw_install/windows-pc.rst b/docs/source/docs/installation/sw_install/windows-pc.rst
new file mode 100644
index 0000000000..33dda46899
--- /dev/null
+++ b/docs/source/docs/installation/sw_install/windows-pc.rst
+Windows PC Installation
+=======================
+PhotonVision may be run on a Windows desktop PC for basic testing and evaluation.
+
+.. note:: You do not need to install PhotonVision on a Windows PC in order to access the webdashboard (assuming you are using an external coprocessor like a Raspberry Pi).
+
+Install Bonjour
+---------------
+Bonjour provides more stable networking when using Windows PCs. Install `Bonjour here `_ before continuing to ensure a stable experience while using PhotonVision.
+
+Installing Java
+---------------
+PhotonVision requires a JDK installed and on the system path. **JDK 11 is needed** (different versions will not work). You may already have this if you have installed WPILib, but ensure that running ``java -version`` shows JDK 11. If not, `download and install it from here `_ and ensure that the new JDK is being used.
+
+.. warning:: Using a JDK other than JDK 11 will cause issues when running PhotonVision and is not supported.
+
+Downloading the Latest Stable Release of PhotonVision
+-----------------------------------------------------
+Go to the `GitHub releases page `_ and download the winx64.jar file.
+
+Running PhotonVision
+--------------------
+To run PhotonVision, open a terminal window of your choice and run the following command:
+
+.. code-block::
+
+   > java -jar C:\path\to\photonvision\NAME OF JAR FILE GOES HERE.jar
+
+If your computer has a compatible webcam connected, PhotonVision should start up without any error messages. If there are error messages, your webcam isn't supported or another issue has occurred. If it is the latter, please open an issue on the `PhotonVision issues page `_.
+
+.. warning:: Using an integrated laptop camera may cause issues when trying to run PhotonVision. If you are unable to run PhotonVision on a laptop with an integrated camera, try disabling the camera's driver in Windows Device Manager.
+
+Accessing the PhotonVision Interface
+------------------------------------
+Once the Java backend is up and running, you can access the main vision interface by navigating to ``localhost:5800`` inside your browser.
diff --git a/docs/source/docs/installation/updating.rst b/docs/source/docs/installation/updating.rst
new file mode 100644
index 0000000000..f5a98fbec1
--- /dev/null
+++ b/docs/source/docs/installation/updating.rst
+Updating PhotonVision
+=====================
+
+PhotonVision provides many different files on a single release page. Each release contains JAR files for performing "offline updates" of a device with PhotonVision already installed, as well as full image files to "flash" to supported coprocessors.
+
+.. image:: images/release-page.png
+   :alt: Example GitHub release page
+
+In the example release above, we see:
+
+- Image files for flashing directly to supported coprocessors.
+
+  - Raspberry Pi 3/4/5/CM4: follow our :ref:`Raspberry Pi flashing instructions`.
+  - For Limelight devices: follow our :ref:`Limelight flashing instructions`.
+  - For Orange Pi 5 devices: follow our :ref:`Orange Pi flashing instructions`.
+
+- JAR files for the suite of supported operating systems, for use with Offline Update. In general:
+
+  - Raspberry Pi, Limelight, and Orange Pi: use the JAR suffixed with -linuxarm64.jar. For example: :code:`photonvision-v2024.1.1-linuxarm64.jar`
+  - Beelink and other Intel/AMD-based Mini-PCs: use the JAR suffixed with -linuxx64.jar. For example: :code:`photonvision-v2024.1.1-linuxx64.jar`
+
+Offline Update
+--------------
+
+Unless noted on the release page, an offline update allows you to quickly upgrade the version of PhotonVision running on a coprocessor with PhotonVision already installed on it.
+
+Unless otherwise noted on the release page, config files should be backward compatible with previous versions of PhotonVision, and this offline update process should preserve any pipelines and calibrations previously performed. For paranoia's sake, we suggest exporting settings from the Settings tab prior to performing an offline update.
+
+.. note:: Carefully review the release notes to ensure that reflashing the device (for supported devices) or other installation steps are not required, as dependencies needed for PhotonVision may change between releases.
+
+Installing Pre-Release Versions
+-------------------------------
+
+Pre-release/development versions of PhotonVision can be tested by installing/downloading artifacts from GitHub Actions (see below), which are built automatically on commits to open pull requests and to PhotonVision's ``master`` branch, or by :ref:`compiling PhotonVision locally `.
+
+.. warning:: If testing a pre-release version of PhotonVision with a robot, PhotonLib must be updated to match the version downloaded! If not, packet schema definitions may not match and unexpected things will occur.
+
+GitHub Actions builds pre-release versions of PhotonVision automatically on PRs and on each commit merged to master. To test a particular commit to master, navigate to the `PhotonVision commit list `_ and click on the check mark (below). Scroll to "Build / Build fat JAR - PLATFORM", click details, and then summary. From here, JAR and image files can be downloaded to be flashed or uploaded using "Offline Update".
+
+.. image:: images/gh_actions_1.png
+   :alt: Github Actions Badge
+
+.. image:: images/gh_actions_2.png
+   :alt: Github Actions artifact list
+
+Built JAR files (but not image files) can also be downloaded from PRs before they are merged. Navigate to the PR in GitHub, and select Checks at the top. Click on "Build" to display the same artifact list as above.
+
+.. image:: images/gh_actions_3.png
+   :alt: Github Actions artifacts from PR
diff --git a/docs/source/docs/installation/wiring.rst b/docs/source/docs/installation/wiring.rst
new file mode 100644
index 0000000000..3e00f3d0af
--- /dev/null
+++ b/docs/source/docs/installation/wiring.rst
+Wiring
+======
+
+
+Off-Robot Wiring
+----------------
+
+Plugging your coprocessor into the wall via a power brick will suffice for off-robot wiring.
+
+.. note:: Please make sure your chosen power supply can provide enough power for your coprocessor. Undervolting (where enough power isn't being supplied) can cause many issues.
+
+
+On-Robot Wiring
+---------------
+
+.. note:: We recommend users use the `SnakeEyes Pi Hat `_ as it provides passive Power over Ethernet (PoE) and other useful features to simplify wiring and make your life easier.
+
+Recommended: Coprocessor with Passive POE (Gloworm, Pi with SnakeEyes, Limelight)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. Plug the `passive POE injector `_ into the coprocessor and wire it to the PDP/PDH (NOT the VRM).
+
+2. Add a breaker to the relevant slot in your PDP/PDH.
+
+3. Run an ethernet cable from the passive POE injector to your network switch / radio (we *STRONGLY* recommend the usage of a network switch; see the `networking `_ section for more info).
+
+Coprocessor without Passive POE
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+1a. Option 1: Get a micro USB (may be USB-C if using a newer Pi) pigtail cable and connect the wire ends to a regulator like `this `_. Then, wire the regulator into your PDP/PDH and the micro USB / USB C into your coprocessor.
+
+1b. Option 2: Use a USB power bank to power your coprocessor. Refer to this year's robot rulebook on legal implementations of this.
+
+2. Run an ethernet cable from your Pi to your network switch / radio (we *STRONGLY* recommend the usage of a network switch; see the `networking `_ section for more info).
+
+This diagram shows how to use the recommended regulator to power a coprocessor.
+
+.. image:: images/pololu-diagram.png
+   :alt: A flowchart-type diagram showing how to connect wires from the PDP or PDH to the recommended voltage regulator and then a Coprocessor.
+
+.. note:: The regulator comes with optional screw terminals that may be used to connect the PDP/PDH and Coprocessor power wires if you do not wish to solder them.
+
+Once you have wired your coprocessor, you are now ready to install PhotonVision.
diff --git a/docs/source/docs/integration/advancedStrategies.rst b/docs/source/docs/integration/advancedStrategies.rst
new file mode 100644
index 0000000000..a2c46bf4c3
--- /dev/null
+++ b/docs/source/docs/integration/advancedStrategies.rst
+Advanced Strategies
+===================
+
+Advanced strategies for using vision processing results involve working with the robot's *pose* on the field. A *pose* is the combination of an X/Y coordinate and an angle describing where the robot's front is pointed. It is always considered *relative* to some fixed point on the field.
+
+WPILib provides a `Pose2d `_ class to describe poses in software.
+
+Knowledge and Equipment Needed
+------------------------------
+
+- A coprocessor running PhotonVision
+
+  - Accurate camera calibration to support "3D mode" required
+
+- A drivetrain with wheels and sensors
+
+  - Sufficient sensors to measure wheel rotation
+  - Capable of closed-loop velocity control
+
+- A gyroscope or IMU measuring actual robot heading
+- Experience using some path-planning library (WPILib is our recommendation)
+
+Path Planning in a Target-Centered Reference Frame
+--------------------------------------------------
+
+When using 3D mode in PhotonVision, the `SolvePNP Algorithm `_ is used to deduce the *camera's* position in a 3D coordinate system centered on the target itself.
+
+A simple algorithm for using this measurement is:
+
+#. Assume your robot needs to be at a fixed ``Pose2d`` *relative to the target*.
+#. When triggered:
+
+   #. Read the most recent vision measurement - this is your *actual* pose.
+   #. Generate a simple trajectory to the goal position.
+   #. Execute the trajectory.
+
+.. note:: There is not currently an example demonstrating this technique.
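+
+That said, a rough sketch of the idea (assuming WPILib's trajectory API; ``getLatestVisionPose()`` is a placeholder for however you read the camera's target-relative measurement) might look like:
+
+.. code-block:: java
+
+   import java.util.List;
+
+   import edu.wpi.first.math.geometry.Pose2d;
+   import edu.wpi.first.math.geometry.Rotation2d;
+   import edu.wpi.first.math.trajectory.Trajectory;
+   import edu.wpi.first.math.trajectory.TrajectoryConfig;
+   import edu.wpi.first.math.trajectory.TrajectoryGenerator;
+
+   // Placeholder: where we want the robot to end up, relative to the target
+   Pose2d goalPose = new Pose2d(2.0, 0.0, Rotation2d.fromDegrees(180));
+
+   // Placeholder: the camera-reported robot pose, in the same target-relative frame
+   Pose2d actualPose = getLatestVisionPose();
+
+   // Generate a simple trajectory from where we are to where we want to be
+   TrajectoryConfig config = new TrajectoryConfig(2.0, 2.0); // max velocity (m/s), max accel (m/s^2)
+   Trajectory path = TrajectoryGenerator.generateTrajectory(actualPose, List.of(), goalPose, config);
+   // ... then hand `path` off to your drivetrain's trajectory follower to execute it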
+
+Global Pose Estimation
+----------------------
+
+A more complex way to utilize a camera-supplied ``Pose2d`` is to incorporate it into an estimation of the robot's ``Pose2d`` in a global field reference frame.
+
+When using this strategy, the measurements made by the camera are *fused* with measurements from other sensors, a model of expected robot behavior, and a matrix of weights that describes how trustworthy each sensor is. The result is a *best-guess* at the current pose on the field.
+
+In turn, this best-guess position is used to path plan to the known positions on the field, which may or may not have vision targets nearby.
+
+See the :ref:`Pose Estimation ` example for more information.
diff --git a/docs/source/docs/integration/aprilTagStrategies.rst b/docs/source/docs/integration/aprilTagStrategies.rst
new file mode 100644
index 0000000000..420baf4c00
--- /dev/null
+++ b/docs/source/docs/integration/aprilTagStrategies.rst
+AprilTag Strategies
+===================
+
+.. note:: The same strategies covered in the simple and advanced strategy sections still apply to AprilTags, and we encourage you to read them first. This page will discuss the specific nuances of using AprilTags.
+
+Simple Strategies
+-----------------
+
+Prior to the introduction of AprilTags, the most common vision strategy for teams was to use the yaw of the detected target in order to turn to the target, and then score. This is still possible with AprilTags, as the yaw of the tag is reported. Similarly, getting the distance to the target via trigonometry will also work. This is discussed in greater detail on the previous page.
+
+Advanced Strategies
+-------------------
+AprilTags allow you to find the robot's pose on the field using data from the tags. A pose is the combination of an X/Y coordinate and an angle describing where the robot's front is pointed. It is always considered relative to some fixed point on the field.
+
+Knowledge and Equipment Needed
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Knowledge
+
+* How to tune an AprilTag pipeline (found in the pipeline tuning section)
+
+Equipment
+
+* A coprocessor running PhotonVision - accurate camera calibration to support "3D mode" required
+
+* A drivetrain with wheels and sensors (sufficient sensors to measure wheel rotation, and capable of closed-loop velocity control)
+
+* A gyroscope or IMU measuring actual robot heading
+
+Global Pose Estimation / Pose Estimation Strategies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note:: See the previous page for more general information. Most of the information is the same, except now the camera is supplying a ``Pose3d``.
+
+The nature of how AprilTags are laid out on the field makes it very likely that you will get multiple pose measurements within a single frame from seeing multiple targets. This requires strategies to fuse these observations together and get a "best guess" as to where your robot is. The best way to do this is to use the corners from all visible AprilTags to estimate the robot's pose. This is done by using the ``PhotonPoseEstimator`` class and the "MULTI_TAG_PNP_ON_COPROCESSOR" strategy. Additional strategies include:
+
+* A camera seeing multiple targets, taking the average of all the returned poses
+* A camera seeing one target, with an assumed height off the ground, picking the pose which places it at the assumed height
+* A camera seeing one target, and picking a pose most similar to the most recently observed pose
+* A camera seeing one target, and picking a pose most similar to one provided externally (i.e., from the previous loop's odometry)
+* A camera seeing one target, and picking the pose with the lowest ambiguity
+
+PhotonVision supports all of these different strategies via our ``PhotonPoseEstimator`` class, which allows you to select one of the strategies above and get the relevant pose estimation, as shown in the snippet below.
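+
+A minimal sketch of selecting a strategy and a single-tag fallback (assuming PhotonLib's ``PhotonPoseEstimator`` API; ``fieldLayout``, ``camera``, and ``robotToCamera`` are placeholders constructed elsewhere):
+
+.. code-block:: java
+
+   import org.photonvision.PhotonPoseEstimator;
+   import org.photonvision.PhotonPoseEstimator.PoseStrategy;
+
+   PhotonPoseEstimator estimator = new PhotonPoseEstimator(
+       fieldLayout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, camera, robotToCamera);
+   // If only a single tag is visible in a frame, fall back to the lowest-ambiguity pose
+   estimator.setMultiTagFallbackStrategy(PoseStrategy.LOWEST_AMBIGUITY);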
+
+Tuning Pose Estimators
+^^^^^^^^^^^^^^^^^^^^^^
+
+Coming soon!
+TODO: Add this back in once simposeest example is added.
diff --git a/docs/source/docs/integration/background.rst b/docs/source/docs/integration/background.rst
new file mode 100644
index 0000000000..1615aacea2
--- /dev/null
+++ b/docs/source/docs/integration/background.rst
+Vision - Robot Integration Background
+=====================================
+
+Vision Processing's Purpose
+---------------------------
+
+Each year, the FRC game requires a fundamental operation: **Align the Robot to a Goal**.
+
+Regardless of whether an alignment point is for picking up gamepieces or for scoring, fast and effective robots must be able to align to these points quickly and repeatably.
+
+Software strategies can be used to help augment the ability of a human operator, or step in when a human operator is not allowed to control the robot.
+
+*Vision Processing* is one key *input* to these software strategies. However, the inputs your coprocessor provides must be interpreted and converted (ultimately) to motor voltage commands.
+
+There are many valid strategies for doing this transformation. Picking a strategy is a balancing act between:
+
+ 1. Available team resources (time, programming skills, previous experience)
+ 2. Precision of alignment required
+ 3. Team willingness to take on risk
+
+Simple strategies are low-risk - they require comparatively little effort to implement and tune, but have hard limits on the complexity of motion they can control on the robot. Advanced methods allow for more complex and precise movement, but take more effort to implement and tune. For this reason, attempting to use them carries more risk.
diff --git a/docs/source/docs/integration/index.rst b/docs/source/docs/integration/index.rst
new file mode 100644
index 0000000000..c8b9f8787a
--- /dev/null
+++ b/docs/source/docs/integration/index.rst
+Robot Integration
+=================
+
+.. toctree::
+   :maxdepth: 2
+
+   background
+   simpleStrategies
+   advancedStrategies
+   aprilTagStrategies
diff --git a/docs/source/docs/integration/simpleStrategies.rst b/docs/source/docs/integration/simpleStrategies.rst
new file mode 100644
index 0000000000..e6ef936533
--- /dev/null
+++ b/docs/source/docs/integration/simpleStrategies.rst
+Simple Strategies
+=================
+
+Simple strategies for using vision processor outputs involve using the target's position in the 2D image to infer *range* and *angle* to the target.
+
+Knowledge and Equipment Needed
+------------------------------
+
+- A coprocessor running PhotonVision
+- A drivetrain with wheels
+
+Angle Alignment
+---------------
+
+The simplest way to use a vision processing result is to first determine how far left or right in the image the vision target should be for your robot to be "aligned" to the target. Then:
+
+1. Read the current angle to the target from the vision coprocessor.
+2. If too far in one direction, command the drivetrain to rotate in the opposite direction to compensate.
+
+See the :ref:`Aiming at a Target ` example for more information.
+
+.. note:: Sometimes, these strategies have also involved incorporating a gyroscope. This can be necessary due to the high latency of vision processing algorithms. However, advancements in the tools available (including PhotonVision) have made that unnecessary for most applications.
+
+Range Alignment
+---------------
+
+By looking at the position of the target in the "vertical" direction in the image, and applying some trigonometry, the distance between the camera and the target can be deduced.
+
+1. Read the current distance to the target from the vision coprocessor.
+2. If too far in one direction, command the drivetrain to travel in the opposite direction to compensate.
+
+See the :ref:`Getting in Range of the Target ` example for more information.
+
+
+Angle + Range
+-------------
+
+Since the previous two alignment strategies work on independent axes of the robot, there's no reason you can't do them simultaneously. A combined sketch follows below.
+
+See the :ref:`Aim and Range ` example for more information.
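+
+As a rough combined sketch (assuming PhotonLib's ``PhotonCamera``/``PhotonUtils`` APIs; the camera nickname, PID gains, and the ``CAMERA_*``/``TARGET_*``/``GOAL_RANGE_METERS`` constants are placeholders, and ``drive`` is your drivetrain object):
+
+.. code-block:: java
+
+   import org.photonvision.PhotonCamera;
+   import org.photonvision.PhotonUtils;
+
+   import edu.wpi.first.math.controller.PIDController;
+   import edu.wpi.first.math.util.Units;
+
+   PhotonCamera camera = new PhotonCamera("photonvision");
+   PIDController turnController = new PIDController(0.03, 0.0, 0.0);
+   PIDController forwardController = new PIDController(0.8, 0.0, 0.0);
+
+   var result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       var target = result.getBestTarget();
+       // Angle: rotate until the target's yaw reads zero (centered in the image)
+       double rotationSpeed = -turnController.calculate(target.getYaw(), 0.0);
+       // Range: estimate distance from the target's pitch, then close to the goal range
+       double rangeMeters = PhotonUtils.calculateDistanceToTargetMeters(
+           CAMERA_HEIGHT_METERS, TARGET_HEIGHT_METERS,
+           CAMERA_PITCH_RADIANS, Units.degreesToRadians(target.getPitch()));
+       double forwardSpeed = -forwardController.calculate(rangeMeters, GOAL_RANGE_METERS);
+       drive.arcadeDrive(forwardSpeed, rotationSpeed);
+   }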
diff --git a/docs/source/docs/objectDetection/about-object-detection.rst b/docs/source/docs/objectDetection/about-object-detection.rst
new file mode 100644
index 0000000000..f054c2c8be
--- /dev/null
+++ b/docs/source/docs/objectDetection/about-object-detection.rst
+About Object Detection
+======================
+
+How does it work?
+^^^^^^^^^^^^^^^^^
+
+PhotonVision supports object detection using neural network accelerator hardware built into Orange Pi 5/5+ coprocessors. The Neural Processing Unit, or NPU, is `used by PhotonVision `_ to massively accelerate certain math operations like those needed for running ML-based object detection.
+
+For the 2024 season, PhotonVision ships with a **pre-trained NOTE detector** (shown below), as well as a mechanism for swapping in custom models. Future development will focus on enabling lower-friction management of multiple custom models.
+
+.. image:: images/notes-ui.png
+
+Tracking Objects
+^^^^^^^^^^^^^^^^
+
+Before you get started with object detection, ensure that you have followed the previous sections on installation, wiring, and networking. Next, open the Web UI, go to the top right card, and switch to the "Object Detection" type. You should see a screen similar to the image above.
+
+PhotonVision currently ships with a NOTE detector based on a `YOLOv5 model `_. This model is trained to detect one or more object "classes" (such as cars, stoplights, or in our case, NOTES) in an input image. For each detected object, the model outputs a bounding box around where in the image the object is located, what class the object belongs to, and a unitless confidence between 0 and 1.
+
+.. note:: This model output means that while it's fairly easy to say that "this rectangle probably contains a NOTE", we don't have any information about the NOTE's orientation or location. Further math in user code would be required to make estimates about where an object is physically located relative to the camera.
+
+Tuning and Filtering
+^^^^^^^^^^^^^^^^^^^^
+
+Compared to other pipelines, object detection exposes very few tuning handles. The Confidence slider changes the minimum confidence that the model needs to have in a given detection to consider it valid, as a number between 0 and 1 (with 0 meaning completely uncertain and 1 meaning maximally certain).
+
+.. raw:: html
+
+
+The same area, aspect ratio, and target orientation/sort parameters from :ref:`reflective pipelines ` are also exposed in the object detection card.
+
+Training Custom Models
+^^^^^^^^^^^^^^^^^^^^^^
+
+Coming soon!
+
+Uploading Custom Models
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning:: PhotonVision currently ONLY supports YOLOv5 models trained and converted to ``.rknn`` format for RK3588 CPUs! Other models require different post-processing code and will NOT work. The model conversion process is also highly particular. Proceed with care.
+
+Our `pre-trained NOTE model `_ is automatically extracted from the JAR when PhotonVision starts, but only if files named "note-640-640-yolov5s.rknn" and "labels.txt" do not already exist in the folder ``photonvision_config/models/``. This technically allows power users to replace the model and label files with new ones without rebuilding Photon from source and uploading a new JAR.
+
+Use a program like WinSCP or FileZilla to access your coprocessor's filesystem, and copy the new ``.rknn`` model file into /home/pi. Next, SSH into the coprocessor and ``sudo mv /path/to/new/model.rknn /opt/photonvision/photonvision_config/models/note-640-640-yolov5s.rknn``. Repeat this process with the labels file, which should contain one line per label the model outputs, with no trailing newline. Next, restart PhotonVision via the web UI.
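+
+From robot code, detections can then be consumed like any other PhotonVision target. A minimal sketch (assuming PhotonLib's ``PhotonCamera`` API; the camera nickname is a placeholder), keeping in mind the note above that this yields no 3D location:
+
+.. code-block:: java
+
+   import org.photonvision.PhotonCamera;
+
+   PhotonCamera camera = new PhotonCamera("objDetectCam");
+
+   var result = camera.getLatestResult();
+   if (result.hasTargets()) {
+       var note = result.getBestTarget();
+       // The bounding box center, reported as angles away from the camera's optical axis
+       double yawDegrees = note.getYaw();
+       double pitchDegrees = note.getPitch();
+       // e.g. turn toward the NOTE by driving yawDegrees toward zero
+   }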
diff --git a/docs/source/docs/objectDetection/images/notes-ui.png b/docs/source/docs/objectDetection/images/notes-ui.png new file mode 100644 index 0000000000..9993d5e019 Binary files /dev/null and b/docs/source/docs/objectDetection/images/notes-ui.png differ diff --git a/docs/source/docs/objectDetection/index.rst b/docs/source/docs/objectDetection/index.rst new file mode 100644 index 0000000000..0bb65ca5a0 --- /dev/null +++ b/docs/source/docs/objectDetection/index.rst @@ -0,0 +1,8 @@ +Object Detection +================ + +.. toctree:: + :maxdepth: 0 + :titlesonly: + + about-object-detection diff --git a/docs/source/docs/pipelines/about-pipelines.rst b/docs/source/docs/pipelines/about-pipelines.rst new file mode 100644 index 0000000000..560deff330 --- /dev/null +++ b/docs/source/docs/pipelines/about-pipelines.rst @@ -0,0 +1,52 @@ +:orphan: + +About Pipelines +=============== + +What is a pipeline? +^^^^^^^^^^^^^^^^^^^ + +A vision pipeline represents a series of steps that are used to acquire an image, process it, and analyze it to find a target. In most FRC games, this means processing an image in order to detect a piece of retroreflective tape or an AprilTag. + +Types of Pipelines +^^^^^^^^^^^^^^^^^^ + +Reflective +---------- + +This is the most common pipeline type and it is based on detecting targets with retroreflective tape. In the contours tab of this pipeline type, you can filter the area, width/height ratio, fullness, and degree of speckle rejection. + +Colored Shape +------------- + +This pipeline type is based on detecting different shapes like circles, triangles, quadrilaterals, or polygons. An example usage would be detecting yellow PowerCells from the 2020 FRC game. You can read more about the specific settings available in the contours page. + +AprilTag / ArUco +---------------- + +This pipeline type is based on detecting AprilTag fiducial markers. More information about AprilTags can be found in the WPILib documentation. While more performance-intensive than the reflective and colored shape pipelines, it has the benefit of providing easy-to-use 3D pose information which allows localization. + +.. note:: In order to get 3D Pose data about AprilTags, you are required to :ref:`calibrate your camera`. + +Note About Multiple Cameras and Pipelines +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When using more than one camera, it is important to keep in mind that all cameras run one pipeline each, all publish to NT, and all send both streams. This will have a noticeable effect on performance and we recommend users limit themselves to 1-2 cameras per coprocessor. + +Pipeline Steps +^^^^^^^^^^^^^^ +Reflective and Colored Shape Pipelines have 4 steps (represented as 4 tabs): + +1. Input: This tab allows the raw camera image to be modified before it gets processed. Here, you can set exposure, brightness, gain, orientation, and resolution. + +2. Threshold (Only Reflective and Colored Shape): This tab allows you to filter out specific colors/pixels in your camera stream through HSV tuning. The end goal here is having a black and white image that will only have your target lit up. + +3. Contours: After thresholding, contiguous white pixels are grouped together, and described by a curve that outlines the group. This curve is called a "contour", and contours represent candidate targets on your screen. Regardless of type, you can filter how the targets are grouped, their intersection, and how the targets are sorted. Other available filters will change based on different pipeline types. + +4. Output: Now that you have filtered all of your contours, this tab allows you to manipulate the detected target via orientation, the offset point, and offset. + +AprilTag / ArUco Pipelines have 3 steps: + +1. Input: This is the same as the above. +2. AprilTag: This step includes AprilTag-specific tuning parameters, such as decimate, blur, threads, pose iterations, and more. +3. Output: This is the same as the above. diff --git a/docs/source/docs/pipelines/images/motionblur.gif b/docs/source/docs/pipelines/images/motionblur.gif new file mode 100644 index 0000000000..e1a6f01177 Binary files /dev/null and b/docs/source/docs/pipelines/images/motionblur.gif differ diff --git a/docs/source/docs/pipelines/images/pipelinetype.png b/docs/source/docs/pipelines/images/pipelinetype.png new file mode 100644 index 0000000000..906be5f644 Binary files /dev/null and b/docs/source/docs/pipelines/images/pipelinetype.png differ diff --git a/docs/source/docs/pipelines/index.rst b/docs/source/docs/pipelines/index.rst new file mode 100644 index 0000000000..cf9f55811b --- /dev/null +++ b/docs/source/docs/pipelines/index.rst @@ -0,0 +1,8 @@ +Pipelines +========= + +.. toctree:: + + about-pipelines + input + output diff --git a/docs/source/docs/pipelines/input.rst b/docs/source/docs/pipelines/input.rst new file mode 100644 index 0000000000..553dbf02f4 --- /dev/null +++ b/docs/source/docs/pipelines/input.rst @@ -0,0 +1,47 @@ +Camera Tuning / Input +===================== + +PhotonVision's "Input" tab contains settings that affect the image captured by the currently selected camera. This includes camera exposure and brightness, as well as resolution and orientation. + +Resolution +---------- + +Resolution changes the resolution of the image captured. While higher resolutions are often more accurate than lower resolutions, they also run at a slower update rate. + +When using the reflective/colored shape pipeline, detection should be run at as low a resolution as possible, as you are only trying to detect simple contours (essentially colored blobs). + +When using the AprilTag pipeline, you should try to use as high a resolution as you can while still maintaining a reasonable FPS measurement. This is because higher resolution allows you to detect tags with higher accuracy and from larger distances. + +Exposure and brightness +----------------------- + +Camera exposure and brightness control how bright the captured image will be, although they function differently. Camera exposure changes how long the camera shutter lets in light, which changes the overall brightness of the captured image. This is in contrast to brightness, which is a post-processing effect that boosts the overall brightness of the image at the cost of desaturating colors (making colors look less distinct). + +.. important:: For all pipelines, exposure time should be set as low as possible while still allowing for the target to be reliably tracked. This allows for faster processing as decreasing exposure will increase your camera FPS. + +For reflective pipelines, after adjusting exposure and brightness, the target should be lit green (or the color of the vision tracking LEDs used). The more distinct the color of the target, the more likely it will be tracked reliably. + +.. note:: Unlike with retroreflective tape, AprilTag tracking is not very dependent on lighting consistency. If you have trouble detecting tags due to low light, you may want to try increasing exposure, but this will likely decrease your achievable framerate.
+ +AprilTags and Motion Blur +^^^^^^^^^^^^^^^^^^^^^^^^^ + +For AprilTag pipelines, your goal is to reduce the "motion blur" as much as possible. Motion blur is the visual streaking/smearing on the camera stream as a result of movement of the camera or object of focus. You want to mitigate this as much as possible because your robot is constantly moving and you want to be able to read as many tags as you possibly can. The possible solutions to this include: + +1. Cranking your exposure as low as it goes and increasing your gain/brightness. This will decrease the effects of motion blur and increase FPS. +2. Using a global shutter (as opposed to rolling shutter) camera. This should eliminate most, if not all, motion blur. +3. Only relying on tags when not moving. + +.. image:: images/motionblur.gif + :align: center + +Orientation +----------- + +Orientation can be used to rotate the image prior to vision processing. This can be useful for cases where the camera is not oriented parallel to the ground. Do note that this operation can in some cases significantly reduce FPS. + +Stream Resolution +----------------- + +This changes the resolution that is used to stream frames from PhotonVision. This does not change the resolution used to perform vision processing. This is useful to reduce bandwidth consumption on the field. In some high-resolution cases, decreasing stream resolution can increase processing FPS. diff --git a/docs/source/docs/pipelines/output.rst b/docs/source/docs/pipelines/output.rst new file mode 100644 index 0000000000..3ec552c5a9 --- /dev/null +++ b/docs/source/docs/pipelines/output.rst @@ -0,0 +1,25 @@ +Output +====== + +The output card contains sections for target manipulation and offset modes. + +Target Manipulation +------------------- + +In this section, the Target Offset Point changes where the "center" of the target is. This can be useful if the pitch/yaw of the middle of the top edge of the target is desired, rather than the center of mass of the target. The "top"/"bottom"/"left"/"right" of the target are defined by the Target Orientation selection. For example, a 400x200px target in landscape mode would have the "top" offset point located at the middle of the uppermost long edge of the target, while in portrait mode the "top" offset point would be located in the middle of the topmost short edge (in this case, either the left or right sides). + +This section also includes a switch to enable processing and sending multiple targets, up to 5, simultaneously. This information is available through PhotonLib. Note that the :code:`GetPitch`/:code:`GetYaw` methods will report the pitch/yaw of the "best" (lowest indexed) target. + +.. raw:: html + + + +Robot Offset +------------ + +PhotonVision offers both single and dual point offset modes. In single point mode, the "Take Point" button will set the crosshair location to the center of the current "best" target. + +In dual point mode, two snapshots are required. Take one snapshot with the target far away, and the other with the target closer. The position of the crosshair will be linearly interpolated between these two points based on the area of the current "best" target. This might be useful if single point is not accurate across the range of the tracking distance, or for significantly offset cameras.
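As noted in the Target Manipulation section above, when the multiple-target switch is enabled, all published targets can be read through PhotonLib rather than only the "best" one. A minimal Java sketch (``camera`` is assumed to be an already-constructed ``PhotonCamera``):

.. code-block:: java

    // Targets are ordered by the pipeline's sort mode; index 0 is the "best" target.
    var result = camera.getLatestResult();
    for (PhotonTrackedTarget target : result.getTargets()) {
        System.out.println("yaw: " + target.getYaw() + " deg, area: " + target.getArea() + "%");
    }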
diff --git a/docs/source/docs/programming/index.rst b/docs/source/docs/programming/index.rst new file mode 100644 index 0000000000..38d5fd4f8c --- /dev/null +++ b/docs/source/docs/programming/index.rst @@ -0,0 +1,9 @@ +:orphan: + +Programming Reference +===================== + +.. toctree:: + :maxdepth: 1 + + photonlib/index diff --git a/docs/source/docs/programming/photonlib/adding-vendordep.rst b/docs/source/docs/programming/photonlib/adding-vendordep.rst new file mode 100644 index 0000000000..178ad29c53 --- /dev/null +++ b/docs/source/docs/programming/photonlib/adding-vendordep.rst @@ -0,0 +1,37 @@ +Installing PhotonLib +==================== + +What is PhotonLib? +------------------ +PhotonLib is the C++ and Java vendor dependency that accompanies PhotonVision. We created this vendor dependency to make it easier for teams to retrieve vision data from their integrated vision system. + +PhotonLibPy is a minimal, pure-python implementation of PhotonLib. + +Online Install - Java/C++ +------------------------- +Click on the WPI icon on the top right of your VS Code window or hit Ctrl+Shift+P (Cmd+Shift+P on macOS) to bring up the command palette. Type "Manage Vendor Libraries" and select the "WPILib: Manage Vendor Libraries" option. Then, select the "Install new library (online)" option. + +.. image:: images/adding-offline-library.png + +Paste the following URL into the box that pops up: + +``https://maven.photonvision.org/repository/internal/org/photonvision/photonlib-json/1.0/photonlib-json-1.0.json`` + +.. note:: It is recommended to Build Robot Code at least once when connected to the Internet before heading to an area where Internet connectivity is limited (for example, a competition). This ensures that the relevant files are downloaded to your filesystem. + +Offline Install - Java/C++ +-------------------------- +This installation option is currently a work-in-progress. For now, we recommend users use the online installation method. + +Install - Python +---------------- +Add photonlibpy to `pyproject.toml`. + +.. code-block:: toml + + # Other pip packages to install + requires = [ + "photonlibpy", + ] + +See `The WPILib/RobotPy docs `_ for more information on using `pyproject.toml`. diff --git a/docs/source/docs/programming/photonlib/controlling-led.rst b/docs/source/docs/programming/photonlib/controlling-led.rst new file mode 100644 index 0000000000..b4a018670f --- /dev/null +++ b/docs/source/docs/programming/photonlib/controlling-led.rst @@ -0,0 +1,14 @@ +Controlling LEDs +================= +You can control the vision LEDs of supported hardware via PhotonLib using the ``setLED()`` method on a ``PhotonCamera`` instance. In Java and C++, a ``VisionLEDMode`` enum class is provided to choose values from. These values include ``kOff``, ``kOn``, ``kBlink``, and ``kDefault``. ``kDefault`` uses the default LED value from the selected pipeline. + +.. tab-set-code:: + .. code-block:: java + + // Blink the LEDs. + camera.setLED(VisionLEDMode.kBlink); + + .. code-block:: c++ + + // Blink the LEDs.
+ camera.SetLED(photonlib::VisionLEDMode::kBlink); diff --git a/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst new file mode 100644 index 0000000000..14c158d5d2 --- /dev/null +++ b/docs/source/docs/programming/photonlib/driver-mode-pipeline-index.rst @@ -0,0 +1,53 @@ +Driver Mode and Pipeline Index/Latency +====================================== + +After :ref:`creating a PhotonCamera `, one can toggle Driver Mode and change the Pipeline Index of the vision program from robot code. + +Toggle Driver Mode +------------------ +You can use the ``setDriverMode()``/``SetDriverMode()`` methods (Java and C++ respectively) to toggle driver mode from your robot program. Driver mode is an unfiltered / normal view of the camera to be used while driving the robot. + +.. tab-set-code:: + + .. code-block:: java + + // Set driver mode to on. + camera.setDriverMode(true); + + .. code-block:: C++ + + // Set driver mode to on. + camera.SetDriverMode(true); + +Setting the Pipeline Index +-------------------------- +You can use the ``setPipelineIndex()``/``SetPipelineIndex()`` methods (Java and C++ respectively) to dynamically change the vision pipeline from your robot program. + +.. tab-set-code:: + + .. code-block:: java + + // Change pipeline to 2 + camera.setPipelineIndex(2); + + .. code-block:: C++ + + // Change pipeline to 2 + camera.SetPipelineIndex(2); + +Getting the Pipeline Latency +---------------------------- +You can also get the pipeline latency from a pipeline result using the ``getLatencyMillis()``/``GetLatency()`` (Java and C++ respectively) methods on a ``PhotonPipelineResult``. + +.. tab-set-code:: + .. code-block:: java + + // Get the pipeline latency. + double latencySeconds = result.getLatencyMillis() / 1000.0; + + .. code-block:: c++ + + // Get the pipeline latency. + units::second_t latency = result.GetLatency(); + +.. note:: The C++ version of PhotonLib returns the latency in a unit container. For more information on the Units library, see `here `_. diff --git a/docs/source/docs/programming/photonlib/getting-target-data.rst b/docs/source/docs/programming/photonlib/getting-target-data.rst new file mode 100644 index 0000000000..1e9ddc662a --- /dev/null +++ b/docs/source/docs/programming/photonlib/getting-target-data.rst @@ -0,0 +1,241 @@ +Getting Target Data +=================== + +Constructing a PhotonCamera +--------------------------- + +What is a PhotonCamera? +^^^^^^^^^^^^^^^^^^^^^^^ +``PhotonCamera`` is a class in PhotonLib that allows a user to interact with one camera that is connected to hardware that is running PhotonVision. Through this class, users can retrieve yaw, pitch, roll, robot-relative pose, latency, and a wealth of other information. + + +The ``PhotonCamera`` class has two constructors: one that takes a ``NetworkTable`` and another that takes in the name of the network table that PhotonVision is broadcasting information over. For ease of use, it is recommended to use the latter. The name of the NetworkTable (for the string constructor) should be the same as the camera's nickname (from the PhotonVision UI). + +.. tab-set-code:: + + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java + :language: java + :lines: 51-52 + + ..
rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/include/Robot.h + :language: c++ + :lines: 42-43 + + .. code-block:: python + + # Change this to match the name of your camera as shown in the web ui + self.camera = PhotonCamera("your_camera_name_here") + + +.. warning:: Teams must have unique names for all of their cameras regardless of which coprocessor they are attached to. + +Getting the Pipeline Result +--------------------------- + +What is a Photon Pipeline Result? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A ``PhotonPipelineResult`` is a container that contains all information about currently detected targets from a ``PhotonCamera``. You can retrieve the latest pipeline result using the PhotonCamera instance. + +Use the ``getLatestResult()``/``GetLatestResult()`` methods (Java and C++ respectively) to obtain the latest pipeline result. An advantage of using this method is that it returns a container with information that is guaranteed to be from the same timestamp. This is important if you are using this data for latency compensation or in an estimator. + +.. tab-set-code:: + + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/aimattarget/Robot.java + :language: java + :lines: 79-80 + + .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/aimattarget/cpp/Robot.cpp + :language: c++ + :lines: 35-36 + + .. code-block:: python + + # Query the latest result from PhotonVision + result = self.camera.getLatestResult() + + + +.. note:: Unlike other vision software solutions, using the latest result guarantees that all information is from the same timestamp. This is achievable because the PhotonVision backend sends a byte-packed string of data which is then deserialized by PhotonLib to get target data. For more information, check out the `PhotonLib source code `_. + + + +Checking for Existence of Targets +--------------------------------- +Each pipeline result has a ``hasTargets()``/``HasTargets()`` (Java and C++ respectively) method to inform the user as to whether the result contains any targets. + +.. tab-set-code:: + .. code-block:: java + + // Check if the latest result has any targets. + boolean hasTargets = result.hasTargets(); + + .. code-block:: c++ + + // Check if the latest result has any targets. + bool hasTargets = result.HasTargets(); + + .. code-block:: python + + # Check if the latest result has any targets. + hasTargets = result.hasTargets() + +.. warning:: In Java/C++, you must *always* check if the result has a target via ``hasTargets()``/``HasTargets()`` before getting targets or else you may get a null pointer exception. Further, you must use the same result in every subsequent call in that loop. + + +Getting a List of Targets +------------------------- + +What is a Photon Tracked Target? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A tracked target contains information about each target from a pipeline result. This information includes yaw, pitch, area, and robot relative pose. + + +You can get a list of tracked targets using the ``getTargets()``/``GetTargets()`` (Java and C++ respectively) method from a pipeline result. + +.. tab-set-code:: + .. code-block:: java + + // Get a list of currently tracked targets. + List<PhotonTrackedTarget> targets = result.getTargets(); + + ..
code-block:: c++ + + // Get a list of currently tracked targets. + wpi::ArrayRef<photonlib::PhotonTrackedTarget> targets = result.GetTargets(); + + .. code-block:: python + + # Get a list of currently tracked targets. + targets = result.getTargets() + +Getting the Best Target +----------------------- +You can get the :ref:`best target ` using the ``getBestTarget()``/``GetBestTarget()`` (Java and C++ respectively) method from the pipeline result. + +.. tab-set-code:: + .. code-block:: java + + // Get the current best target. + PhotonTrackedTarget target = result.getBestTarget(); + + .. code-block:: c++ + + // Get the current best target. + photonlib::PhotonTrackedTarget target = result.GetBestTarget(); + + + .. code-block:: python + + # TODO - Not currently supported + + +Getting Data From A Target +-------------------------- +* double ``getYaw()``/``GetYaw()``: The yaw of the target in degrees (positive right). +* double ``getPitch()``/``GetPitch()``: The pitch of the target in degrees (positive up). +* double ``getArea()``/``GetArea()``: The area (how much of the camera feed the bounding box takes up) as a percent (0-100). +* double ``getSkew()``/``GetSkew()``: The skew of the target in degrees (counter-clockwise positive). +* double[] ``getCorners()``/``GetCorners()``: The 4 corners of the minimum bounding box rectangle. +* Transform2d ``getCameraToTarget()``/``GetCameraToTarget()``: The camera to target transform. See `2d transform documentation here `_. + + +.. tab-set-code:: + .. code-block:: java + + // Get information from target. + double yaw = target.getYaw(); + double pitch = target.getPitch(); + double area = target.getArea(); + double skew = target.getSkew(); + Transform2d pose = target.getCameraToTarget(); + List<TargetCorner> corners = target.getCorners(); + + .. code-block:: c++ + + // Get information from target. + double yaw = target.GetYaw(); + double pitch = target.GetPitch(); + double area = target.GetArea(); + double skew = target.GetSkew(); + frc::Transform2d pose = target.GetCameraToTarget(); + wpi::SmallVector<std::pair<double, double>, 4> corners = target.GetCorners(); + + .. code-block:: python + + # Get information from target. + yaw = target.getYaw() + pitch = target.getPitch() + area = target.getArea() + skew = target.getSkew() + pose = target.getCameraToTarget() + corners = target.getDetectedCorners() + +Getting AprilTag Data From A Target +----------------------------------- +.. note:: All of the data above (**except skew**) is available when using AprilTags. + +* int ``getFiducialId()``/``GetFiducialId()``: The ID of the detected fiducial marker. +* double ``getPoseAmbiguity()``/``GetPoseAmbiguity()``: How ambiguous the pose of the target is (see below). +* Transform3d ``getBestCameraToTarget()``/``GetBestCameraToTarget()``: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the lowest reprojection error. +* Transform3d ``getAlternateCameraToTarget()``/``GetAlternateCameraToTarget()``: Get the transform that maps camera space (X = forward, Y = left, Z = up) to object/fiducial tag space (X forward, Y left, Z up) with the highest reprojection error. + +.. tab-set-code:: + .. code-block:: java + + // Get information from target. + int targetID = target.getFiducialId(); + double poseAmbiguity = target.getPoseAmbiguity(); + Transform3d bestCameraToTarget = target.getBestCameraToTarget(); + Transform3d alternateCameraToTarget = target.getAlternateCameraToTarget(); + + .. code-block:: c++ + + // Get information from target.
+ int targetID = target.GetFiducialId(); + double poseAmbiguity = target.GetPoseAmbiguity(); + frc::Transform3d bestCameraToTarget = target.GetBestCameraToTarget(); + frc::Transform3d alternateCameraToTarget = target.GetAlternateCameraToTarget(); + + .. code-block:: python + + # Get information from target. + targetID = target.getFiducialId() + poseAmbiguity = target.getPoseAmbiguity() + bestCameraToTarget = target.getBestCameraToTarget() + alternateCameraToTarget = target.getAlternateCameraToTarget() + +Saving Pictures to File +----------------------- +A ``PhotonCamera`` can save still images from the input or output video streams to file. This is useful for debugging what a camera is seeing while on the field and confirming targets are being identified properly. + +Images are stored within the PhotonVision configuration directory. Running the "Export" operation in the settings tab will download a .zip file which contains the image captures. + +.. tab-set-code:: + + .. code-block:: java + + // Capture pre-process camera stream image + camera.takeInputSnapshot(); + + // Capture post-process camera stream image + camera.takeOutputSnapshot(); + + .. code-block:: C++ + + // Capture pre-process camera stream image + camera.TakeInputSnapshot(); + + // Capture post-process camera stream image + camera.TakeOutputSnapshot(); + + .. code-block:: python + + # Capture pre-process camera stream image + camera.takeInputSnapshot() + + # Capture post-process camera stream image + camera.takeOutputSnapshot() + +.. note:: Saving images to file takes a bit of time and uses up disk space, so doing it frequently is not recommended. In general, the camera will save an image every 500ms. Calling these methods faster will not result in additional images. Consider tying image captures to a button press on the driver controller, or an appropriate point in an autonomous routine. diff --git a/docs/source/docs/programming/photonlib/images/adding-offline-library.png b/docs/source/docs/programming/photonlib/images/adding-offline-library.png new file mode 100644 index 0000000000..67afacfde6 Binary files /dev/null and b/docs/source/docs/programming/photonlib/images/adding-offline-library.png differ diff --git a/docs/source/docs/programming/photonlib/index.rst b/docs/source/docs/programming/photonlib/index.rst new file mode 100644 index 0000000000..d1d1c381e3 --- /dev/null +++ b/docs/source/docs/programming/photonlib/index.rst @@ -0,0 +1,12 @@ +PhotonLib: Robot Code Interface +=============================== + +.. toctree:: + :maxdepth: 1 + + adding-vendordep + getting-target-data + using-target-data + robot-pose-estimator + driver-mode-pipeline-index + controlling-led diff --git a/docs/source/docs/programming/photonlib/robot-pose-estimator.rst b/docs/source/docs/programming/photonlib/robot-pose-estimator.rst new file mode 100644 index 0000000000..c367c926f8 --- /dev/null +++ b/docs/source/docs/programming/photonlib/robot-pose-estimator.rst @@ -0,0 +1,113 @@ +AprilTags and PhotonPoseEstimator +================================= + +.. note:: For more information on methods to get AprilTag data, look :ref:`here `. + +PhotonLib includes a ``PhotonPoseEstimator`` class, which allows you to combine the pose data from all tags in view in order to get a field relative pose. The ``PhotonPoseEstimator`` class works with one camera per object instance, but more than one instance may be created.
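As a roadmap for the sections below, here is a condensed Java sketch of the whole flow (each piece is explained in the following sections; ``drivetrainPoseEstimator`` is assumed to be your existing WPILib drivetrain pose estimator):

.. code-block:: java

    // Construct the camera, field layout, robot-to-camera transform, and estimator.
    PhotonCamera cam = new PhotonCamera("testCamera");
    AprilTagFieldLayout layout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField();
    Transform3d robotToCam = new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0, 0, 0));
    PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(layout, PoseStrategy.MULTI_TAG_PNP_ON_COPROCESSOR, cam, robotToCam);

    // Every loop: if a new estimate is available, feed it to the drivetrain pose estimator.
    photonPoseEstimator.update().ifPresent(est ->
        drivetrainPoseEstimator.addVisionMeasurement(est.estimatedPose.toPose2d(), est.timestampSeconds));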
+ +Creating an ``AprilTagFieldLayout`` +----------------------------------- +``AprilTagFieldLayout`` is used to represent a layout of AprilTags within a space (field, shop at home, classroom, etc.). WPILib provides a JSON that describes the layout of AprilTags on the field which you can then use in the AprilTagFieldLayout constructor. You can also specify a custom layout. + +The API documentation can be found here: `Java `_ and `C++ `_. + +.. tab-set-code:: + .. code-block:: java + + // The field from AprilTagFields will be different depending on the game. + AprilTagFieldLayout aprilTagFieldLayout = AprilTagFields.k2024Crescendo.loadAprilTagLayoutField(); + + .. code-block:: c++ + + // The parameter for LoadAprilTagLayoutField will be different depending on the game. + frc::AprilTagFieldLayout aprilTagFieldLayout = frc::LoadAprilTagLayoutField(frc::AprilTagField::k2024Crescendo); + + +Creating a ``PhotonPoseEstimator`` +---------------------------------- +The PhotonPoseEstimator has a constructor that takes an ``AprilTagFieldLayout`` (see above), ``PoseStrategy``, ``PhotonCamera``, and ``Transform3d``. ``PoseStrategy`` has six possible values: + +* MULTI_TAG_PNP_ON_COPROCESSOR + * Calculates a new robot position estimate by combining all visible tag corners. Recommended for all teams as it will be the most accurate. + * Must configure the AprilTagFieldLayout properly in the UI; please see :ref:`here ` for more information. +* LOWEST_AMBIGUITY + * Choose the Pose with the lowest ambiguity. +* CLOSEST_TO_CAMERA_HEIGHT + * Choose the Pose which is closest to the camera height. +* CLOSEST_TO_REFERENCE_POSE + * Choose the Pose which is closest to the pose from setReferencePose(). +* CLOSEST_TO_LAST_POSE + * Choose the Pose which is closest to the last pose calculated. +* AVERAGE_BEST_TARGETS + * Choose the Pose which is the average of all the poses from each tag. + +.. tab-set-code:: + .. code-block:: java + + //Forward Camera + cam = new PhotonCamera("testCamera"); + Transform3d robotToCam = new Transform3d(new Translation3d(0.5, 0.0, 0.5), new Rotation3d(0,0,0)); //Cam mounted facing forward, half a meter forward of center, half a meter up from center. + + // Construct PhotonPoseEstimator + PhotonPoseEstimator photonPoseEstimator = new PhotonPoseEstimator(aprilTagFieldLayout, PoseStrategy.CLOSEST_TO_REFERENCE_POSE, cam, robotToCam); + + .. code-block:: c++ + + // Forward Camera + std::shared_ptr<photonlib::PhotonCamera> cameraOne = + std::make_shared<photonlib::PhotonCamera>("testCamera"); + // Camera is mounted facing forward, half a meter forward of center, half a + // meter up from center. + frc::Transform3d robotToCam = + frc::Transform3d(frc::Translation3d(0.5_m, 0_m, 0.5_m), + frc::Rotation3d(0_rad, 0_rad, 0_rad)); + + // ... Add other cameras here + + // Assemble the list of cameras & mount locations + std::vector< + std::pair<std::shared_ptr<photonlib::PhotonCamera>, frc::Transform3d>> + cameras; + cameras.push_back(std::make_pair(cameraOne, robotToCam)); + + photonlib::RobotPoseEstimator estimator( + aprilTags, photonlib::CLOSEST_TO_REFERENCE_POSE, cameras); + +Using a ``PhotonPoseEstimator`` +------------------------------- +Calling ``update()`` on your ``PhotonPoseEstimator`` will return an ``EstimatedRobotPose``, which includes a ``Pose3d`` of the latest estimated pose (using the selected strategy) along with a ``double`` of the timestamp when the robot pose was estimated. You should be updating your `drivetrain pose estimator `_ with the result from the ``PhotonPoseEstimator`` every loop using ``addVisionMeasurement()``. + +.. tab-set-code:: + ..
rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/357d8a518a93f7a1f8084a79449249e613b605a7/photonlib-java-examples/apriltagExample/src/main/java/frc/robot/PhotonCameraWrapper.java + :language: java + :lines: 85-88 + + .. code-block:: c++ + + std::pair<frc::Pose2d, units::millisecond_t> getEstimatedGlobalPose( + frc::Pose3d prevEstimatedRobotPose) { + robotPoseEstimator.SetReferencePose(prevEstimatedRobotPose); + units::millisecond_t currentTime = frc::Timer::GetFPGATimestamp(); + auto result = robotPoseEstimator.Update(); + if (result.second) { + return std::make_pair<>(result.first.ToPose2d(), + currentTime - result.second); + } else { + return std::make_pair(frc::Pose2d(), 0_ms); + } + } + +Additional ``PhotonPoseEstimator`` Methods +------------------------------------------ + +``setReferencePose(Pose3d referencePose)`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Updates the stored reference pose when using the CLOSEST_TO_REFERENCE_POSE strategy. + +``setLastPose(Pose3d lastPose)`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Update the stored last pose. Useful for setting the initial estimate when using the CLOSEST_TO_LAST_POSE strategy. diff --git a/docs/source/docs/programming/photonlib/using-target-data.rst b/docs/source/docs/programming/photonlib/using-target-data.rst new file mode 100644 index 0000000000..6df0b7fec0 --- /dev/null +++ b/docs/source/docs/programming/photonlib/using-target-data.rst @@ -0,0 +1,97 @@ +Using Target Data +================= + +A ``PhotonUtils`` class with helpful common calculations is included within ``PhotonLib`` to aid teams in using target data in order to get positional information on the field. This class contains methods such as ``calculateDistanceToTargetMeters()``/``CalculateDistanceToTarget()`` and ``estimateTargetTranslation2d()``/``EstimateTargetTranslation()`` (Java and C++ respectively). + +Estimating Field Relative Pose with AprilTags +--------------------------------------------- +``estimateFieldToRobotAprilTag(Transform3d cameraToTarget, Pose3d fieldRelativeTagPose, Transform3d cameraToRobot)`` returns your robot's ``Pose3d`` on the field using the pose of the AprilTag relative to the camera, pose of the AprilTag relative to the field, and the transform from the camera to the origin of the robot. + +.. tab-set-code:: + .. code-block:: java + + // Calculate robot's field relative pose + Pose3d robotPose = PhotonUtils.estimateFieldToRobotAprilTag(target.getBestCameraToTarget(), aprilTagFieldLayout.getTagPose(target.getFiducialId()).get(), cameraToRobot); + .. code-block:: c++ + + //TODO + +Estimating Field Relative Pose (Traditional) +-------------------------------------------- + +You can get your robot's ``Pose2d`` on the field using various camera data, target yaw, gyro angle, target pose, and camera position. This method estimates the target's relative position using ``estimateCameraToTargetTranslation`` (which uses pitch and yaw to estimate range and heading), and the robot's gyro to estimate the rotation of the target. + +.. tab-set-code:: + .. code-block:: java + + // Calculate robot's field relative pose + Pose2d robotPose = PhotonUtils.estimateFieldToRobot( + kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, Rotation2d.fromDegrees(-target.getYaw()), gyro.getRotation2d(), targetPose, cameraToRobot); + + ..
code-block:: c++ + + // Calculate robot's field relative pose + frc::Pose2d robotPose = photonlib::EstimateFieldToRobot( + kCameraHeight, kTargetHeight, kCameraPitch, kTargetPitch, frc::Rotation2d(units::degree_t(-target.GetYaw())), gyro.GetRotation2d(), targetPose, cameraToRobot); + + +Calculating Distance to Target +------------------------------ +If your camera is at a fixed height on your robot and the height of the target is fixed, you can calculate the distance to the target based on your camera's pitch and the pitch to the target. + +.. tab-set-code:: + + + .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-java-examples/src/main/java/org/photonlib/examples/getinrange/Robot.java + :language: java + :lines: 78-94 + + .. rli:: https://github.com/PhotonVision/photonvision/raw/a3bcd3ac4f88acd4665371abc3073bdbe5effea8/photonlib-cpp-examples/src/main/cpp/examples/getinrange/cpp/Robot.cpp + :language: cpp + :lines: 33-46 + +.. note:: The C++ version of PhotonLib uses the Units library. For more information, see `here `_. + +Calculating Distance Between Two Poses +-------------------------------------- +``getDistanceToPose(Pose2d robotPose, Pose2d targetPose)`` allows you to calculate the distance between two poses. This is useful when using AprilTags, given that there may not be an AprilTag directly on the target. + +.. tab-set-code:: + .. code-block:: java + + double distanceToTarget = PhotonUtils.getDistanceToPose(robotPose, targetPose); + + .. code-block:: c++ + + //TODO + +Estimating Camera Translation to Target +--------------------------------------- +You can get a `translation `_ to the target based on the distance to the target (calculated above) and angle to the target (yaw). + +.. tab-set-code:: + .. code-block:: java + + // Calculate a translation from the camera to the target. + Translation2d translation = PhotonUtils.estimateCameraToTargetTranslation( + distanceMeters, Rotation2d.fromDegrees(-target.getYaw())); + + .. code-block:: c++ + + // Calculate a translation from the camera to the target. + frc::Translation2d translation = photonlib::PhotonUtils::EstimateCameraToTargetTranslation( + distance, frc::Rotation2d(units::degree_t(-target.GetYaw()))); + +.. note:: We negate the yaw from the camera to convert from CV (computer vision) conventions to standard mathematical conventions. In standard mathematical conventions, as you turn counter-clockwise, angles become more positive. + +Getting the Yaw To a Pose +------------------------- +``getYawToPose(Pose2d robotPose, Pose2d targetPose)`` returns the ``Rotation2d`` between your robot and a target. This is useful when turning towards an arbitrary target on the field (ex. the center of the hub in 2022). + +.. tab-set-code:: + .. code-block:: java + + Rotation2d targetYaw = PhotonUtils.getYawToPose(robotPose, targetPose); + .. code-block:: c++ + + //TODO diff --git a/docs/source/docs/reflectiveAndShape/3D.rst b/docs/source/docs/reflectiveAndShape/3D.rst new file mode 100644 index 0000000000..bc820bb8b1 --- /dev/null +++ b/docs/source/docs/reflectiveAndShape/3D.rst @@ -0,0 +1,24 @@ +3D Tuning +========= + +In 3D mode, the SolvePNP algorithm is used to compute the position and rotation of the target relative to the robot. This requires your :ref:`camera to be calibrated ` which can be done through the cameras tab. + +The target model dropdown is used to select the target model used to compute target position.
This should match the target your camera will be tracking. + +If SolvePNP is working correctly, the target should be displayed as a small rectangle within the "Target Location" minimap. The X/Y/Angle reading will also be displayed in the "Target Info" card. + +.. raw:: html + + + + +Contour Simplification +---------------------- + +3D mode internally computes a polygon that approximates the target contour being tracked. This polygon is used to detect the extreme corners of the target. The contour simplification slider changes how far from the original contour the approximation is allowed to deviate. Note that the approximate polygon is drawn on the output image for tuning. diff --git a/docs/source/docs/reflectiveAndShape/contour-filtering.rst b/docs/source/docs/reflectiveAndShape/contour-filtering.rst new file mode 100644 index 0000000000..1d4367cd4a --- /dev/null +++ b/docs/source/docs/reflectiveAndShape/contour-filtering.rst @@ -0,0 +1,79 @@ +Contour Filtering and Grouping +============================== + +Contours that make it past thresholding are filtered and grouped so that only likely targets remain. + +Filtering Options +^^^^^^^^^^^^^^^^^ + +Reflective +---------- + +Contours can be filtered by area, width/height ratio, "fullness", and "speckle rejection" percentage. + +Area filtering adjusts the percentage of overall image area that contours are allowed to occupy. The area of valid contours is shown in the "target info" card on the right. + +Ratio adjusts the width to height ratio of allowable contours. For example, a width to height filtering range of [2, 3] would allow through targets that are 250 x 100 pixels in size. + +Fullness is a measurement of the ratio between the contour's area and the area of its bounding rectangle. This can be used to reject contours that are, for example, solid blobs. + +Finally, speckle rejection is an algorithm that can discard contours whose area is below a certain percentage of the average area of all visible contours. This might be useful in rejecting stray lights or image noise. + +.. raw:: html + + + +Colored Shape +------------- + +The contours tab has additional options for specifying the properties of your colored shape. The target shape types are: + +* Circle - No edges +* Triangle - 3 edges +* Quadrilateral - 4 edges +* Polygon - Any number of edges + +.. image:: images/triangle.png + :width: 600 + :alt: Dropdown to select the colored shape pipeline type. + +Only the settings used for the current target shape are available. + +* Shape Simplification - This is the only setting available for polygon, triangle, and quadrilateral target shapes. If you are having issues with edges being "noisy" or "unclean", adjust this setting to be higher (>75). This high setting helps prevent imperfections in the edge from being counted as a separate edge. + +* Circle Match Distance - How close the centroid of a contour must be to the center of the circle in order for them to be matched. This value is usually pretty small (<25) as you usually only want to identify circles that are nearly centered in the contour. + +* Radius - Percentage of the frame that the radius of the circle represents. + +* Max Canny Threshold - This sets the amount of change between pixels needed to be considered an edge. The smaller it is, the more false circles may be detected. Circles with more points along their ring having high contrast values will be returned first. + +* Circle Accuracy - This determines how perfect the circle contour must be in order to be considered a circle.
Low values (<40) are required to detect things that aren't perfect circles. + +.. image:: images/pumpkin.png + :width: 600 + :alt: Dropdown to select the colored shape pipeline type. + +Contour Grouping and Sorting +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These options change how contours are grouped together and sorted. Target grouping can pair adjacent contours, such as the targets found in 2019. Target intersection defines where the targets would intersect if you extended them infinitely, for example, to only group targets tipped "towards" each other in 2019. + +Finally, target sort defines how targets are ranked, from "best" to "worst." The available options are: + +- Largest +- Smallest +- Highest (towards the top of the image) +- Lowest +- Rightmost (Best target on the right, worst on left) +- Leftmost +- Centermost + +.. raw:: html + + diff --git a/docs/source/docs/reflectiveAndShape/images/hsl_top.png b/docs/source/docs/reflectiveAndShape/images/hsl_top.png new file mode 100644 index 0000000000..b203b560bd Binary files /dev/null and b/docs/source/docs/reflectiveAndShape/images/hsl_top.png differ diff --git a/docs/source/docs/reflectiveAndShape/images/pumpkin.png b/docs/source/docs/reflectiveAndShape/images/pumpkin.png new file mode 100644 index 0000000000..f3979b6aad Binary files /dev/null and b/docs/source/docs/reflectiveAndShape/images/pumpkin.png differ diff --git a/docs/source/docs/reflectiveAndShape/images/triangle.png b/docs/source/docs/reflectiveAndShape/images/triangle.png new file mode 100644 index 0000000000..e54e0da8c7 Binary files /dev/null and b/docs/source/docs/reflectiveAndShape/images/triangle.png differ diff --git a/docs/source/docs/reflectiveAndShape/index.rst b/docs/source/docs/reflectiveAndShape/index.rst new file mode 100644 index 0000000000..fab306351b --- /dev/null +++ b/docs/source/docs/reflectiveAndShape/index.rst @@ -0,0 +1,10 @@ +Colored Shape & Reflective +========================== + +.. toctree:: + :maxdepth: 0 + :titlesonly: + + thresholding + contour-filtering + 3D diff --git a/docs/source/docs/reflectiveAndShape/thresholding.rst b/docs/source/docs/reflectiveAndShape/thresholding.rst new file mode 100644 index 0000000000..df6e886090 --- /dev/null +++ b/docs/source/docs/reflectiveAndShape/thresholding.rst @@ -0,0 +1,37 @@ +Thresholding +============ + +For colored shape detection, we want to tune our HSV thresholds such that only the goal color remains after the thresholding. The `HSV color representation `__ is similar to RGB in that it represents colors. However, HSV represents colors with hue, saturation and value components. Hue refers to the color, while saturation and value describe its richness and brightness. + +In PhotonVision, HSV thresholding is available in the "Threshold" tab. + +.. raw:: html + + + +Color Picker +------------ + +The color picker can be used to quickly adjust HSV values. "Set to average" will set the HSV range to the color of the pixel selected, while "shrink range" and "expand range" will change the HSV threshold to include or exclude the selected pixel, respectively. + +.. raw:: html + + + +Tuning Steps +------------ +The following steps were derived from FRC 254's 2016 Championship presentation on computer vision and allow you to accurately tune PhotonVision to track your target. + +In order to properly capture the colors that you want, first turn your exposure low until you have a mostly dark image with the target still showing. A darker image ensures that you don't see things that aren't your target (ex. overhead lights).
Be careful not to overexpose your image (you can tell by a target looking cyan/white instead of green when viewed through the video feed) since that can give you poor results. + +For HSV tuning, start with Hue, as it is the most important/differentiating factor when it comes to detecting color. You want to make the range for Hue as small as possible in order to get accurate tracking. Feel free to reference the chart below to help. After you have properly tuned Hue, tune for high saturation/color intensity (S), and then brightness (V). Using this method will decrease the likelihood that you need to calibrate on the field. Saturation and Value's upper bounds will often end up needing to be the maximum (255). + +.. image:: images/hsl_top.png + :width: 600 + :alt: HSV chart diff --git a/docs/source/docs/settings.rst b/docs/source/docs/settings.rst new file mode 100644 index 0000000000..6bbccb6ce9 --- /dev/null +++ b/docs/source/docs/settings.rst @@ -0,0 +1,21 @@ +Settings +======== + +.. image:: assets/settings.png + +General +^^^^^^^ +Here, you can view general data on your system, including version, hardware, your platform, and performance statistics. You can also export/import the settings in a .zip file or restart PhotonVision/your coprocessor. + +Networking +^^^^^^^^^^ +Here, you can set your team number, switch your IP between DHCP and static, and specify your host name. For more information about on-robot networking, click `here `_. + +The "team number" field will accept (in addition to a team number) an IP address or hostname. This is useful for testing PhotonVision on the same computer as a simulated robot program; +you can set the team number to "localhost", and PhotonVision will send data to the network tables in the simulated robot. + +.. note:: Something must be entered into the team number field if using PhotonVision on a robot. Using a team number is recommended (as opposed to an IP address or hostname). + +LEDs +^^^^ +If your coprocessor electronics support hardware-controlled LEDs and have the proper hardware configuration set up, here you can adjust the brightness of your LEDs. diff --git a/docs/source/docs/simulation/diagrams/SimArchitecture-deprecated.drawio.svg b/docs/source/docs/simulation/diagrams/SimArchitecture-deprecated.drawio.svg new file mode 100644 index 0000000000..6eff0a7373 --- /dev/null +++ b/docs/source/docs/simulation/diagrams/SimArchitecture-deprecated.drawio.svg @@ -0,0 +1,3 @@ + + +
+[Flattened drawio SVG markup elided. Diagram labels: User's PC (SimVisionSystem: SimPhotonCam, Geometry Calculations, Robot Pose, Target Pose, System Config; Network Tables; PhotonLib; User Code; Modeled Physics) mirroring Co-Processor (PhotonVision, Camera, Physical Environment, Real Physics) and RoboRIO (Network Tables, PhotonLib, User Code); "Same for Sim & Real Robot".]
\ No newline at end of file diff --git a/docs/source/docs/simulation/diagrams/SimArchitecture.drawio.svg b/docs/source/docs/simulation/diagrams/SimArchitecture.drawio.svg new file mode 100644 index 0000000000..69d941b907 --- /dev/null +++ b/docs/source/docs/simulation/diagrams/SimArchitecture.drawio.svg @@ -0,0 +1,4 @@ + + + +
+[Flattened drawio SVG markup elided. Diagram labels: User's PC (VisionSystemSim: PhotonCameraSim, Frame Generation, VisionTargetSim, Target Calculations, Robot Pose; Network Tables; PhotonLib; User Code; Simulated World) mirroring Co-Processor (PhotonVision, Camera, Physical Environment, Real World) and RoboRIO (Network Tables, PhotonLib, User Code); "Same for Sim & Real Robot".]
\ No newline at end of file diff --git a/docs/source/docs/simulation/hardware-in-the-loop-sim.rst b/docs/source/docs/simulation/hardware-in-the-loop-sim.rst new file mode 100644 index 0000000000..1594060c36 --- /dev/null +++ b/docs/source/docs/simulation/hardware-in-the-loop-sim.rst @@ -0,0 +1,38 @@ +Hardware In The Loop Simulation +=============================== + +Hardware in the loop simulation uses a physical device, such as a supported co-processor running PhotonVision, to enhance simulation capabilities. This is useful for developing and validating code before the camera is attached to a robot, as well as reducing the work required to use WPILib simulation with PhotonVision. + +Before continuing, ensure PhotonVision is installed on your target device. Instructions can be found :ref:`here ` for all devices. + +Your coprocessor and the computer running simulation will have to be connected to the same network, for example through a home router. Connecting the coprocessor directly to the computer will not work. + +To simulate with hardware in the loop, a one-line change is required. From the PhotonVision UI, go to the sidebar and select the Settings option. Within the Networking settings, find "Team Number/NetworkTables Server Address". + +During normal robot operation, a team's number would be entered into this field so that the PhotonVision coprocessor connects to the roboRIO as a NT client. Instead, enter the IP address of your computer running the simulation here. + +.. note:: + + To find the IP address of your Windows computer, open command prompt and run ``ipconfig``. + + .. code-block:: console + + C:/Users/you>ipconfig + + Windows IP Configuration + + Ethernet adapter Ethernet: + + Connection-specific DNS Suffix . : home + Link-local IPv6 Address . . . . . : fe80::b41d:e861:ef01:9dba%10 + IPv4 Address. . . . . . . . . . . : 192.168.254.13 + Subnet Mask . . . . . . . . . . . : 255.255.255.0 + Default Gateway . . . . . . . . . : 192.168.254.254 + +.. image:: images/coproc-client-to-desktop-sim.png + +No code changes are required; PhotonLib should function the same as in normal operation. + +Now launch simulation, and you should be able to see the PhotonVision table on your simulation's NetworkTables dashboard. + +.. image:: images/hardware-in-the-loop-sim.png diff --git a/docs/source/docs/simulation/images/SimArchitecture.svg b/docs/source/docs/simulation/images/SimArchitecture.svg new file mode 100644 index 0000000000..6eff0a7373 --- /dev/null +++ b/docs/source/docs/simulation/images/SimArchitecture.svg @@ -0,0 +1,3 @@ + + +
+[Flattened drawio SVG markup elided; content is identical to SimArchitecture-deprecated.drawio.svg above.]
\ No newline at end of file diff --git a/docs/source/docs/simulation/images/SimExampleField.png b/docs/source/docs/simulation/images/SimExampleField.png new file mode 100644 index 0000000000..833f1b8bf1 Binary files /dev/null and b/docs/source/docs/simulation/images/SimExampleField.png differ diff --git a/docs/source/docs/simulation/images/SimExampleFrame.png b/docs/source/docs/simulation/images/SimExampleFrame.png new file mode 100644 index 0000000000..6fb76a172c Binary files /dev/null and b/docs/source/docs/simulation/images/SimExampleFrame.png differ diff --git a/docs/source/docs/simulation/images/coproc-client-to-desktop-sim.png b/docs/source/docs/simulation/images/coproc-client-to-desktop-sim.png new file mode 100644 index 0000000000..6d619b73ac Binary files /dev/null and b/docs/source/docs/simulation/images/coproc-client-to-desktop-sim.png differ diff --git a/docs/source/docs/simulation/images/hardware-in-the-loop-sim.png b/docs/source/docs/simulation/images/hardware-in-the-loop-sim.png new file mode 100644 index 0000000000..46af75f6cb Binary files /dev/null and b/docs/source/docs/simulation/images/hardware-in-the-loop-sim.png differ diff --git a/docs/source/docs/simulation/index.rst b/docs/source/docs/simulation/index.rst new file mode 100644 index 0000000000..1ec3a38d0c --- /dev/null +++ b/docs/source/docs/simulation/index.rst @@ -0,0 +1,10 @@ +Simulation +========== + +.. toctree:: + :maxdepth: 0 + :titlesonly: + + simulation + simulation-deprecated + hardware-in-the-loop-sim diff --git a/docs/source/docs/simulation/simulation-deprecated.rst b/docs/source/docs/simulation/simulation-deprecated.rst new file mode 100644 index 0000000000..d47d625d78 --- /dev/null +++ b/docs/source/docs/simulation/simulation-deprecated.rst @@ -0,0 +1,94 @@ +Simulation Support in PhotonLib (Deprecated) +============================================ + +.. attention:: This page details the pre-2024 simulation support. For current Java simulation support, see :doc:`/docs/simulation/simulation`. + +What Is Supported? +------------------ + +PhotonLib supports simulation of a camera and coprocessor running PhotonVision moving about a field on a robot. + +You can use this to help validate your robot code's behavior in simulation without needing a physical robot. + +Simulation Vision World Model +----------------------------- + +Sim-specific classes are provided to model sending one frame of a camera image through PhotonVision. Based on what targets are visible, results are published to NetworkTables. + +While processing, the given robot ``Pose2d`` is used to analyze which targets should be in view, and determine where they would have shown up in the camera image. + +Targets are considered in view if: + +1) Their centroid is within the field of view of the camera. +2) The camera is not in driver mode. +3) The target's in-image pixel size is greater than ``minTargetArea`` +4) The distance from the camera to the target is less than ``maxLEDRange`` + +.. warning:: Not all network tables objects are updated in simulation. The interaction through PhotonLib remains the same. Actual camera images are also not simulated. + +Latency of processing is not yet modeled. + +.. image:: diagrams/SimArchitecture-deprecated.drawio.svg + :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one. + +Simulated Vision System +----------------------- + +A ``SimVisionSystem`` represents the camera and coprocessor running PhotonVision moving around on the field. 
+ +It requires a number of pieces of configuration to accurately simulate your physical setup. Match them to your configuration in PhotonVision, and to your robot's physical dimensions. + +.. tab-set-code:: + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java + :language: java + :lines: 73-93 + +After declaring the system, you should create and add one ``SimVisionTarget`` per target you are attempting to detect. + +.. tab-set-code:: + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java + :language: java + :lines: 95-111 + +Finally, while running the simulation, process simulated camera frames by providing the robot's pose to the system. + +.. tab-set-code:: + + .. rli:: https://raw.githubusercontent.com/PhotonVision/photonvision/80e16ece87c735e30755dea271a56a2ce217b588/photonlib-java-examples/simaimandrange/src/main/java/frc/robot/sim/DrivetrainSim.java + :language: java + :lines: 138-139 + +This will cause most NetworkTables fields to update properly, representing any targets that are in view of the robot. + +Robot software which uses PhotonLib to interact with a camera running PhotonVision should work the same as though a real camera were hooked up and active. + +Raw-Data Approach +----------------- + +Users may wish to directly provide target information based on an existing detailed simulation. + +A ``SimPhotonCamera`` can be created for this purpose. It provides an interface where the user can supply target data via a list of ``PhotonTrackedTarget`` objects. + +.. tab-set-code:: + + .. code-block:: java + + @Override + public void simulationInit() { + // ... + cam = new SimPhotonCamera("MyCamera"); + // ... + } + + @Override + public void simulationPeriodic() { + // ... + ArrayList<PhotonTrackedTarget> visibleTgtList = new ArrayList<>(); + visibleTgtList.add(new PhotonTrackedTarget(yawDegrees, pitchDegrees, area, skew, camToTargetTrans)); // Repeat for each target that you see + cam.submitProcessedFrame(0.0, visibleTgtList); + // ... + } + +Note that while there is less code and configuration required to get basic data into the simulation, this approach requires the user to implement much more code on their end to calculate the relative positions of the robot and target. If you already have this, the raw interface may be helpful. However, if you don't, you'll likely want to look at the Simulated Vision System first. diff --git a/docs/source/docs/simulation/simulation.rst b/docs/source/docs/simulation/simulation.rst new file mode 100644 index 0000000000..8b5780850a --- /dev/null +++ b/docs/source/docs/simulation/simulation.rst @@ -0,0 +1,226 @@ +Simulation Support in PhotonLib +=============================== + +.. attention:: This page details the current simulation support for Java. For other languages, see :doc:`/docs/simulation/simulation-deprecated`. + +What Is Simulated? +------------------ + +Simulation is a powerful tool for validating robot code without access to a physical robot. Read more about `simulation in WPILib `_. + +PhotonLib can simulate cameras on the field and generate target data approximating what would be seen in reality.
+This simulation attempts to include the following:
+
+- Camera Properties
+  - Field of view
+  - Lens distortion
+  - Image noise
+  - Framerate
+  - Latency
+- Target Data
+  - Detected / minimum-area-rectangle corners
+  - Center yaw/pitch
+  - Contour image area percentage
+  - Fiducial ID
+  - Fiducial ambiguity
+  - Fiducial solvePnP transform estimation
+- Camera Raw/Processed Streams (grayscale)
+
+.. note::
+
+   Simulation does NOT include the following:
+
+   - Full physical camera/world simulation (targets are automatically thresholded)
+   - Image thresholding process (camera gain, brightness, etc.)
+   - Pipeline switching
+   - Snapshots
+
+This scope was chosen to balance fidelity of the simulation with ease of setup, in a way that would best benefit most teams.
+
+.. image:: diagrams/SimArchitecture.drawio.svg
+   :alt: A diagram comparing the architecture of a real PhotonVision process to a simulated one.
+
+Drivetrain Simulation Prerequisite
+----------------------------------
+
+A prerequisite for simulating vision frames is knowing where the camera is on the field -- to utilize PhotonVision simulation, you'll need to supply the simulated robot pose periodically. This requires drivetrain simulation for your robot project if you want to generate camera frames as your robot moves around the field.
+
+References for using PhotonVision simulation with drivetrain simulation can be found in the `PhotonLib Java Examples `_ for both a differential drivetrain and a swerve drive.
+
+.. important:: The simulated drivetrain pose must be kept separate from the drivetrain estimated pose if a pose estimator is utilized.
+
+Vision System Simulation
+------------------------
+
+A ``VisionSystemSim`` represents the simulated world for one or more cameras, and contains the vision targets they can see. It is constructed with a unique label:
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // A vision system sim labelled as "main" in NetworkTables
+      VisionSystemSim visionSim = new VisionSystemSim("main");
+
+PhotonLib will use this label to put a ``Field2d`` widget on NetworkTables at ``/VisionSystemSim-[label]/Sim Field``. This label does not need to match any camera name or pipeline name in PhotonVision.
+
+Vision targets require a ``TargetModel``, which describes the shape of the target. For AprilTags, PhotonLib provides ``TargetModel.kAprilTag16h5`` for the tags used in 2023, and ``TargetModel.kAprilTag36h11`` for the tags used starting in 2024. For other target shapes, convenience constructors exist for spheres, cuboids, and planar rectangles. For example, a planar rectangle can be created with:
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // A 0.5 x 0.25 meter rectangular target
+      TargetModel targetModel = new TargetModel(0.5, 0.25);
+
+A ``TargetModel`` is paired with a target pose to create a ``VisionTargetSim``. A ``VisionTargetSim`` is added to the ``VisionSystemSim`` to become visible to all of its cameras.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // The pose of where the target is on the field.
+      // Its rotation determines where "forward" or the target x-axis points.
+      // Let's say this target is flat against the far wall center, facing the blue driver stations.
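+      // (These coordinates assume a roughly 16.5 x 8 meter field, as in recent FRC games -- adjust for your layout.)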
+      Pose3d targetPose = new Pose3d(16, 4, 2, new Rotation3d(0, 0, Math.PI));
+      // The given target model at the given pose
+      VisionTargetSim visionTarget = new VisionTargetSim(targetPose, targetModel);
+
+      // Add this vision target to the vision system simulation to make it visible
+      visionSim.addVisionTargets(visionTarget);
+
+.. note:: The pose of a ``VisionTargetSim`` object can be updated to simulate moving targets. Note, however, that this will break latency simulation for that target.
+
+For convenience, an ``AprilTagFieldLayout`` can also be added to automatically create a target for each of its AprilTags.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // The layout of AprilTags which we want to add to the vision system
+      AprilTagFieldLayout tagLayout = AprilTagFieldLayout.loadFromResource(AprilTagFields.k2024Crescendo.m_resourceFile);
+
+      visionSim.addAprilTags(tagLayout);
+
+.. note:: The poses of the AprilTags from this layout depend on its current alliance origin (e.g. blue or red). If this origin is changed later, the targets will have to be cleared from the ``VisionSystemSim`` and re-added.
+
+Camera Simulation
+-----------------
+
+Now that we have a simulation world with vision targets, we can add simulated cameras to view it.
+
+Before adding a simulated camera, we need to define its properties. This is done with the ``SimCameraProperties`` class:
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // The simulated camera properties
+      SimCameraProperties cameraProp = new SimCameraProperties();
+
+By default, this will create a 960 x 720 resolution camera with a 90 degree diagonal FOV (field of view) and no noise, distortion, or latency. If we want to change these properties, we can do so:
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // A 640 x 480 camera with a 100 degree diagonal FOV.
+      cameraProp.setCalibration(640, 480, Rotation2d.fromDegrees(100));
+      // Approximate detection noise with average and standard deviation error in pixels.
+      cameraProp.setCalibError(0.25, 0.08);
+      // Set the camera image capture framerate (Note: this is limited by robot loop rate).
+      cameraProp.setFPS(20);
+      // The average and standard deviation in milliseconds of image data latency.
+      cameraProp.setAvgLatencyMs(35);
+      cameraProp.setLatencyStdDevMs(5);
+
+These properties are used in a ``PhotonCameraSim``, which handles generating captured frames of the field from the simulated camera's perspective, and calculating the target data which is sent to the ``PhotonCamera`` being simulated.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // The PhotonCamera used in the real robot code.
+      PhotonCamera camera = new PhotonCamera("cameraName");
+
+      // The simulation of this camera. Its values used in real robot code will be updated.
+      PhotonCameraSim cameraSim = new PhotonCameraSim(camera, cameraProp);
+
+The ``PhotonCameraSim`` can now be added to the ``VisionSystemSim``. We have to define a robot-to-camera transform, which describes where the camera is relative to the robot pose (this can be measured in CAD or by hand).
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // Our camera is mounted 0.1 meters forward and 0.5 meters up from the robot pose,
+      // (Robot pose is considered the center of rotation at the floor level, or Z = 0)
+      Translation3d robotToCameraTrl = new Translation3d(0.1, 0, 0.5);
+      // and pitched 15 degrees up.
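+      // (In WPILib conventions, positive pitch tilts the camera downward, hence -15 degrees to pitch it up.)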
+      Rotation3d robotToCameraRot = new Rotation3d(0, Math.toRadians(-15), 0);
+      Transform3d robotToCamera = new Transform3d(robotToCameraTrl, robotToCameraRot);
+
+      // Add this camera to the vision system simulation with the given robot-to-camera transform.
+      visionSim.addCamera(cameraSim, robotToCamera);
+
+.. important:: You may add multiple cameras to one ``VisionSystemSim``, but not one camera to multiple ``VisionSystemSim``. All targets in the ``VisionSystemSim`` will be visible to all its cameras.
+
+If the camera is mounted on a mobile mechanism (like a turret), this transform can be updated in a periodic loop.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // The turret the camera is mounted on is rotated 5 degrees
+      Rotation3d turretRotation = new Rotation3d(0, 0, Math.toRadians(5));
+      robotToCamera = new Transform3d(
+          robotToCameraTrl.rotateBy(turretRotation),
+          robotToCameraRot.rotateBy(turretRotation));
+      visionSim.adjustCamera(cameraSim, robotToCamera);
+
+Updating The Simulation World
+-----------------------------
+
+To update the ``VisionSystemSim``, we simply have to pass in the simulated robot pose periodically (in ``simulationPeriodic()``).
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // Update with the simulated drivetrain pose. This should be called every loop in simulation.
+      visionSim.update(robotPoseMeters);
+
+Targets and cameras can be added and removed, and camera properties can be changed at any time.
+
+Visualizing Results
+-------------------
+
+Each ``VisionSystemSim`` has its own built-in ``Field2d`` for displaying object poses in the simulation world, such as the robot, simulated cameras, and actual/measured target poses.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // Get the built-in Field2d used by this VisionSystemSim
+      visionSim.getDebugField();
+
+.. figure:: images/SimExampleField.png
+
+   *A* ``VisionSystemSim``\ *'s internal* ``Field2d`` *customized with target images and colors, as seen in the* `swervedriveposeestsim `_ *example.*
+
+A ``PhotonCameraSim`` can also draw and publish generated camera frames to an MJPEG stream, similar to an actual PhotonVision process.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      // Enable the raw and processed streams. These are enabled by default.
+      cameraSim.enableRawStream(true);
+      cameraSim.enableProcessedStream(true);
+
+      // Enable drawing a wireframe visualization of the field to the camera streams.
+      // This is extremely resource-intensive and is disabled by default.
+      cameraSim.enableDrawWireframe(true);
+
+These streams follow the port order mentioned in :ref:`docs/installation/networking:Camera Stream Ports`. For example, a single simulated camera will have its raw stream at ``localhost:1181`` and processed stream at ``localhost:1182``, which can also be found in the CameraServer tab of Shuffleboard like a normal camera stream.
+
+.. figure:: images/SimExampleFrame.png
+
+   *A frame from the processed stream of a simulated camera viewing some 2023 AprilTags with the field wireframe enabled, as seen in the* `swervedriveposeestsim example `_.
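+
+Putting these pieces together, a minimal ``simulationPeriodic()`` might look like the sketch below. The ``drivetrainSim`` object and its ``getPose()`` accessor are placeholders for whatever drivetrain simulation your project uses; ``visionSim`` and ``camera`` are the objects created above.
+
+.. tab-set-code::
+
+   .. code-block:: java
+
+      @Override
+      public void simulationPeriodic() {
+          // Feed the ground-truth drivetrain pose to the vision simulation
+          visionSim.update(drivetrainSim.getPose());
+
+          // Robot code reads results exactly as it would from a real camera
+          var result = camera.getLatestResult();
+          if (result.hasTargets()) {
+              double targetYaw = result.getBestTarget().getYaw();
+              // ... use the target data as normal
+          }
+      }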
diff --git a/docs/source/docs/troubleshooting/camera-troubleshooting.rst b/docs/source/docs/troubleshooting/camera-troubleshooting.rst
new file mode 100644
index 0000000000..82c9249031
--- /dev/null
+++ b/docs/source/docs/troubleshooting/camera-troubleshooting.rst
@@ -0,0 +1,117 @@
+Camera Troubleshooting
+======================
+
+Pi Cameras
+----------
+
+If you haven't yet, please refer to :ref:`the Pi CSI Camera Configuration page ` for information on updating :code:`config.txt` for your use case. If you've tried that and things still aren't working, restart PhotonVision using the restart button in the settings tab, and press tilde (\`) in the web UI once connection is restored. This should show the most recent boot log.
+
++----------------------------------+--------------------------------------------------------+------------------------------------+
+|                                  | Expected output                                        | Bad                                |
++==================================+========================================================+====================================+
+| LibCamera driver initialization  | Successfully loaded libpicam shared object             | Failed to load native libraries!   |
++----------------------------------+--------------------------------------------------------+------------------------------------+
+| Camera detected                  | Adding local video device - "unicam" at "/dev/video0"  | No output from VisionSourceManager |
++----------------------------------+--------------------------------------------------------+------------------------------------+
+| VisionSource created             | Adding 1 configs to VMM.                               | No output from VisionSourceManager |
++----------------------------------+--------------------------------------------------------+------------------------------------+
+
+If the driver isn't loaded, you may be using a non-official Pi image, or an image that is not new enough. Try updating to the most recent image available (one released for 2023) -- if that doesn't resolve the problem, :ref:`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used.
+
+If the camera is not detected, the most likely cause is either a config.txt file incorrectly set up, or a ribbon cable attached backwards. Review the :ref:`picam configuration page `, and verify that the ribbon cable is properly oriented at both ends and *fully* inserted into the FFC connector. If it still doesn't work, :ref:`contact us` with your settings ZIP file and Pi version/camera version/config.txt file used.
+
+USB cameras
+-----------
+
+USB cameras supported by CSCore require no libcamera driver initialization to work -- however, similar troubleshooting steps apply. Restart PhotonVision using the restart button in the settings tab, and press tilde on your keyboard (\`) when you're in the web UI once connection is restored. We expect to see the following output:
+
++----------------------------------+--------------------------------------------------------+------------------------------------+
+|                                  | Expected output                                        | Bad                                |
++==================================+========================================================+====================================+
+| Camera detected                  | Adding local video device - "foobar" at "/dev/foobar"  | No output from VisionSourceManager |
++----------------------------------+--------------------------------------------------------+------------------------------------+
+| VisionSource created             | Adding 1 configs to VMM.                               | No output from VisionSourceManager |
++----------------------------------+--------------------------------------------------------+------------------------------------+
+
+Determining detected cameras in Video4Linux (v4l2)
+--------------------------------------------------
+
+On Linux devices (including Raspberry Pi), PhotonVision uses WPILib's CSCore to interact with video devices, which internally uses Video4Linux (v4l2). CSCore, and therefore Photon, requires that attached cameras have good v4l drivers for proper functionality. These should be built into the Linux kernel, and do not need to be installed manually. These steps can also be used to verify a valid picamera setup (from /boot/config.txt). The list-devices command will show all valid video devices detected, and list-formats will show the list of "video modes" each camera can be in.
+
+- For picams: edit the config.txt file as described in the :ref:`picam configuration page `
+- SSH into your Pi: :code:`ssh pi@photonvision.local` and enter the username "pi" and password "raspberry"
+- Run :code:`v4l2-ctl --list-devices` and :code:`v4l2-ctl --list-formats`
+
+We expect an output similar to the following. For picameras, note the "unicam" entry with path :code:`platform:3f801000.csi` (if we don't see this, that's bad), and a huge list of valid video formats. USB cameras should show up similarly in the output of these commands.
+
+.. tab-set::
+
+   .. tab-item:: Working
+
+      .. code-block::
+
+         pi@photonvision:~ $ v4l2-ctl --list-devices
+         unicam (platform:3f801000.csi):
+                 /dev/video0
+                 /dev/media3
+
+         bcm2835-codec-decode (platform:bcm2835-codec):
+                 /dev/video10
+                 /dev/video11
+                 /dev/video12
+                 /dev/video18
+                 /dev/video31
+                 /dev/media2
+
+         bcm2835-isp (platform:bcm2835-isp):
+                 /dev/video13
+                 /dev/video14
+                 /dev/video15
+                 /dev/video16
+                 /dev/video20
+                 /dev/video21
+                 /dev/video22
+                 /dev/video23
+                 /dev/media0
+                 /dev/media1
+
+         pi@photonvision:~ $ v4l2-ctl --list-formats
+         ioctl: VIDIOC_ENUM_FMT
+                 Type: Video Capture
+
+                 [0]: 'YUYV' (YUYV 4:2:2)
+                 [1]: 'UYVY' (UYVY 4:2:2)
+                 [2]: 'YVYU' (YVYU 4:2:2)
+                 [3]: 'VYUY' (VYUY 4:2:2)
+
+                 [42]: 'Y12P' (12-bit Greyscale (MIPI Packed))
+                 [43]: 'Y12 ' (12-bit Greyscale)
+                 [44]: 'Y14P' (14-bit Greyscale (MIPI Packed))
+                 [45]: 'Y14 ' (14-bit Greyscale)
+
+   .. tab-item:: Not Working
+
+      .. code-block::
+
+         pi@photonvision:~ $ v4l2-ctl --list-devices
+         bcm2835-codec-decode (platform:bcm2835-codec):
+                 /dev/video10
+                 /dev/video11
+                 /dev/video12
+                 /dev/video18
+                 /dev/video31
+                 /dev/media3
+         bcm2835-isp (platform:bcm2835-isp):
+                 /dev/video13
+                 /dev/video14
+                 /dev/video15
+                 /dev/video16
+                 /dev/video20
+                 /dev/video21
+                 /dev/video22
+                 /dev/video23
+                 /dev/media0
+                 /dev/media1
+         rpivid (platform:rpivid):
+                 /dev/video19
+                 /dev/media2
+         Cannot open device /dev/video0, exiting.
diff --git a/docs/source/docs/troubleshooting/common-errors.rst b/docs/source/docs/troubleshooting/common-errors.rst
new file mode 100644
index 0000000000..7f391c86b4
--- /dev/null
+++ b/docs/source/docs/troubleshooting/common-errors.rst
@@ -0,0 +1,69 @@
+Common Issues / Questions
+=========================
+
+This page will grow as needed in order to cover issues commonly seen by teams. If this page doesn't help you and you need further assistance, feel free to :ref:`Contact Us`.
+
+Known Issues
+------------
+
+All known issues can be found on our `GitHub page `_.
+
+PS3Eye
+^^^^^^
+
+Due to an issue with Linux kernels, the drivers for the PS3Eye are no longer supported.
+If you would still like to use the PS3Eye, you can downgrade your kernel with the following command: ``sudo CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt rpi-update 866751bfd023e72bd96a8225cf567e03c334ecc4``. Note: You must be connected to the internet to run the command.
+
+LED Control
+^^^^^^^^^^^
+
+The logic for controlling LED mode when multiple cameras are connected is not fully fleshed out. In its current state, LED control is only enabled when a Pi Camera Module is not in driver mode, meaning a USB camera on its own is unable to control the LEDs.
+
+For now, if you are using multiple cameras, it is recommended that teams set the value of the NetworkTables entry :code:`photonvision/ledMode` from the robot code to control LED state (see the sketch at the end of this page).
+
+Commonly Seen Issues
+--------------------
+
+Networking Issues
+^^^^^^^^^^^^^^^^^
+
+Please refer to our comprehensive :ref:`networking troubleshooting tips ` for debugging suggestions and possible causes.
+
+Camera won't show up
+^^^^^^^^^^^^^^^^^^^^
+
+Try these steps to :ref:`troubleshoot your camera connection `.
+
+If you are using a USB camera, it is possible your USB camera isn't supported by CSCore and therefore won't work with PhotonVision. See the :ref:`supported hardware page `, or the Camera Troubleshooting page above, for more information on determining this locally.
+
+Camera is consistently returning incorrect values when in 3D mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Read the tips on the :ref:`camera calibration page`, follow the advice there, and redo the calibration.
+
+Not getting data from PhotonLib
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. Ensure your coprocessor version and PhotonLib version match. The coprocessor version is shown in the settings tab, and the PhotonLib version can be found in your project's vendordep ``.json`` file.
+
+2. Ensure that you have your team number set properly.
+
+3. Use Glass to verify that PhotonVision has connected to the NetworkTables server served by your robot. With Glass connected in client mode to your RoboRIO, we expect to see "photonvision" listed under the Clients tab of the NetworkTables Info pane.
+
+.. image:: images/glass-connections.png
+   :width: 600
+   :alt: Using Glass to check NT connections
+
+4. When creating a ``PhotonCamera`` in code, does the ``cameraName`` provided match the name in the upper-right card of the web interface? Glass can be used to verify the RoboRIO is receiving NetworkTables data by inspecting the ``photonvision`` subtable for your camera nickname.
+
+.. image:: images/camera-subtable.png
+   :width: 600
+   :alt: Using Glass to check camera publishing
+
+Unable to download PhotonLib
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Ensure all of your network firewalls are disabled and you aren't on a school network.
+
+PhotonVision prompts for login on startup
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is normal. You don't need to connect a display to your Raspberry Pi to use PhotonVision; just navigate to the relevant webpage (ex. ``photonvision.local:5800``) in order to see the dashboard.
+
+Raspberry Pi enters into boot looping state when using PhotonVision
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is most commonly seen when your Pi doesn't have adequate power / is being undervolted. Ensure that your power supply is functioning properly.
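+
+Example: setting LED state from robot code
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As a minimal sketch of the LED workaround described above: PhotonLib's ``PhotonCamera.setLED()`` writes the LED mode request for you, so you don't need to touch NetworkTables directly. The camera name below is a placeholder -- use the nickname shown in the PhotonVision UI.
+
+.. code-block:: java
+
+   import edu.wpi.first.wpilibj.TimedRobot;
+   import org.photonvision.PhotonCamera;
+   import org.photonvision.common.hardware.VisionLEDMode;
+
+   public class Robot extends TimedRobot {
+       // Placeholder name -- must match the camera nickname in the PhotonVision UI
+       PhotonCamera camera = new PhotonCamera("YOUR_CAMERA_NAME");
+
+       @Override
+       public void teleopPeriodic() {
+           // Force the LEDs on; kOff, kBlink, and kDefault are also available
+           camera.setLED(VisionLEDMode.kOn);
+       }
+   }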
diff --git a/docs/source/docs/troubleshooting/images/camera-subtable.png b/docs/source/docs/troubleshooting/images/camera-subtable.png
new file mode 100644
index 0000000000..6014f965ff
Binary files /dev/null and b/docs/source/docs/troubleshooting/images/camera-subtable.png differ
diff --git a/docs/source/docs/troubleshooting/images/glass-connections.png b/docs/source/docs/troubleshooting/images/glass-connections.png
new file mode 100644
index 0000000000..2cccf1f8ec
Binary files /dev/null and b/docs/source/docs/troubleshooting/images/glass-connections.png differ
diff --git a/docs/source/docs/troubleshooting/index.rst b/docs/source/docs/troubleshooting/index.rst
new file mode 100644
index 0000000000..22d50f1457
--- /dev/null
+++ b/docs/source/docs/troubleshooting/index.rst
@@ -0,0 +1,10 @@
+Troubleshooting
+================
+
+.. toctree::
+   :maxdepth: 1
+
+   common-errors
+   logging
+   camera-troubleshooting
+   networking-troubleshooting
diff --git a/docs/source/docs/troubleshooting/logging.rst b/docs/source/docs/troubleshooting/logging.rst
new file mode 100644
index 0000000000..162e7d3fd6
--- /dev/null
+++ b/docs/source/docs/troubleshooting/logging.rst
@@ -0,0 +1,17 @@
+Logging
+=======
+
+.. note:: Logging is very helpful when trying to debug issues within PhotonVision, as it allows us to see what is happening within the program after it is run. Whenever reporting an issue to PhotonVision, we request that you include logs whenever possible.
+
+In addition to storing logs in timestamped files in the config directory, PhotonVision streams logs to the web dashboard. These logs can be viewed later by pressing the \` key. In this view, logs can be filtered by level or downloaded.
+
+.. note:: When the program first starts, it sends logs from startup to the client that first connects. This does not happen on subsequent connections.
+
+.. note:: Logs are stored inside the :code:`photonvision_config/logs` directory. Exporting the settings ZIP will also download all old logs for further review.
diff --git a/docs/source/docs/troubleshooting/networking-troubleshooting.rst b/docs/source/docs/troubleshooting/networking-troubleshooting.rst
new file mode 100644
index 0000000000..07e4586c00
--- /dev/null
+++ b/docs/source/docs/troubleshooting/networking-troubleshooting.rst
@@ -0,0 +1,53 @@
+Networking Troubleshooting
+==========================
+
+Before reading further, ensure that you follow all the recommendations :ref:`in our networking section `. You should follow these guidelines in order for PhotonVision to work properly; other networking setups are not officially supported.
+
+Checklist
+^^^^^^^^^
+
+A few issues make up the majority of support requests. Run through this checklist quickly to catch some common mistakes.
+
+- Is your camera connected to the robot's radio through a :ref:`network switch `?
+  - Ethernet straight from a laptop to a coprocessor will most likely not work, due to the unreliability of link-local connections.
+  - Even if there's a switch between your laptop and coprocessor, you'll still want a radio or router in the loop somehow.
+  - The FRC radio is the *only* router we will officially support, due to the innumerable variations between routers.
+- (Raspberry Pi, Orange Pi & Limelight only) Have you flashed the correct image, and is it up to date?
+  - Limelight 2/2+ and Gloworm devices should be flashed using the Limelight 2 image (e.g., `photonvision-v2024.2.8-linuxarm64_limelight2.img.xz`).
+  - Limelight 3 devices should be flashed using the Limelight 3 image (e.g., `photonvision-v2024.2.8-linuxarm64_limelight3.img.xz`).
+  - Raspberry Pi devices (including Pi 3, Pi 4, CM3 and CM4) should be flashed using the Raspberry Pi image (e.g., `photonvision-v2024.2.8-linuxarm64_RaspberryPi.img.xz`).
+  - Orange Pi 5 devices should be flashed using the Orange Pi 5 image (e.g., `photonvision-v2024.2.8-linuxarm64_orangepi5.img.xz`).
+  - Orange Pi 5+ devices should be flashed using the Orange Pi 5+ image (e.g., `photonvision-v2024.2.8-linuxarm64_orangepi5plus.img.xz`).
+- Is your robot code using a **2024** version of WPILib, and is your coprocessor using the most up-to-date **2024** release?
+  - 2022, 2023 and 2024 versions of either cannot be mixed and matched!
+  - Your PhotonVision version can be checked on the :ref:`settings tab`.
+- Is your team number correctly set on the :ref:`settings tab`?
+
+photonvision.local Not Found
+----------------------------
+
+Use `Angry IP Scanner `_ and look for an IP that has port 5800 open. Then navigate to that IP on port 5800 (``<IP>:5800``) in your web browser.
+
+Alternatively, you can plug your coprocessor into a display, plug in a keyboard, and run ``hostname -I`` in the terminal. This should give you the IP address of your coprocessor; then navigate to ``<IP>:5800`` in your web browser.
+
+If nothing shows up, ensure your coprocessor has power and that you are following all of our networking recommendations. If you still can't find it, feel free to :ref:`contact us ` and we will help you.
+
+Can't Connect To Robot
+----------------------
+
+Please check that:
+
+1. You don't have the NetworkTables Server on (toggleable in the settings tab). Turn this off when doing work on a robot.
+2. You have your team number set properly in the settings tab.
+3. Your camera name in the ``PhotonCamera`` constructor matches the name in the UI.
+4. You are using the 2024 version of WPILib and RoboRIO image.
+5. Your robot is on.
+
+If all of the above are met and you still have issues, feel free to :ref:`contact us ` and provide the following information:
+
+- The WPILib version used by your robot code
+- PhotonLib vendor dependency version
+- PhotonVision version (from the UI)
+- Your settings exported from your coprocessor (if you're able to access it)
+- How your RoboRIO/coprocessor are networked together
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000..62a87b4c05
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,112 @@
+.. image:: assets/PhotonVision-Header-onWhite.png
+   :alt: PhotonVision
+
+Welcome to the official documentation of PhotonVision! PhotonVision is the free, fast, and easy-to-use vision processing solution for the *FIRST* Robotics Competition. PhotonVision is designed to get vision working on your robot *quickly*, without the significant cost of other similar solutions. PhotonVision supports a variety of COTS hardware, including the Raspberry Pi 3 and 4, the `Gloworm smart camera `_, the `SnakeEyes Pi hat `_, and the Orange Pi 5.
+
+Content
+-------
+
+.. grid:: 2
+
+   .. grid-item-card:: Getting Started
+      :link: docs/installation/index
+      :link-type: doc
+
+      Get started with installing PhotonVision, creating a pipeline, and tuning it for usage in competitions.
+
+   .. grid-item-card:: Programming Reference and PhotonLib
+      :link: docs/programming/index
+      :link-type: doc
+
+      Learn more about PhotonLib, our vendor dependency which makes it easier for teams to retrieve vision data, make various calculations, and more.
+
+.. grid:: 2
+
+   .. 
grid-item-card:: Integration + :link: docs/integration/index + :link-type: doc + + Pick how to use vision processing results to control a physical robot. + + .. grid-item-card:: Code Examples + :link: docs/examples/index + :link-type: doc + + View various step by step guides on how to use data from PhotonVision in your code, along with game-specific examples. + +.. grid:: 2 + + .. grid-item-card:: Hardware + :link: docs/hardware/index + :link-type: doc + + Select appropriate hardware for high-quality and easy vision target detection. + + .. grid-item-card:: Contributing + :link: docs/contributing/index + :link-type: doc + + Interested in helping with PhotonVision? Learn more about how to contribute to our main code base, documentation, and more. + +Source Code +----------- + +The source code for all PhotonVision projects is available through our `GitHub organization `_. + +* `PhotonVision `_ +* `PhotonVision ReadTheDocs `_ + +Contact Us +---------- + +To report a bug or submit a feature request in PhotonVision, please `submit an issue on the PhotonVision GitHub `_ or `contact the developers on Discord `_. + +If you find a problem in this documentation, please submit an issue on the `PhotonVision Documentation GitHub `_. + +License +------- + +PhotonVision is licensed under the `GNU GPL v3 `_. + + +.. toctree:: + :maxdepth: 0 + :caption: Getting Started + :hidden: + + docs/description + docs/hardware/index + docs/installation/index + docs/settings + +.. toctree:: + :maxdepth: 0 + :caption: Pipeline Tuning and Calibration + :hidden: + + docs/pipelines/index + docs/apriltag-pipelines/index + docs/reflectiveAndShape/index + docs/objectDetection/index + docs/calibration/calibration + +.. toctree:: + :maxdepth: 1 + :caption: Programming Reference + :hidden: + + docs/programming/photonlib/index + docs/simulation/index + docs/integration/index + docs/examples/index + +.. toctree:: + :maxdepth: 1 + :caption: Additional Resources + :hidden: + + docs/troubleshooting/index + docs/additional-resources/best-practices + docs/additional-resources/config + docs/additional-resources/nt-api + docs/contributing/index diff --git a/docs/source/make.bat b/docs/source/make.bat new file mode 100644 index 0000000000..9534b01813 --- /dev/null +++ b/docs/source/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/build.gradle b/photon-docs/build.gradle similarity index 100% rename from docs/build.gradle rename to photon-docs/build.gradle diff --git a/docs/theme.css b/photon-docs/theme.css similarity index 100% rename from docs/theme.css rename to photon-docs/theme.css diff --git a/settings.gradle b/settings.gradle index 117ee70a52..f23ca6ecf8 100644 --- a/settings.gradle +++ b/settings.gradle @@ -2,4 +2,4 @@ include 'photon-targeting' include 'photon-core' include 'photon-server' include 'photon-lib' -include 'docs' +include 'photon-docs'