diff --git a/.github/workflows/hello-world-python.yml b/.github/workflows/hello-world-python.yml
index eecb612..4c86aa1 100644
--- a/.github/workflows/hello-world-python.yml
+++ b/.github/workflows/hello-world-python.yml
@@ -14,11 +14,6 @@ jobs:
     env:
       EXREPO: acap-computer-vision-examples
       EXNAME: hello-world-python
-    strategy:
-      matrix:
-        include:
-          - arch: armv7hf
-          - arch: aarch64
     steps:
       - uses: actions/checkout@v2
       - uses: docker/setup-buildx-action@v2
@@ -30,4 +25,4 @@ jobs:
           imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}:1.0
         run: |
           cd $EXNAME
-          docker build --no-cache --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
+          docker build --no-cache --tag $imagetag .
diff --git a/.github/workflows/minimal-ml-inference.yml b/.github/workflows/minimal-ml-inference.yml
index 87b7a8d..4377059 100644
--- a/.github/workflows/minimal-ml-inference.yml
+++ b/.github/workflows/minimal-ml-inference.yml
@@ -17,12 +17,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - arch: armv7hf
-            chip: cpu
-          - arch: armv7hf
-            chip: edgetpu
-          - arch: aarch64
-            chip: artpec8
+          - chip: cpu
+          - chip: artpec8
     steps:
       - uses: actions/checkout@v2
       - uses: docker/setup-buildx-action@v2
@@ -36,5 +32,5 @@ jobs:
         run: |
           cd $EXNAME
           docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes
-          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
-          docker build --file Dockerfile.model --tag $modeltag --build-arg ARCH=${{ matrix.arch }} .
+          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --tag $imagetag .
+          docker build --file Dockerfile.model --tag $modeltag .
diff --git a/.github/workflows/object-detector-python.yml b/.github/workflows/object-detector-python.yml
index 7fa5759..b846326 100644
--- a/.github/workflows/object-detector-python.yml
+++ b/.github/workflows/object-detector-python.yml
@@ -14,12 +14,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - arch: armv7hf
-            chip: cpu
-          - arch: armv7hf
-            chip: edgetpu
-          - arch: aarch64
-            chip: artpec8
+          - chip: cpu
+          - chip: artpec8
     env:
       EXREPO: acap-computer-vision-examples
       EXNAME: object-detector-python
@@ -35,5 +31,5 @@ jobs:
           modeltag: ${{ env.EXREPO }}_${{ env.EXNAME }}-${{ matrix.chip }}-model:1.0
         run: |
           cd $EXNAME
-          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
-          docker build --file Dockerfile.model --tag $modeltag --build-arg ARCH=${{ matrix.arch }} .
+          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --tag $imagetag .
+          docker build --file Dockerfile.model --tag $modeltag .
diff --git a/.github/workflows/opencv-qr-decoder-python.yml b/.github/workflows/opencv-qr-decoder-python.yml
index 328830f..740394e 100644
--- a/.github/workflows/opencv-qr-decoder-python.yml
+++ b/.github/workflows/opencv-qr-decoder-python.yml
@@ -14,11 +14,6 @@ jobs:
     env:
       EXREPO: acap-computer-vision-examples
       EXNAME: opencv-qr-decoder-python
-    strategy:
-      matrix:
-        include:
-          - arch: armv7hf
-          - arch: aarch64
     steps:
       - uses: actions/checkout@v2
       - uses: docker/setup-buildx-action@v2
@@ -30,4 +25,4 @@ jobs:
           imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}:1.0
         run: |
          cd $EXNAME
-          docker build --no-cache --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
+          docker build --no-cache --tag $imagetag .
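The four workflows above now build a single aarch64 image and no longer pass an `ARCH` build argument. For local verification, a build can be run along the same lines on an x86_64 host; this is only a sketch, reusing the repository and example names from the workflow environment variables, with an illustrative tag.

```sh
# Register QEMU handlers so the aarch64 base images can be emulated during the build
docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes

# Build one of the aarch64-only examples, e.g. hello-world-python
export EXREPO=acap-computer-vision-examples
export EXNAME=hello-world-python
cd "$EXNAME"
docker build --no-cache --tag "${EXREPO}_${EXNAME}:1.0" .
```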
diff --git a/.github/workflows/parameter-api-python.yml b/.github/workflows/parameter-api-python.yml
index 6ed3e8c..8c3729d 100644
--- a/.github/workflows/parameter-api-python.yml
+++ b/.github/workflows/parameter-api-python.yml
@@ -14,11 +14,6 @@ jobs:
     env:
       EXREPO: acap-computer-vision-examples
       EXNAME: parameter-api-python
-    strategy:
-      matrix:
-        include:
-          - arch: armv7hf
-          - arch: aarch64
     steps:
       - uses: actions/checkout@v2
       - uses: docker/setup-buildx-action@v2
@@ -30,4 +25,4 @@ jobs:
           imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}:1.0
         run: |
           cd $EXNAME
-          docker build --no-cache --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
+          docker build --no-cache --tag $imagetag .
diff --git a/.github/workflows/pose-estimator-with-flask.yml b/.github/workflows/pose-estimator-with-flask.yml
index df614b4..bf43c0d 100644
--- a/.github/workflows/pose-estimator-with-flask.yml
+++ b/.github/workflows/pose-estimator-with-flask.yml
@@ -17,12 +17,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - arch: armv7hf
-            chip: cpu
-          - arch: armv7hf
-            chip: edgetpu
-          - arch: aarch64
-            chip: artpec8
+          - chip: cpu
+          - chip: artpec8
     steps:
       - uses: actions/checkout@v2
 
@@ -37,5 +33,5 @@ jobs:
         run: |
           cd $EXNAME
           docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes
-          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
-          docker build --file Dockerfile.model --tag $modeltag --build-arg ARCH=${{ matrix.arch }} .
+          docker build --no-cache --build-arg CHIP=${{ matrix.chip }} --tag $imagetag .
+          docker build --file Dockerfile.model --tag $modeltag .
diff --git a/.github/workflows/web-server.yml b/.github/workflows/web-server.yml
index f76246f..8ba9876 100644
--- a/.github/workflows/web-server.yml
+++ b/.github/workflows/web-server.yml
@@ -14,11 +14,6 @@ jobs:
     env:
       EXREPO: acap-computer-vision-examples
       EXNAME: web-server
-    strategy:
-      matrix:
-        include:
-          - arch: armv7hf
-          - arch: aarch64
     steps:
       - uses: actions/checkout@v2
       - uses: docker/setup-buildx-action@v2
@@ -31,4 +26,4 @@ jobs:
         run: |
           cd $EXNAME
           docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes
-          docker build --no-cache --build-arg ARCH=${{ matrix.arch }} --tag $imagetag .
+          docker build --no-cache --tag $imagetag .
diff --git a/README.md b/README.md
index 2862560..880155f 100644
--- a/README.md
+++ b/README.md
@@ -38,7 +38,6 @@ step-by-step instructions on how to run applications on the camera.
 The examples support the following architectures:
 
-* armv7hf
 * aarch64
 
 ### Example applications for video analytics
diff --git a/hello-world-python/Dockerfile b/hello-world-python/Dockerfile
index 1c42a55..0cc8463 100644
--- a/hello-world-python/Dockerfile
+++ b/hello-world-python/Dockerfile
@@ -1,15 +1,13 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 ARG REPO=axisecp
 ARG SDK_VERSION=1.15
 ARG UBUNTU_VERSION=22.04
 
-FROM arm32v7/ubuntu:${UBUNTU_VERSION} as runtime-image-armv7hf
-FROM arm64v8/ubuntu:${UBUNTU_VERSION} as runtime-image-aarch64
-FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-${ARCH} AS cv-sdk
-FROM runtime-image-${ARCH}
+FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-aarch64 AS cv-sdk
+
+FROM arm64v8/ubuntu:${UBUNTU_VERSION}
 
 # Get the Python package from the CV SDK
 COPY --from=cv-sdk /axis/python /
diff --git a/hello-world-python/README.md b/hello-world-python/README.md
index 0807bf3..7875bb9 100755
--- a/hello-world-python/README.md
+++ b/hello-world-python/README.md
@@ -28,36 +28,23 @@ hello-world-python
 Meet the following requirements to ensure compatibility with the example:
 
 * Axis device
-  * Chip: ARTPEC-{7-8} DLPU devices (e.g., Q1615 MkIII)
-  * Firmware: 10.9 or higher
-  * [Docker ACAP](https://github.com/AxisCommunications/docker-acap#installing) installed and started, using TLS and SD card as storage
+  * Chip: ARTPEC-8 DLPU devices (e.g., Q1656)
+  * Firmware: 11.10 or higher
+  * [Docker ACAP](https://github.com/AxisCommunications/docker-acap#installing) version 3.0 installed and started, using TLS with TCP and IPC socket and SD card as storage
 * Computer
   * Either [Docker Desktop](https://docs.docker.com/desktop/) version 4.11.1 or higher,
   * or [Docker Engine](https://docs.docker.com/engine/) version 20.10.17 or higher with BuildKit enabled using Docker Compose version 1.29.2 or higher
 
 ## How to run the code
 
-### Export the environment variable for the architecture
-
-Export the `ARCH` variable depending on the architecture of your camera:
-
-```sh
-# For arm32
-export ARCH=armv7hf
-
-# For arm64
-export ARCH=aarch64
-```
-
 ### Build the Docker image
 
-With the architecture defined, the `hello-world-python` image can be built. The environment variables are supplied as build arguments such that they are made available to Docker during the build process:
+Define and export the application image name in `APP_NAME` for use in the Docker Compose file.
 
 ```sh
-# Define app name
 export APP_NAME=hello-world-python
 
-docker build --tag $APP_NAME --build-arg ARCH .
+docker build --tag $APP_NAME .
 ```
 
 ### Set your device IP address and clear Docker memory
diff --git a/minimal-ml-inference/Dockerfile b/minimal-ml-inference/Dockerfile
index 57179c9..1b9c539 100644
--- a/minimal-ml-inference/Dockerfile
+++ b/minimal-ml-inference/Dockerfile
@@ -1,18 +1,14 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 ARG REPO=axisecp
 ARG SDK_VERSION=1.15
 ARG UBUNTU_VERSION=22.04
 
-FROM arm32v7/ubuntu:${UBUNTU_VERSION} as runtime-image-armv7hf
-FROM arm64v8/ubuntu:${UBUNTU_VERSION} as runtime-image-aarch64
-
 # Specify which ACAP Computer Vision SDK to use
-FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-${ARCH} AS cv-sdk
+FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-aarch64 AS cv-sdk
 
 # Define the runtime image
-FROM runtime-image-${ARCH}
+FROM arm64v8/ubuntu:${UBUNTU_VERSION}
 
 # Get packages from the CV SDK
 COPY --from=cv-sdk /axis/python /
diff --git a/minimal-ml-inference/Dockerfile.model b/minimal-ml-inference/Dockerfile.model
index 41dbc83..a14cf97 100644
--- a/minimal-ml-inference/Dockerfile.model
+++ b/minimal-ml-inference/Dockerfile.model
@@ -1,31 +1,13 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 ARG UBUNTU_VERSION=22.04
 
-FROM arm32v7/alpine as model-image-armv7hf
-FROM arm64v8/alpine as model-image-aarch64
+FROM arm64v8/alpine as model-image
 
-FROM arm32v7/ubuntu:${UBUNTU_VERSION} as local-armv7hf
-FROM arm64v8/ubuntu:${UBUNTU_VERSION} as local-aarch64
-
-FROM local-${ARCH} as local
-
-RUN <
 where `<device password>` is the password to the `root` user.
 
 Finally install the Docker image to the device:
 
 Navigate to the application page of the Axis device `http://<device IP>/index.html#apps`
-and enable `Allow unsigned apps` toggle. This will allow the installation of unsigned
+and enable `Allow unsigned apps` toggle. This will allow the installation of unsigned
 applications. Use the following command to proceed with the application installation.
 ```sh
diff --git a/pose-estimator-with-flask/Dockerfile b/pose-estimator-with-flask/Dockerfile
index fc15d85..d6896d2 100644
--- a/pose-estimator-with-flask/Dockerfile
+++ b/pose-estimator-with-flask/Dockerfile
@@ -1,15 +1,12 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 ARG REPO=axisecp
 ARG SDK_VERSION=1.15
 ARG UBUNTU_VERSION=22.04
 
-FROM arm32v7/ubuntu:${UBUNTU_VERSION} as runtime-image-armv7hf
-FROM arm64v8/ubuntu:${UBUNTU_VERSION} as runtime-image-aarch64
-FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-${ARCH} AS cv-sdk
-FROM runtime-image-${ARCH}
+FROM ${REPO}/acap-computer-vision-sdk:${SDK_VERSION}-aarch64 AS cv-sdk
+FROM arm64v8/ubuntu:${UBUNTU_VERSION}
 
 COPY --from=cv-sdk /axis/python /
 COPY --from=cv-sdk /axis/python-numpy /
diff --git a/pose-estimator-with-flask/Dockerfile.model b/pose-estimator-with-flask/Dockerfile.model
index 3b82b69..f1e376e 100644
--- a/pose-estimator-with-flask/Dockerfile.model
+++ b/pose-estimator-with-flask/Dockerfile.model
@@ -1,11 +1,7 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 
-FROM arm32v7/alpine as model-image-armv7hf
-FROM arm64v8/alpine as model-image-aarch64
-
-FROM model-image-${ARCH}
+FROM arm64v8/alpine
 
 # Get SSD Mobilenet V2
 ADD https://raw.githubusercontent.com/google-coral/test_data/master/movenet_single_pose_lightning_ptq_edgetpu.tflite models/
diff --git a/pose-estimator-with-flask/README.md b/pose-estimator-with-flask/README.md
index 078c759..0011683 100755
--- a/pose-estimator-with-flask/README.md
+++ b/pose-estimator-with-flask/README.md
@@ -12,12 +12,12 @@ The model [MoveNet SinglePose Lightning](https://coral.ai/models/pose-estimation
 
 This example composes three different container images into an application that performs object detection using a deep learning model.
 
 The first container contains the actual program built in this example. It then uses [gRPC](https://grpc.io/)/[protobuf](https://developers.google.com/protocol-buffers) to call the second container, the *inference-server*, that is used to capture images from the camera and perform the actual inference by implementing the [TensorFlow Serving API](https://github.com/tensorflow/serving). You can find more documentation on the [Machine Learning API documentation page](https://axiscommunications.github.io/acap-documentation/docs/api/computer-vision-sdk-apis.html#machine-learning-api). This example uses a containerized version of the [ACAP Runtime](https://github.com/AxisCommunications/acap-runtime#containerized-version) as the *inference-server*.
 
-Lastly, there is a third container that holds the deep learning model, which is put into a volume that is accessible by the other two images. The layout of the Docker image containing the model is shown below. The *MODEL_PATH* variable in the configuration file you're using specifies what model to use. By default, the armv7hf configuration file uses the edgetpu model, while the aarch64 configuration file uses the vanilla model.
+Lastly, there is a third container that holds the deep learning model, which is put into a volume that is accessible by the other two images. The layout of the Docker image containing the model is shown below. The *MODEL_PATH* variable in the configuration file you're using specifies what model to use.
 
 ```text
 model
-├── movenet_single_pose_lightning_ptq.tflite - model for CPU and DLPU
-└── movenet_single_pose_lightning_ptq_edgetpu.tflite - model for TPU
+└── movenet_single_pose_lightning_ptq.tflite - model for CPU and DLPU
+
 ```
 
 ### Applications
@@ -39,8 +39,8 @@ pose-estimator-with-flask
 │   │   └── index.html
 │   └── detector_with_flask.py
 ├── config
-│   ├── env.aarch64
-│   └── env.armv7hf
+│   ├── env.aarch64.cpu
+│   └── env.aarch64.artpec8
 ├── docker-compose.yml
 ├── Dockerfile
 ├── Dockerfile.model
@@ -49,8 +49,7 @@ pose-estimator-with-flask
 * **app/templates/index.html** - Simple HTML page used to render the video stream
 * **app/pose-estimator-with-flask.py** - The inference client main program
-* **config/env.aarch64** - Configuration file for Docker Compose to run on aarch64 devices
-* **config/env.armv7hf** - Configuration file for Docker Compose to run on armv7hf devices
+* **config/** - Configuration file for Docker Compose
 * **docker-compose.yml** - Docker Compose file for streaming camera video example using larod inference server
 * **Dockerfile** - Docker image with inference client for camera
 * **Dockerfile.model** - Docker image with inference model
@@ -60,52 +59,33 @@ pose-estimator-with-flask
 
 Meet the following requirements to ensure compatibility with the example:
 
 * Axis device
-  * Chip: ARTPEC-{7-8} DLPU devices (e.g., Q1615 MkIII)
-  * Firmware: 10.9 or higher
-  * [Docker ACAP](https://github.com/AxisCommunications/docker-acap#installing) installed and started, using TLS and SD card as storage
+  * Chip: ARTPEC-8 DLPU devices (e.g., Q1656)
+  * Firmware: 11.10 or higher
+  * [Docker ACAP](https://github.com/AxisCommunications/docker-acap#installing) version 3.0 installed and started, using TLS with TCP and IPC socket and SD card as storage
 * Computer
   * Either [Docker Desktop](https://docs.docker.com/desktop/) version 4.11.1 or higher,
   * or [Docker Engine](https://docs.docker.com/engine/) version 20.10.17 or higher with BuildKit enabled using Docker Compose version 1.29.2 or higher
 
 ## How to run the code
 
-### Export the environment variable for the architecture
-
-Export the `ARCH` variable depending on the architecture of your camera:
-
-```sh
-# For arm32
-export ARCH=armv7hf
-
-# Valid options for chip on armv7hf are 'tpu' (hardware accelerator) or 'cpu'
-export CHIP=tpu
-```
-
-```sh
-# For arm64
-export ARCH=aarch64
-
-# Valid options for chip on aarch64 are 'artpec8' (hardware accelerator) or 'cpu'
-export CHIP=artpec8
-```
-
 ### Build the Docker images
 
-With the architecture defined, the `acap4-pose-estimator-python` and `acap-dl-models` images can be built. The environment variables are supplied as build arguments such that they are made available to Docker during the build process:
+Define and export the application image name in `APP_NAME` and the model image name in `MODEL_NAME` for use in the Docker Compose file.
+Also define and export the `CHIP` parameter, which selects the configuration file used when running the application.
 
 ```sh
-# Define app name
 export APP_NAME=acap4-pose-estimator-python
 export MODEL_NAME=acap-dl-models
+export CHIP=artpec8 # Valid options are 'artpec8' (hardware accelerator) or 'cpu'
 
-# Install qemu to allow build flask for a different architecture
+# Install qemu to allow build for a different architecture
 docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes
 
-# Build app
-docker build --tag $APP_NAME --build-arg ARCH .
+# Build application image
+docker build --tag $APP_NAME .
 
-# Build inference model
-docker build --file Dockerfile.model --tag $MODEL_NAME --build-arg ARCH .
+# Build inference model image
+docker build --file Dockerfile.model --tag $MODEL_NAME .
 ```
 
 ### Set your device IP address and clear Docker memory
@@ -148,10 +128,10 @@ docker save $MODEL_NAME | docker --tlsverify --host tcp://$DEVICE_IP:$DOCKER_POR
 
 With the application image on the device, it can be started using `docker-compose.yml`:
 
 ```sh
-docker --tlsverify --host tcp://$DEVICE_IP:$DOCKER_PORT compose --env-file ./config/env.$ARCH.$CHIP up
+docker --tlsverify --host tcp://$DEVICE_IP:$DOCKER_PORT compose --env-file ./config/env.aarch64.$CHIP up
 
 # Terminate with Ctrl-C and cleanup
-docker --tlsverify --host tcp://$DEVICE_IP:$DOCKER_PORT compose --env-file ./config/env.$ARCH.$CHIP compose down --volumes
+docker --tlsverify --host tcp://$DEVICE_IP:$DOCKER_PORT compose --env-file ./config/env.aarch64.$CHIP down --volumes
 ```
 
 ### The expected output
@@ -188,12 +168,8 @@ pose-estimator_1 | 0.02048427 0.01638742 0.15568045 0.07374337 0.05735596]
 ### Hardware acceleration
 
-The `./config` folder contains configuration files with the parameters to run the inference on different camera models. The parameters also gives you the possibility to use the hardware accelerator.
-
-To achieve the best performance we recommend using:
-
-* the TPU (Tensor Processing Unit) equipped with ARTPEC-7 cameras (for example [Axis-Q1615 Mk III](https://www.axis.com/products/axis-q1615-mk-iii))
-* or the DLPU (Deep Learning Processing Unit) equipped in ARTPEC-8 cameras (e.g. [Axis-Q1656](https://www.axis.com/products/axis-q1656))
+The `./config` folder contains configuration files with the parameters to run the inference on different camera models, also giving the possibility to use the hardware accelerator.
+To achieve the best performance we recommend using DLPU (Deep Learning Processing Unit) equipped ARTPEC-8 cameras.
 
 See [ACAP Computer Vision SDK hardware and compatibility](https://axiscommunications.github.io/acap-documentation/docs/axis-devices-and-compatibility/#acap-computer-vision-sdk-hardware-compatibility)
 
 ## License
diff --git a/pose-estimator-with-flask/config/env.armv7hf.cpu b/pose-estimator-with-flask/config/env.armv7hf.cpu
deleted file mode 100644
index cf147f4..0000000
--- a/pose-estimator-with-flask/config/env.armv7hf.cpu
+++ /dev/null
@@ -1,4 +0,0 @@
-MODEL_PATH=/models/movenet_single_pose_lightning_ptq.tflite
-INFERENCE_SERVER_IMAGE=axisecp/acap-runtime:1.3.1-armv7hf-containerized
-INFERENCE_CHIP=2
-
diff --git a/pose-estimator-with-flask/config/env.armv7hf.tpu b/pose-estimator-with-flask/config/env.armv7hf.tpu
deleted file mode 100644
index e9b5a94..0000000
--- a/pose-estimator-with-flask/config/env.armv7hf.tpu
+++ /dev/null
@@ -1,4 +0,0 @@
-MODEL_PATH=/models/movenet_single_pose_lightning_ptq_edgetpu.tflite
-INFERENCE_SERVER_IMAGE=axisecp/acap-runtime:1.3.1-armv7hf-containerized
-INFERENCE_CHIP=4
-
diff --git a/web-server/Dockerfile b/web-server/Dockerfile
index c4177d5..78c4606 100644
--- a/web-server/Dockerfile
+++ b/web-server/Dockerfile
@@ -1,39 +1,25 @@
 # syntax=docker/dockerfile:1
 
-ARG ARCH=armv7hf
 ARG UBUNTU_VERSION=22.04
 
-FROM arm32v7/ubuntu:${UBUNTU_VERSION} as runtime-image-armv7hf
-FROM arm64v8/ubuntu:${UBUNTU_VERSION} as runtime-image-aarch64
+FROM arm64v8/ubuntu:${UBUNTU_VERSION} as runtime-image
 
-FROM runtime-image-${ARCH}
 
 # Setup environment variables
 ENV DEBIAN_FRONTEND=noninteractive
 ENV BUILD_ROOT=/build-root
 
 ## Install dependencies
-ARG ARCH
 RUN <
-### Export the environment variable for the architecture
-
-Export the `ARCH` variable depending on the architecture of your camera:
-
-```sh
-# For arm32
-export ARCH=armv7hf
-
-# For arm64
-export ARCH=aarch64
-```
-
 ### Build the Docker image
 
-With the architecture defined, the `monkey` image can be built. The environment variables are supplied as build arguments such that they are made available to Docker during the build process:
+Define the application image name in an environment variable `APP_NAME` and build the image:
 
 ```sh
 # Define app name
@@ -77,7 +65,7 @@ export APP_NAME=monkey
 docker run --rm --privileged multiarch/qemu-user-static --credential yes --persistent yes
 
 # Build the container
-docker build --tag $APP_NAME --build-arg ARCH .
+docker build --tag $APP_NAME .
 ```
 
 ### Set your device IP address and clear Docker memory
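With the `ARCH` build argument removed everywhere, the images produced by these instructions are implicitly linux/arm64. As a quick sanity check (a sketch that reuses the `APP_NAME` variable exported in the build step above), the platform recorded in a freshly built image can be inspected before saving it to the device:

```sh
# Print the OS/architecture stored in the image metadata; expected output: linux/arm64
docker image inspect --format '{{.Os}}/{{.Architecture}}' "$APP_NAME"
```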