From 39927db87513c12ec3b98a72a72ce26016d98e11 Mon Sep 17 00:00:00 2001
From: Casper da Costa-Luis
Date: Thu, 27 Jul 2023 21:44:40 +0100
Subject: [PATCH] minify build scripts

---
 a2t-whisper/build.sh | 25 ++----------
 cdr-replit/build.sh | 12 +-----
 cdr-t5/build.sh | 13 +-----
 cht-dolly-v2/build.sh | 12 +-----
 cht-falcon/build.sh | 12 +-----
 cht-gorilla/build.sh | 24 ++---------
 cht-llama-cpp/build.sh | 26 ++----------
 cht-llama-v2/build.sh | 48 +++-------------------
 cht-mpt/build.sh | 36 ++---------------
 cht-xgen/build.sh | 12 +-----
 dfs-dalle/build.sh | 19 +++------
 dfs-diffusers/build.sh | 88 +++++------------------------------------
 ebd-all-minilm/build.sh | 25 ++----------
 t2a-bark/build.sh | 25 ++----------
 utils.sh | 30 ++++++++++++++
 15 files changed, 80 insertions(+), 327 deletions(-)
 create mode 100644 utils.sh

diff --git a/a2t-whisper/build.sh b/a2t-whisper/build.sh
index ca13ec1..2ae4aab 100755
--- a/a2t-whisper/build.sh
+++ b/a2t-whisper/build.sh
@@ -1,26 +1,7 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.1
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/audio-to-text-whisper-tiny-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=tiny" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/audio-to-text-whisper-large-v2-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=large-v2" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_cpu ghcr.io/premai-io/audio-to-text-whisper-tiny-cpu tiny ${@:1}
+build_gpu ghcr.io/premai-io/audio-to-text-whisper-large-v2-gpu large-v2 ${@:1}
diff --git a/cdr-replit/build.sh b/cdr-replit/build.sh
index 7e02bd2..ed25682 100755
--- a/cdr-replit/build.sh
+++ b/cdr-replit/build.sh
@@ -1,14 +1,6 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/coder-replit-code-v1-3b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=replit/replit-code-v1-3b" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/coder-replit-code-v1-3b-gpu replit/replit-code-v1-3b ${@:1}
diff --git a/cdr-t5/build.sh b/cdr-t5/build.sh
index d95c854..57d5c25 100755
--- a/cdr-t5/build.sh
+++ b/cdr-t5/build.sh
@@ -1,15 +1,6 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/coder-codet5p-220m-py-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=Salesforce/codet5p-220m-py" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
+build_cpu ghcr.io/premai-io/coder-codet5p-220m-py-cpu Salesforce/codet5p-220m-py ${@:1}
diff --git a/cht-dolly-v2/build.sh b/cht-dolly-v2/build.sh
index d57d433..0628bcc 100755
--- a/cht-dolly-v2/build.sh
+++ b/cht-dolly-v2/build.sh
@@ -1,14 +1,6 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.3
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-dolly-v2-12b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=databricks/dolly-v2-12b" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-dolly-v2-12b-gpu databricks/dolly-v2-12b ${@:1}
diff --git a/cht-falcon/build.sh b/cht-falcon/build.sh
index e1127ea..8e8cbad 100755
--- a/cht-falcon/build.sh
+++ b/cht-falcon/build.sh
@@ -1,14 +1,6 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-falcon-7b-instruct-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=tiiuae/falcon-7b-instruct" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-falcon-7b-instruct-gpu tiiuae/falcon-7b-instruct ${@:1}
diff --git a/cht-gorilla/build.sh b/cht-gorilla/build.sh
index 207e7c9..632b99f 100755
--- a/cht-gorilla/build.sh
+++ b/cht-gorilla/build.sh
@@ -1,25 +1,7 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-gorilla-falcon-7b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=gorilla-llm/gorilla-falcon-7b-hf-v0" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/chat-gorilla-mpt-7b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=gorilla-llm/gorilla-mpt-7b-hf-v0" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-gorilla-falcon-7b-gpu gorilla-llm/gorilla-falcon-7b-hf-v0 ${@:1}
+build_gpu ghcr.io/premai-io/chat-gorilla-mpt-7b-gpu gorilla-llm/gorilla-mpt-7b-hf-v0 ${@:1}
diff --git a/cht-llama-cpp/build.sh b/cht-llama-cpp/build.sh
index df252d8..f844ac7 100755
--- a/cht-llama-cpp/build.sh
+++ b/cht-llama-cpp/build.sh
@@ -1,27 +1,7 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.4
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-gpt4all-lora-q4-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=gpt4all-lora-q4" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/chat-vicuna-7b-q4-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=vicuna-7b-q4" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
+build_cpu ghcr.io/premai-io/chat-gpt4all-lora-q4-cpu gpt4all-lora-q4 ${@:1}
+build_cpu ghcr.io/premai-io/chat-vicuna-7b-q4-cpu vicuna-7b-q4 ${@:1}
diff --git a/cht-llama-v2/build.sh b/cht-llama-v2/build.sh
index c3f4de7..523c5f5 100755
--- a/cht-llama-v2/build.sh
+++ b/cht-llama-v2/build.sh
@@ -1,47 +1,9 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-llama-2-7b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=llama-2-7b-hf" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/chat-llama-2-7b-chat-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=llama-2-7b-chat-hf" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/chat-llama-2-13b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=llama-2-13b-hf" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/chat-llama-2-13b-chat-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=llama-2-13b-chat-hf" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-llama-2-7b-gpu llama-2-7b-hf ${@:1}
+build_gpu ghcr.io/premai-io/chat-llama-2-7b-chat-gpu llama-2-7b-chat-hf ${@:1}
+build_gpu ghcr.io/premai-io/chat-llama-2-13b-gpu llama-2-13b-hf ${@:1}
+build_gpu ghcr.io/premai-io/chat-llama-2-13b-chat-gpu llama-2-13b-chat-hf ${@:1}
diff --git a/cht-mpt/build.sh b/cht-mpt/build.sh
index 5a79492..c620ac9 100755
--- a/cht-mpt/build.sh
+++ b/cht-mpt/build.sh
@@ -1,36 +1,8 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-mpt-7b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=mosaicml/mpt-7b-chat" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/mpt-7b-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=mosaicml/mpt-7b" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all ghcr.io/premai-io/mpt-7b-gpu:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/mpt-7b-instruct-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=mosaicml/mpt-7b-instruct" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all ghcr.io/premai-io/mpt-7b-instruct-gpu:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-mpt-7b-gpu mosaicml/mpt-7b-chat ${@:1}
+build_gpu ghcr.io/premai-io/mpt-7b-gpu mosaicml/mpt-7b ${@:1}
+build_gpu ghcr.io/premai-io/mpt-7b-instruct-gpu mosaicml/mpt-7b-instruct ${@:1}
diff --git a/cht-xgen/build.sh b/cht-xgen/build.sh
index fab88cf..e6c7c66 100755
--- a/cht-xgen/build.sh
+++ b/cht-xgen/build.sh
@@ -1,14 +1,6 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/chat-xgen-7b-8k-inst-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=Salesforce/xgen-7b-8k-inst" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/chat-xgen-7b-8k-inst-gpu Salesforce/xgen-7b-8k-inst ${@:1}
diff --git a/dfs-dalle/build.sh b/dfs-dalle/build.sh
index b534cc5..e41e7a3 100755
--- a/dfs-dalle/build.sh
+++ b/dfs-dalle/build.sh
@@ -1,17 +1,10 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/diffuser-dalle-mini-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="DALLE_MODEL_ID=dalle-mini/dalle-mini" \
- --build-arg="DALLE_REVISION_ID=''" \
- --build-arg="VQGAN_MODEL_ID=dalle-mini/vqgan_imagenet_f16_16384" \
- --build-arg="VQGAN_REVISION_ID=e93a26e7707683d349bf5d5c41c5b0ef69b677a9" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_gpu ghcr.io/premai-io/diffuser-dalle-mini-gpu - ${@:1} \
+ --build-arg="DALLE_MODEL_ID=dalle-mini/dalle-mini" \
+ --build-arg="DALLE_REVISION_ID=''" \
+ --build-arg="VQGAN_MODEL_ID=dalle-mini/vqgan_imagenet_f16_16384" \
+ --build-arg="VQGAN_REVISION_ID=e93a26e7707683d349bf5d5c41c5b0ef69b677a9"
diff --git a/dfs-diffusers/build.sh b/dfs-diffusers/build.sh
index e4357fc..41a1f05 100755
--- a/dfs-diffusers/build.sh
+++ b/dfs-diffusers/build.sh
@@ -1,81 +1,13 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.3
-
-IMAGE=ghcr.io/premai-io/diffuser-stable-diffusion-2-1-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/stable-diffusion-2-1" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/diffuser-stable-diffusion-1-5-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=runwayml/stable-diffusion-v1-5" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/diffuser-stable-diffusion-2-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/stable-diffusion-2" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/diffuser-stable-diffusion-xl-gpu
-docker buildx build --push \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/stable-diffusion-xl-base-1.0" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/diffuser-stable-diffusion-xl-with-refiner-gpu
-docker buildx build --push \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/stable-diffusion-xl-base-1.0" \
- --build-arg="REFINER_ID=stabilityai/stable-diffusion-xl-refiner-1.0" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/upscaler-stable-diffusion-x4-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/stable-diffusion-x4-upscaler" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/upscaler-stable-diffusion-x2-latent-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=stabilityai/sd-x2-latent-upscaler" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
+
+build_gpu ghcr.io/premai-io/diffuser-stable-diffusion-2-1-gpu stabilityai/stable-diffusion-2-1 ${@:1}
+build_gpu ghcr.io/premai-io/diffuser-stable-diffusion-1-5-gpu runwayml/stable-diffusion-v1-5 ${@:1}
+build_gpu ghcr.io/premai-io/diffuser-stable-diffusion-2-gpu stabilityai/stable-diffusion-2 ${@:1}
+build_gpu ghcr.io/premai-io/diffuser-stable-diffusion-xl-gpu stabilityai/stable-diffusion-xl-base-1.0 ${@:1}
+build_gpu ghcr.io/premai-io/diffuser-stable-diffusion-xl-with-refiner-gpu stabilityai/stable-diffusion-xl-base-1.0 ${@:1} \
+ --build-arg="REFINER_ID=stabilityai/stable-diffusion-xl-refiner-1.0"
+build_gpu ghcr.io/premai-io/upscaler-stable-diffusion-x4-gpu stabilityai/stable-diffusion-x4-upscaler ${@:1}
+build_gpu ghcr.io/premai-io/upscaler-stable-diffusion-x2-latent-gpu stabilityai/sd-x2-latent-upscaler ${@:1}
diff --git a/ebd-all-minilm/build.sh b/ebd-all-minilm/build.sh
index 1387a02..b5267e3 100755
--- a/ebd-all-minilm/build.sh
+++ b/ebd-all-minilm/build.sh
@@ -1,26 +1,7 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.2
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/embeddings-all-minilm-l6-v2-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=all-MiniLM-L6-v2" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/embeddings-all-minilm-l6-v2-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=all-MiniLM-L6-v2" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
+build_cpu ghcr.io/premai-io/embeddings-all-minilm-l6-v2-cpu all-MiniLM-L6-v2 ${@:1}
+build_gpu ghcr.io/premai-io/embeddings-all-minilm-l6-v2-gpu all-MiniLM-L6-v2 ${@:1}
diff --git a/t2a-bark/build.sh b/t2a-bark/build.sh
index 8554a5c..3ff7c52 100755
--- a/t2a-bark/build.sh
+++ b/t2a-bark/build.sh
@@ -1,26 +1,7 @@
 #!/bin/bash
 set -e
 export VERSION=1.0.0
+source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 
-IMAGE=ghcr.io/premai-io/text-to-audio-bark-gpu
-docker buildx build ${@:1} \
- --file ./docker/gpu/Dockerfile \
- --build-arg="MODEL_ID=bark/t2a-bark" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- .
-if test -z $TESTS_SKIP_GPU; then
- docker run --rm --gpus all $IMAGE:$VERSION pytest
-fi
-
-IMAGE=ghcr.io/premai-io/text-to-audio-bark-cpu
-docker buildx build ${@:1} \
- --file ./docker/cpu/Dockerfile \
- --build-arg="MODEL_ID=bark/t2a-bark" \
- --tag $IMAGE:latest \
- --tag $IMAGE:$VERSION \
- --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
- .
-if test -z $TESTS_SKIP_CPU; then
- docker run --rm $IMAGE:$VERSION pytest
-fi
+build_cpu ghcr.io/premai-io/text-to-audio-bark-cpu bark/t2a-bark ${@:1}
+build_gpu ghcr.io/premai-io/text-to-audio-bark-gpu bark/t2a-bark ${@:1}
diff --git a/utils.sh b/utils.sh
new file mode 100644
index 0000000..c6e0133
--- /dev/null
+++ b/utils.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# common utilities used in ./*-*/build.sh
+# usage:
+# build_[cg]pu [...]
+# env:
+# required: VERSION
+# optional: BUILDX_PLATFORM (on CPU), TESTS_SKIP_[CG]PU
+
+build_cpu(){
+ IMAGE=$1; MODEL_ID=$2
+ docker buildx build ${@:3} \
+ --file ./docker/cpu/Dockerfile \
+ --build-arg="MODEL_ID=$MODEL_ID" \
+ --tag $IMAGE:latest \
+ --tag $IMAGE:$VERSION \
+ --platform ${BUILDX_PLATFORM:-linux/arm64,linux/amd64} \
+ .
+ if test -z $TESTS_SKIP_CPU; then docker run --rm $IMAGE:$VERSION pytest; fi
+}
+
+build_gpu(){
+ IMAGE=$1; MODEL_ID=$2
+ docker buildx build ${@:3} \
+ --file ./docker/gpu/Dockerfile \
+ --build-arg="MODEL_ID=$MODEL_ID" \
+ --tag $IMAGE:latest \
+ --tag $IMAGE:$VERSION \
+ .
+ if test -z $TESTS_SKIP_GPU; then docker run --rm --gpus all $IMAGE:$VERSION pytest; fi
+}