Skip to content

Commit

Permalink
Dev. container: Added support for CUDA and CUDNN
Browse files Browse the repository at this point in the history
Also:
* Dev. container: Updated for Torch 1.10.2
* Added /build to .gitignore
  • Loading branch information
stemann committed Apr 29, 2024
1 parent ece9654 commit c8f1e9c
Show file tree
Hide file tree
Showing 5 changed files with 101 additions and 17 deletions.
66 changes: 56 additions & 10 deletions .devcontainer/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,18 +1,64 @@
ARG BASE_IMAGE_TAG
ARG BASE_IMAGE_VARIANT=debian
ARG BASE_IMAGE_VERSION=11

ARG BASE_IMAGE_TAG=$BASE_IMAGE_VARIANT-$BASE_IMAGE_VERSION

FROM mcr.microsoft.com/devcontainers/cpp:$BASE_IMAGE_TAG

ARG OCAML_VERSION
ARG OPAM_VERSION
ARG TORCH_VERSION
ARG CUDA_VERSION=11.8.0
ARG CUDNN_VERSION=8.9.4

ARG OCAML_VERSION=4
ARG OPAM_VERSION=2

RUN sudo apt-get update \
&& sudo apt-get satisfy -y "ocaml (>= $OCAML_VERSION)" "opam (>= $OPAM_VERSION)" \
ARG TORCH_VARIANT
ARG TORCH_VERSION=2.1.1

RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
cmake \
&& apt-get satisfy -y "ocaml (>= $OCAML_VERSION)" "opam (>= $OPAM_VERSION)" \
&& rm -rf /var/lib/apt/lists/*

RUN cd /usr/local \
&& sudo wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2Bcpu.zip \
&& sudo unzip libtorch-*.zip \
&& sudo rm libtorch-*.zip
ENV JULIA_DEPOT_PATH=/opt/julia_depot
ENV JULIAUP_DEPOT_PATH=/opt/juliaup
RUN curl -fsSL https://install.julialang.org | sh -s -- --default-channel 1.9 --path /opt/juliaup --yes
ENV PATH=/opt/juliaup/bin:$PATH

ENV CUDA_VERSION=$CUDA_VERSION
ENV CUDNN_VERSION=$CUDNN_VERSION

# Install the CUDA SDK through Julia's artifact system and expose it at the
# conventional /usr/local/cuda path (its bin/ is appended to PATH below).
RUN <<EOF
# Major.minor of the requested CUDA version, used to pin CUDA_Runtime_jll.
CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2)
# Work in a throwaway Julia project so the SDK install does not pollute
# the depot's default environment.
TMP_PROJECT=$(mktemp -d)
cd $TMP_PROJECT
touch Project.toml
# Pin the CUDA runtime version via LocalPreferences.toml before resolving packages.
cat <<EOT > LocalPreferences.toml
[CUDA_Runtime_jll]
version = "$CUDA_VERSION_MAJOR_MINOR"
EOT
# Add the CUDA SDK JLL package (the package name differs for CUDA < 11.4)
# and capture its artifact directory from the julia process's stdout.
CUDA_ROOT=$(julia --project --eval '
using Pkg
CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"])
CUDA_SDK_jll_pkg = :CUDA_SDK_jll
if CUDA_VERSION < v"11.4"
CUDA_SDK_jll_pkg = :CUDA_full_jll
end
Pkg.add(name=string(CUDA_SDK_jll_pkg), version=ENV["CUDA_VERSION"])
@eval using $CUDA_SDK_jll_pkg
println(@eval $CUDA_SDK_jll_pkg.artifact_dir)
')
# Expose the artifact's cuda/ subdirectory at the standard location.
ln -s $CUDA_ROOT/cuda /usr/local/cuda
EOF
ENV PATH=$PATH:/usr/local/cuda/bin

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Download and unpack libtorch under /usr/local (CMAKE_PREFIX_PATH points at
# /usr/local/libtorch below). When TORCH_VARIANT is not given as a build arg,
# default to the CUDA build matching CUDA_VERSION's major.minor, e.g. "cu118"
# for CUDA 11.8.x.
RUN if [ -z "$TORCH_VARIANT" ]; then export TORCH_VARIANT="cu$(echo $CUDA_VERSION | cut -d . -f 1-2 | tr -d '.')"; fi \
&& cd /usr/local \
&& wget -q "https://download.pytorch.org/libtorch/$TORCH_VARIANT/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2B$TORCH_VARIANT.zip" \
&& unzip -q libtorch-*.zip \
&& rm libtorch-*.zip

ENV CMAKE_PREFIX_PATH=/usr/local/libtorch
14 changes: 9 additions & 5 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,25 @@
"build": {
"dockerfile": "Dockerfile",
"args": {
"BASE_IMAGE_TAG": "debian-11",
"BASE_IMAGE_VARIANT": "debian",
"BASE_IMAGE_VERSION": "11",
"CUDA_VERSION": "11.3.1",
"CUDNN_VERSION": "8.2.4",
"OCAML_VERSION": "4",
"OPAM_VERSION": "2",
"TORCH_VERSION": "1.4.0"
"TORCH_VERSION": "1.10.2"
}
},
"customizations": {
"vscode": {
"extensions": [
"julialang.language-julia",
"ms-vscode.cpptools-extension-pack"
]
}
},
"features": {
"ghcr.io/julialang/devcontainer-features/julia:1": {}
"hostRequirements": {
"gpu": "optional"
},
"postCreateCommand": "opam init --auto-setup"
"postCreateCommand": ".devcontainer/postCreate.sh"
}
27 changes: 27 additions & 0 deletions .devcontainer/postCreate.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Dev. container post-create hook (referenced by devcontainer.json's
# postCreateCommand): fixes Julia depot ownership, links cuDNN into the CUDA
# tree, and initializes OPAM.
#
# Fail fast: without this, a failed cuDNN setup would be silently ignored and
# only surface later as a confusing build error.
set -e

# The Julia depots were populated during the image build; hand them over to
# the unprivileged dev. container user so Pkg operations below can write.
sudo chown -R vscode:vscode /opt/juliaup /opt/julia_depot

# Major.minor of the requested CUDA version, used to pin CUDA_Runtime_jll.
CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2)
# Use a throwaway Julia project so these installs do not pollute the depot's
# default environment.
TMP_PROJECT=$(mktemp -d)
cd $TMP_PROJECT
touch Project.toml
cat <<EOT > LocalPreferences.toml
[CUDA_Runtime_jll]
version = "$CUDA_VERSION_MAJOR_MINOR"
EOT
# Install CUDNN_jll (plus a CUDA_Runtime_jll version compatible with
# CUDA_VERSION) and capture the cuDNN artifact directory, then symlink its
# headers and libraries into /usr/local/cuda so build tools find them.
CUDNN_ROOT=$(julia --project --eval '
using Pkg;
CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"])
if CUDA_VERSION < v"11"
Pkg.add(name="CUDA_Runtime_jll", version="0.2")
elseif CUDA_VERSION < v"11.4"
Pkg.add(name="CUDA_Runtime_jll", version="0.7")
else
Pkg.add(name="CUDA_Runtime_jll")
end
Pkg.add(name="CUDNN_jll", version=ENV["CUDNN_VERSION"]);
using CUDNN_jll;
println(CUDNN_jll.artifact_dir)') \
&& for F in $CUDNN_ROOT/include/cudnn*.h; do ln -s $F /usr/local/cuda/include/$(basename $F); done \
&& for F in $CUDNN_ROOT/lib/libcudnn*; do ln -s $F /usr/local/cuda/lib64/$(basename $F); done

opam init --disable-sandboxing --auto-setup
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
/Manifest.toml
/build
/.vscode
10 changes: 8 additions & 2 deletions deps/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,14 @@ Since Torch is a C++-library, a C wrapper is needed for Julia to interact with T

The C wrapper can be generated from the `Declarations.yaml`-file included with `Torch_jll`:
```sh
mkdir c_wrapper_generator/data
curl https://raw.githubusercontent.com/LaurentMazare/ocaml-torch/main/third_party/pytorch/Declarations-v1.4.0.yaml -o c_wrapper_generator/data/Declarations.yaml
mkdir -p c_wrapper_generator/data
cp -v `julia --eval '
using Pkg
Pkg.activate(; temp=true)
Pkg.add(name="Torch_jll", version="1.10")
import Torch_jll
print(joinpath(dirname(Torch_jll.libtorch_path), "..", "share", "ATen", "Declarations.yaml"))
'` c_wrapper_generator/data/
```

The C wrapper can then be generated by building and running the (OCaml-based) C wrapper generator, e.g. by using the dev. container (which includes OCaml and OPAM):
Expand Down

0 comments on commit c8f1e9c

Please sign in to comment.