From 814e654515f93700cc7f76345ce21d8592dabcc0 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Thu, 24 Oct 2024 16:36:57 +0100 Subject: [PATCH] Start an Asahi version Asahi has a forked version of mesa while it is being upstreamed. Signed-off-by: Eric Curtin --- container-images/asahi/Containerfile | 29 +++++++++++++++++++++++++ container-images/cuda/Containerfile | 5 +---- container-images/ramalama/Containerfile | 6 ----- ramalama/model.py | 13 +++++++++-- test/ci.sh | 2 ++ 5 files changed, 43 insertions(+), 12 deletions(-) create mode 100644 container-images/asahi/Containerfile diff --git a/container-images/asahi/Containerfile b/container-images/asahi/Containerfile new file mode 100644 index 000000000..27a274c4d --- /dev/null +++ b/container-images/asahi/Containerfile @@ -0,0 +1,29 @@ +FROM fedora:41 + +# renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?.*) +ARG OMLMD_VERSION=0.1.6 +ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78 +# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest +ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a + +RUN dnf install -y dnf-plugins-core && \ + dnf copr enable -y @asahi/fedora-remix-branding && \ + dnf install -y asahi-repos && \ + dnf install -y mesa-vulkan-drivers vulkan-headers vulkan-loader-devel \ + vulkan-tools spirv-tools glslc glslang git procps-ng vim cmake gcc-c++ \ + python3-pip python3-argcomplete clang && \ + dnf clean all && \ + rm -rf /var/cache/*dnf* + +RUN /usr/bin/python3 --version +RUN pip install "omlmd==${OMLMD_VERSION}" + +COPY ../scripts /scripts +RUN export CC=clang && \ + export CXX=clang++ && \ + chmod +x /scripts/*.sh && \ + /scripts/build_llama_and_whisper.sh "$LLAMA_CPP_SHA" "$WHISPER_CPP_SHA" \ + "/usr" "-DGGML_VULKAN=1" + +ENV WHISPER_CPP_SHA=${WHISPER_CPP_SHA} +ENV LLAMA_CPP_SHA=${LLAMA_CPP_SHA} diff 
--git a/container-images/cuda/Containerfile b/container-images/cuda/Containerfile index 535b2d984..3628c8e0d 100644 --- a/container-images/cuda/Containerfile +++ b/container-images/cuda/Containerfile @@ -21,8 +21,6 @@ RUN chmod +x /scripts/*.sh && \ # Final runtime image FROM docker.io/nvidia/cuda:12.6.2-runtime-ubi9 -# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?.*) -ARG HUGGINGFACE_HUB_VERSION=0.26.2 # renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?.*) ARG OMLMD_VERSION=0.1.6 @@ -32,8 +30,7 @@ RUN dnf install -y python3 python3-pip && \ rm -rf /var/cache/*dnf* # Install Python packages in the runtime image -RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}" \ - "omlmd==${OMLMD_VERSION}" +RUN pip install "omlmd==${OMLMD_VERSION}" # Copy the entire installation directory from the builder COPY --from=builder /tmp/install /usr diff --git a/container-images/ramalama/Containerfile b/container-images/ramalama/Containerfile index af510a59b..e606d226d 100644 --- a/container-images/ramalama/Containerfile +++ b/container-images/ramalama/Containerfile @@ -1,11 +1,7 @@ FROM registry.access.redhat.com/ubi9/ubi:9.4-1214.1729773476 -# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?.*) -ARG HUGGINGFACE_HUB_VERSION=0.26.2 # renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?.*) ARG OMLMD_VERSION=0.1.6 -# renovate: datasource=github-releases depName=tqdm/tqdm extractVersion=^v(?.*) -ARG TQDM_VERSION=4.66.6 ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78 # renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a @@ -24,9 +20,7 @@ RUN dnf install -y 
https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.n rm -rf /var/cache/*dnf* RUN /usr/bin/python3 --version -RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}" RUN pip install "omlmd==${OMLMD_VERSION}" -RUN pip install "tqdm==${TQDM_VERSION}" RUN dnf config-manager --add-repo \ https://mirror.stream.centos.org/9-stream/AppStream/$(uname -m)/os/ diff --git a/ramalama/model.py b/ramalama/model.py index 61193d383..9dbe86cf9 100644 --- a/ramalama/model.py +++ b/ramalama/model.py @@ -103,10 +103,19 @@ def model_path(self, args): raise NotImplementedError(f"model_path for {self.type} not implemented") def _image(self, args): + if args.image != default_image(): + return args.image + gpu_type, _ = get_gpu() if gpu_type == "HIP_VISIBLE_DEVICES": - if args.image == default_image(): - return "quay.io/ramalama/rocm:latest" + return "quay.io/ramalama/rocm:latest" + + if os.path.exists('/etc/os-release'): + with open('/etc/os-release', 'r') as file: + content = file.read() + if "asahi" in content.lower(): + return "quay.io/ramalama/asahi:latest" + return args.image def setup_container(self, args): diff --git a/test/ci.sh b/test/ci.sh index ebcaa3e06..8ceef307a 100755 --- a/test/ci.sh +++ b/test/ci.sh @@ -32,6 +32,8 @@ main() { # verify llama.cpp version matches grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \ container-images/cuda/Containerfile + grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \ + container-images/asahi/Containerfile local os os="$(uname -s)"