Commit

Start an Asahi version
Asahi has a forked version of Mesa while it is being upstreamed.

Signed-off-by: Eric Curtin <[email protected]>
ericcurtin committed Nov 5, 2024
1 parent 0048ee3 commit 814e654
Showing 5 changed files with 43 additions and 12 deletions.
29 changes: 29 additions & 0 deletions container-images/asahi/Containerfile
@@ -0,0 +1,29 @@
FROM fedora:41

# renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
ARG OMLMD_VERSION=0.1.6
ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78
# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a

RUN dnf install -y dnf-plugins-core && \
    dnf copr enable -y @asahi/fedora-remix-branding && \
    dnf install -y asahi-repos && \
    dnf install -y mesa-vulkan-drivers vulkan-headers vulkan-loader-devel \
      vulkan-tools spirv-tools glslc glslang git procps-ng vim cmake gcc-c++ \
      python3-pip python3-argcomplete clang && \
    dnf clean all && \
    rm -rf /var/cache/*dnf*

RUN /usr/bin/python3 --version
RUN pip install "omlmd==${OMLMD_VERSION}"

COPY ../scripts /scripts
RUN export CC=clang && \
    export CXX=clang++ && \
    chmod +x /scripts/*.sh && \
    /scripts/build_llama_and_whisper.sh "$LLAMA_CPP_SHA" "$WHISPER_CPP_SHA" \
      "/usr" "-DGGML_VULKAN=1"

ENV WHISPER_CPP_SHA=${WHISPER_CPP_SHA}
ENV LLAMA_CPP_SHA=${LLAMA_CPP_SHA}
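
A quick smoke test of the resulting image, assuming it has already been built or pulled under the tag that ramalama/model.py (below) selects for Asahi hosts; the two podman commands are illustrative only and are not part of this commit:

# Sketch only: the tag mirrors the one hard-coded in ramalama/model.py;
# vulkan-tools and pip are installed by the Containerfile above.
podman run --rm quay.io/ramalama/asahi:latest vulkaninfo --summary
podman run --rm quay.io/ramalama/asahi:latest python3 -m pip show omlmd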
5 changes: 1 addition & 4 deletions container-images/cuda/Containerfile
@@ -21,8 +21,6 @@ RUN chmod +x /scripts/*.sh && \
# Final runtime image
FROM docker.io/nvidia/cuda:12.6.2-runtime-ubi9

# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?<version>.*)
ARG HUGGINGFACE_HUB_VERSION=0.26.2
# renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
ARG OMLMD_VERSION=0.1.6

@@ -32,8 +30,7 @@ RUN dnf install -y python3 python3-pip && \
rm -rf /var/cache/*dnf*

# Install Python packages in the runtime image
RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}" \
"omlmd==${OMLMD_VERSION}"
RUN pip install "omlmd==${OMLMD_VERSION}"

# Copy the entire installation directory from the builder
COPY --from=builder /tmp/install /usr
6 changes: 0 additions & 6 deletions container-images/ramalama/Containerfile
@@ -1,11 +1,7 @@
FROM registry.access.redhat.com/ubi9/ubi:9.4-1214.1729773476

# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?<version>.*)
ARG HUGGINGFACE_HUB_VERSION=0.26.2
# renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
ARG OMLMD_VERSION=0.1.6
# renovate: datasource=github-releases depName=tqdm/tqdm extractVersion=^v(?<version>.*)
ARG TQDM_VERSION=4.66.6
ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78
# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a
@@ -24,9 +20,7 @@ RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.n
rm -rf /var/cache/*dnf*

RUN /usr/bin/python3 --version
RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}"
RUN pip install "omlmd==${OMLMD_VERSION}"
RUN pip install "tqdm==${TQDM_VERSION}"

RUN dnf config-manager --add-repo \
https://mirror.stream.centos.org/9-stream/AppStream/$(uname -m)/os/
13 changes: 11 additions & 2 deletions ramalama/model.py
@@ -103,10 +103,19 @@ def model_path(self, args):
        raise NotImplementedError(f"model_path for {self.type} not implemented")

    def _image(self, args):
        if args.image != default_image():
            return args.image

        gpu_type, _ = get_gpu()
        if gpu_type == "HIP_VISIBLE_DEVICES":
            if args.image == default_image():
                return "quay.io/ramalama/rocm:latest"
            return "quay.io/ramalama/rocm:latest"

        if os.path.exists('/etc/os-release'):
            with open('/etc/os-release', 'r') as file:
                content = file.read()
                if "asahi" in content.lower():
                    return "quay.io/ramalama/asahi:latest"

        return args.image

    def setup_container(self, args):
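
The reworked _image first honors any explicitly requested image, then falls back to the rocm image for HIP-capable GPUs, and finally to the new asahi image when /etc/os-release mentions Asahi. For illustration only (not part of the commit), the os-release check is equivalent to this shell test:

# Hypothetical shell equivalent of the Asahi detection added to _image;
# the echoed tag is the same one the Python code returns.
if grep -qi "asahi" /etc/os-release 2>/dev/null; then
    echo "quay.io/ramalama/asahi:latest"
fi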
2 changes: 2 additions & 0 deletions test/ci.sh
@@ -32,6 +32,8 @@ main() {
# verify llama.cpp version matches
grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \
container-images/cuda/Containerfile
grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \
container-images/asahi/Containerfile

local os
os="$(uname -s)"
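
The added ci.sh check keeps the asahi image pinned to the same llama.cpp commit as the base image: the inner grep extracts the full ARG LLAMA_CPP_SHA=... line from the ramalama Containerfile, and the outer grep requires that exact line to appear in the asahi Containerfile. A standalone sketch of the same check, with an explicit error path that the commit itself does not include:

# Assumption: run from the repository root; the message and exit are added
# here for readability, while the commit relies on grep's own exit status.
expected=$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)
if ! grep -qF "$expected" container-images/asahi/Containerfile; then
    echo "asahi llama.cpp SHA does not match ramalama Containerfile" >&2
    exit 1
fi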
