summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--misc/ollama/Makefile7
-rw-r--r--misc/ollama/files/patch-FreeBSD-compatibility12
-rw-r--r--misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt9
3 files changed, 20 insertions, 8 deletions
diff --git a/misc/ollama/Makefile b/misc/ollama/Makefile
index d388e976d172..b44f692681bd 100644
--- a/misc/ollama/Makefile
+++ b/misc/ollama/Makefile
@@ -1,7 +1,7 @@
PORTNAME= ollama
DISTVERSIONPREFIX= v
DISTVERSION= 0.3.4
-PORTREVISION= 2
+PORTREVISION= 3
CATEGORIES= misc # machine-learning
MAINTAINER= yuri@FreeBSD.org
@@ -16,6 +16,7 @@ ONLY_FOR_ARCHS_REASON= bundled patched llama-cpp is placed into the arch-specifi
BUILD_DEPENDS= bash:shells/bash \
cmake:devel/cmake-core \
+ glslc:graphics/shaderc \
vulkan-headers>0:graphics/vulkan-headers
LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
@@ -27,12 +28,14 @@ GO_TARGET= .
USE_GITHUB= nodefault
GH_TUPLE= ggerganov:llama.cpp:6eeaeba:llama_cpp/llm/llama.cpp
+MAKE_ENV= PATH=${PATH}:${WRKSRC}/llm/build/bsd/x86_64_static/bin # workaround to find vulkan-shaders-gen
+
PLIST_FILES= bin/${PORTNAME}
post-patch: # workaround for https://github.com/ollama/ollama/issues/6259 (use of external libllama.so)
@${REINPLACE_CMD} \
-e '\
- s| llama | llama omp |; \
+ s| llama | llama ${LOCALBASE}/lib/libvulkan.so omp pthread |; \
s| llama | ${WRKSRC}/llm/build/bsd/x86_64_static/src/libllama.a |; \
s| ggml | ${WRKSRC}/llm/build/bsd/x86_64_static/ggml/src/libggml.a |; \
' \
diff --git a/misc/ollama/files/patch-FreeBSD-compatibility b/misc/ollama/files/patch-FreeBSD-compatibility
index 03cc31011ab1..ca206363f042 100644
--- a/misc/ollama/files/patch-FreeBSD-compatibility
+++ b/misc/ollama/files/patch-FreeBSD-compatibility
@@ -143,38 +143,38 @@ new file mode 100644
+COMMON_BSD_DEFS="-DCMAKE_SYSTEM_NAME=$(uname -s)"
+CMAKE_TARGETS="--target llama --target ggml"
+
-+COMMON_CPU_DEFS="${COMMON_BSD_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH}"
++COMMON_CPU_DEFS="${COMMON_BSD_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DBUILD_SHARED_LIBS=off"
+
+# Static build for linking into the Go binary
+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
++CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}_static"
+echo "Building static library"
+build
+
+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
++CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu"
+echo "Building LCD CPU"
+build
+compress
+
+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
++CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx"
+echo "Building AVX CPU"
+build
+compress
+
+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
++CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx2"
+echo "Building AVX2 CPU"
+build
+compress
+
+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_VULKAN=on ${CMAKE_DEFS}"
++CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/vulkan"
+echo "Building Vulkan GPU"
+build
diff --git a/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt b/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt
new file mode 100644
index 000000000000..3ada7c2debc2
--- /dev/null
+++ b/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt
@@ -0,0 +1,9 @@
+--- llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt.orig 2024-08-08 21:55:59 UTC
++++ llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt
+@@ -1,5 +1,6 @@ add_executable(${TARGET} vulkan-shaders-gen.cpp)
+
+ set(TARGET vulkan-shaders-gen)
+ add_executable(${TARGET} vulkan-shaders-gen.cpp)
++target_link_libraries(${TARGET} PRIVATE pthread)
+ install(TARGETS ${TARGET} RUNTIME)
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)