Diffstat (limited to 'misc/llama-cpp')
-rw-r--r--   misc/llama-cpp/Makefile    3
-rw-r--r--   misc/llama-cpp/distinfo    6
-rw-r--r--   misc/llama-cpp/pkg-plist   3
3 files changed, 7 insertions, 5 deletions
diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 6a6f8f36ca37..463389d10d70 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
PORTNAME= llama-cpp
DISTVERSIONPREFIX= b
-DISTVERSION= 5897
+DISTVERSION= 6374
CATEGORIES= misc # machine-learning

MAINTAINER= yuri@FreeBSD.org
@@ -44,7 +44,6 @@ CURL_LIB_DEPENDS= libcurl.so:ftp/curl
EXAMPLES_CMAKE_BOOL= LLAMA_BUILD_EXAMPLES
-VULKAN_DESC= Vulkan GPU offload support
VULKAN_CMAKE_BOOL= GGML_VULKAN
VULKAN_BUILD_DEPENDS= glslc:graphics/shaderc \
vulkan-headers>0:graphics/vulkan-headers
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 0b0d820bedc1..ac00c83b7dba 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1752529454
-SHA256 (ggerganov-llama.cpp-b5897_GH0.tar.gz) = 95f920d2cd25ab878fb1ad115fa0d810ec44c3582a72c4ce02d362a0000a2455
-SIZE (ggerganov-llama.cpp-b5897_GH0.tar.gz) = 25179873
+TIMESTAMP = 1756957718
+SHA256 (ggerganov-llama.cpp-b6374_GH0.tar.gz) = fe4392f8331a13fcd6ccf5fa4a7e9a79d58958394ef62bf8df6d214208619ed5
+SIZE (ggerganov-llama.cpp-b6374_GH0.tar.gz) = 25632053
SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index 5cc1998007e9..9c53359aabc9 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -5,6 +5,7 @@ bin/convert_hf_to_gguf.py
%%EXAMPLES%%bin/llama-cli
%%EXAMPLES%%bin/llama-convert-llama2c-to-ggml
%%EXAMPLES%%bin/llama-cvector-generator
+%%EXAMPLES%%bin/llama-diffusion-cli
%%EXAMPLES%%bin/llama-embedding
%%EXAMPLES%%bin/llama-eval-callback
%%EXAMPLES%%bin/llama-export-lora
@@ -15,6 +16,7 @@ bin/convert_hf_to_gguf.py
%%EXAMPLES%%bin/llama-gguf-split
%%EXAMPLES%%bin/llama-gritlm
%%EXAMPLES%%bin/llama-imatrix
+%%EXAMPLES%%bin/llama-logits
%%EXAMPLES%%bin/llama-lookahead
%%EXAMPLES%%bin/llama-lookup
%%EXAMPLES%%bin/llama-lookup-create
@@ -47,6 +49,7 @@ include/ggml-opt.h
include/ggml-rpc.h
include/ggml-sycl.h
include/ggml-vulkan.h
+include/ggml-webgpu.h
include/ggml.h
include/gguf.h
include/llama-cpp.h