summary refs log tree commit diff
path: root/misc/llama-cpp
diff options
context:
space:
mode:
Diffstat (limited to 'misc/llama-cpp')
-rw-r--r--  misc/llama-cpp/Makefile   |  2
-rw-r--r--  misc/llama-cpp/distinfo   |  6
-rw-r--r--  misc/llama-cpp/pkg-plist  | 12
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index edcfc0829cf0..1866f235b18f 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
PORTNAME= llama-cpp
DISTVERSIONPREFIX= b
-DISTVERSION= 5287
+DISTVERSION= 5943
CATEGORIES= misc # machine-learning
MAINTAINER= yuri@FreeBSD.org
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 5682dec0737a..7769fc9f69f5 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1746510097
-SHA256 (ggerganov-llama.cpp-b5287_GH0.tar.gz) = 2b62ce01b09300aa5ed628e520ea56a72aff64fda92c02c2576880e0810225a4
-SIZE (ggerganov-llama.cpp-b5287_GH0.tar.gz) = 21099950
+TIMESTAMP = 1753077187
+SHA256 (ggerganov-llama.cpp-b5943_GH0.tar.gz) = abb05cabbb7f2b18e762f0db17c4842836a768e33c4a8b840a4d3dbf8cdc47d4
+SIZE (ggerganov-llama.cpp-b5943_GH0.tar.gz) = 25229732
SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index fb952a574b2e..d6690897b050 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -5,17 +5,17 @@ bin/convert_hf_to_gguf.py
%%EXAMPLES%%bin/llama-cli
%%EXAMPLES%%bin/llama-convert-llama2c-to-ggml
%%EXAMPLES%%bin/llama-cvector-generator
+%%EXAMPLES%%bin/llama-diffusion-cli
%%EXAMPLES%%bin/llama-embedding
%%EXAMPLES%%bin/llama-eval-callback
%%EXAMPLES%%bin/llama-export-lora
+%%EXAMPLES%%bin/llama-finetune
%%EXAMPLES%%bin/llama-gen-docs
%%EXAMPLES%%bin/llama-gguf
%%EXAMPLES%%bin/llama-gguf-hash
%%EXAMPLES%%bin/llama-gguf-split
%%EXAMPLES%%bin/llama-gritlm
%%EXAMPLES%%bin/llama-imatrix
-%%EXAMPLES%%bin/llama-infill
-%%EXAMPLES%%bin/llama-llava-clip-quantize-cli
%%EXAMPLES%%bin/llama-lookahead
%%EXAMPLES%%bin/llama-lookup
%%EXAMPLES%%bin/llama-lookup-create
@@ -36,7 +36,6 @@ bin/convert_hf_to_gguf.py
%%EXAMPLES%%bin/llama-speculative-simple
%%EXAMPLES%%bin/llama-tokenize
%%EXAMPLES%%bin/llama-tts
-%%VULKAN%%bin/vulkan-shaders-gen
include/ggml-alloc.h
include/ggml-backend.h
include/ggml-blas.h
@@ -44,16 +43,18 @@ include/ggml-cann.h
include/ggml-cpp.h
include/ggml-cpu.h
include/ggml-cuda.h
-include/ggml-kompute.h
include/ggml-metal.h
include/ggml-opt.h
include/ggml-rpc.h
include/ggml-sycl.h
include/ggml-vulkan.h
include/ggml.h
+include/ggml-webgpu.h
include/gguf.h
include/llama-cpp.h
include/llama.h
+include/mtmd-helper.h
+include/mtmd.h
lib/cmake/ggml/ggml-config.cmake
lib/cmake/ggml/ggml-version.cmake
lib/cmake/llama/llama-config.cmake
@@ -63,6 +64,5 @@ lib/libggml-cpu.so
%%VULKAN%%lib/libggml-vulkan.so
lib/libggml.so
lib/libllama.so
-%%EXAMPLES%%lib/libllava_shared.so
-lib/libmtmd_shared.so
+lib/libmtmd.so
libdata/pkgconfig/llama.pc