Diffstat (limited to 'misc')
-rw-r--r--  misc/llama-cpp/Makefile    2
-rw-r--r--  misc/llama-cpp/distinfo    6
-rw-r--r--  misc/llama-cpp/pkg-plist   4
-rw-r--r--  misc/py-gguf/Makefile      4
-rw-r--r--  misc/py-gguf/distinfo      6
5 files changed, 10 insertions, 12 deletions
diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index edcfc0829cf0..e880ababbdc1 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
PORTNAME= llama-cpp
DISTVERSIONPREFIX= b
-DISTVERSION= 5287
+DISTVERSION= 5371
CATEGORIES= misc # machine-learning

MAINTAINER= yuri@FreeBSD.org
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 5682dec0737a..065e1989894b 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1746510097
-SHA256 (ggerganov-llama.cpp-b5287_GH0.tar.gz) = 2b62ce01b09300aa5ed628e520ea56a72aff64fda92c02c2576880e0810225a4
-SIZE (ggerganov-llama.cpp-b5287_GH0.tar.gz) = 21099950
+TIMESTAMP = 1747201270
+SHA256 (ggerganov-llama.cpp-b5371_GH0.tar.gz) = cce50220507565b78423fc45a1c534dc088289ab898517a379fdbf733ffd72bf
+SIZE (ggerganov-llama.cpp-b5371_GH0.tar.gz) = 21147325
SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
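
The TIMESTAMP/SHA256/SIZE lines above are not edited by hand; they are regenerated from the freshly fetched distfile. A minimal sketch of the usual step, assuming a ports tree checked out under /usr/ports (the path is an assumption):

    cd /usr/ports/misc/llama-cpp
    # refetch the b5371 distfile and rewrite distinfo (TIMESTAMP, SHA256, SIZE)
    make makesum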
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index fb952a574b2e..5b36c5809aaa 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -8,14 +8,13 @@ bin/convert_hf_to_gguf.py
%%EXAMPLES%%bin/llama-embedding
%%EXAMPLES%%bin/llama-eval-callback
%%EXAMPLES%%bin/llama-export-lora
+%%EXAMPLES%%bin/llama-finetune
%%EXAMPLES%%bin/llama-gen-docs
%%EXAMPLES%%bin/llama-gguf
%%EXAMPLES%%bin/llama-gguf-hash
%%EXAMPLES%%bin/llama-gguf-split
%%EXAMPLES%%bin/llama-gritlm
%%EXAMPLES%%bin/llama-imatrix
-%%EXAMPLES%%bin/llama-infill
-%%EXAMPLES%%bin/llama-llava-clip-quantize-cli
%%EXAMPLES%%bin/llama-lookahead
%%EXAMPLES%%bin/llama-lookup
%%EXAMPLES%%bin/llama-lookup-create
@@ -63,6 +62,5 @@ lib/libggml-cpu.so
%%VULKAN%%lib/libggml-vulkan.so
lib/libggml.so
lib/libllama.so
-%%EXAMPLES%%lib/libllava_shared.so
lib/libmtmd_shared.so
libdata/pkgconfig/llama.pc
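
The pkg-plist changes above follow the binaries the new upstream tag installs: llama-finetune appears, while llama-infill, the llava clip-quantize tool and libllava_shared.so go away. A hedged sketch of how such plist drift is usually caught after staging the port (same assumed /usr/ports path):

    cd /usr/ports/misc/llama-cpp
    make stage          # build and install into the staging directory
    make check-plist    # report files staged but missing from pkg-plist, and entries no longer installed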
diff --git a/misc/py-gguf/Makefile b/misc/py-gguf/Makefile
index b03b9d096471..05b335f0f495 100644
--- a/misc/py-gguf/Makefile
+++ b/misc/py-gguf/Makefile
@@ -1,5 +1,5 @@
PORTNAME= gguf
-DISTVERSION= 0.16.2.${GH_TAGNAME:S/b//} # the base version is in pyproject.toml, updated by post-patch
+DISTVERSION= 0.16.3.${GH_TAGNAME:S/b//} # the base version is in pyproject.toml, updated by post-patch
CATEGORIES= misc python # machine-learning
#MASTER_SITES= PYPI # the PYPI version is way behind of llama-cpp
PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
@@ -24,7 +24,7 @@ USE_PYTHON= pep517 autoplist pytest

USE_GITHUB= yes
GH_ACCOUNT= ggml-org
GH_PROJECT= llama.cpp
-GH_TAGNAME= b5287
+GH_TAGNAME= b5371

WRKSRC= ${WRKDIR}/${GH_PROJECT}-${GH_TAGNAME}/gguf-py
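
DISTVERSION is derived from GH_TAGNAME via the bmake :S/b// modifier, which strips the leading "b" from the tag, so b5371 expands to 0.16.3.5371 and matches the distfile name in the distinfo below. A quick way to confirm the expansion from the port directory (path assumed):

    cd /usr/ports/misc/py-gguf
    make -V DISTVERSION    # expected output: 0.16.3.5371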
diff --git a/misc/py-gguf/distinfo b/misc/py-gguf/distinfo
index 71d363f1c2b8..1b9000f89f7c 100644
--- a/misc/py-gguf/distinfo
+++ b/misc/py-gguf/distinfo
@@ -1,3 +1,3 @@
-TIMESTAMP = 1746512008
-SHA256 (ggml-org-llama.cpp-0.16.2.5287-b5287_GH0.tar.gz) = 2b62ce01b09300aa5ed628e520ea56a72aff64fda92c02c2576880e0810225a4
-SIZE (ggml-org-llama.cpp-0.16.2.5287-b5287_GH0.tar.gz) = 21099950
+TIMESTAMP = 1747201428
+SHA256 (ggml-org-llama.cpp-0.16.3.5371-b5371_GH0.tar.gz) = cce50220507565b78423fc45a1c534dc088289ab898517a379fdbf733ffd72bf
+SIZE (ggml-org-llama.cpp-0.16.3.5371-b5371_GH0.tar.gz) = 21147325