summaryrefslogtreecommitdiff
path: root/misc/py-gguf/Makefile
blob: b4b655f4ac86dbd1876822c0ee74bc3723be55fe (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# Package identity: installs as py-gguf (PYTHON_PKGNAMEPREFIX + PORTNAME).
PORTNAME=	gguf
# DISTVERSION = upstream base version with the llama.cpp snapshot tag number
# appended ('b' prefix stripped by :S/b//). A '.' separator is required
# because PEP 440 would interpret '-' as a post-release marker.
DISTVERSION=	0.16.3.${GH_TAGNAME:S/b//} # the base version is in pyproject.toml, updated by post-patch
CATEGORIES=	misc python # machine-learning
# Distfile intentionally comes from GitHub (USE_GITHUB below), not PyPI.
#MASTER_SITES=	PYPI # the PYPI version is way behind of llama-cpp
PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}

MAINTAINER=	yuri@FreeBSD.org
COMMENT=	Read and write ML models in GGUF for GGML
WWW=		https://ggml.ai \
		https://github.com/ggml-org/llama.cpp

LICENSE=	MIT
LICENSE_FILE=	${WRKSRC}/LICENSE

# PEP 517 build backend is poetry-core; run deps mirror the package's
# runtime requirements (NOTE(review): presumably tracking pyproject.toml's
# dependency list — re-verify on version bumps).
BUILD_DEPENDS=	${PYTHON_PKGNAMEPREFIX}poetry-core>=1.0.0:devel/py-poetry-core@${PY_FLAVOR}
RUN_DEPENDS=	${PYNUMPY} \
		${PYTHON_PKGNAMEPREFIX}pyyaml>=5.1:devel/py-pyyaml@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}sentencepiece>=0.1.98:textproc/py-sentencepiece@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}tqdm>=4.27:misc/py-tqdm@${PY_FLAVOR}

USES=		python shebangfix
USE_PYTHON=	pep517 autoplist pytest

# Fetch the whole llama.cpp tree at snapshot tag b5401; the gguf Python
# package lives in its gguf-py/ subdirectory (see WRKSRC).
USE_GITHUB=	yes
GH_ACCOUNT=	ggml-org
GH_PROJECT=	llama.cpp
GH_TAGNAME=	b5401

# Point the build at the gguf-py subdirectory of the extracted distfile.
WRKSRC=		${WRKDIR}/${GH_PROJECT}-${GH_TAGNAME}/gguf-py

SHEBANG_GLOB=	*.py

# Pure-Python package, no compiled artifacts.
NO_ARCH=	yes

# Stamp the llama.cpp tag number onto the version in pyproject.toml so the
# installed wheel's version matches DISTVERSION. The sed pattern relies on
# ${DISTVERSION:R} (DISTVERSION with its last dot-component removed) being
# exactly the base version stored upstream, and the replacement being
# identical to ${DISTVERSION}. NOTE(review): the '.' chars in the pattern
# are unescaped regex metacharacters — harmless here in practice, but the
# match is looser than it looks.
post-patch: # update version to reflect llama-cpp revision, have to use '.' since '-' is interpreted as "post"
	@${REINPLACE_CMD} \
		-e 's/version = "${DISTVERSION:R}"/version = "${DISTVERSION:R}.${GH_TAGNAME:S/b//}"/' \
		${WRKSRC}/pyproject.toml

# tests as of 0.16.2.5280: 5 passed in 10.59s

.include <bsd.port.mk>