No particular reason for this update ... upstream releases *a lot* and it's been a few weeks :-)

ok?
Index: Makefile
===================================================================
RCS file: /cvs/ports/misc/llama.cpp/Makefile,v
diff -u -p -r1.7 Makefile
--- Makefile    18 Feb 2025 00:02:17 -0000      1.7
+++ Makefile    27 Mar 2025 16:21:12 -0000
@@ -8,7 +8,7 @@ COMMENT =               LLM inference system
 
 GH_ACCOUNT =           ggerganov
 GH_PROJECT =           llama.cpp
-GH_TAGNAME =           b4706
+GH_TAGNAME =           b4974
 PKGNAME =              llama-cpp-0.0.${GH_TAGNAME:S/b//}
 
 SHARED_LIBS +=         ggml-base 0.0
Index: distinfo
===================================================================
RCS file: /cvs/ports/misc/llama.cpp/distinfo,v
diff -u -p -r1.2 distinfo
--- distinfo    13 Feb 2025 12:21:58 -0000      1.2
+++ distinfo    27 Mar 2025 16:21:12 -0000
@@ -1,2 +1,2 @@
-SHA256 (llama.cpp-b4706.tar.gz) = jpINppeW9Vu/jeqf9gnJPsZ1Hkpkj6YWOHbJSAcPwxc=
-SIZE (llama.cpp-b4706.tar.gz) = 20705861
+SHA256 (llama.cpp-b4974.tar.gz) = k63lHQ9lhM9R7QBRZqOaOpFIWK+23MjefNIzJZtHyrk=
+SIZE (llama.cpp-b4974.tar.gz) = 20857221
Index: pkg/PLIST
===================================================================
RCS file: /cvs/ports/misc/llama.cpp/pkg/PLIST,v
diff -u -p -r1.3 PLIST
--- pkg/PLIST   13 Feb 2025 12:21:59 -0000      1.3
+++ pkg/PLIST   27 Mar 2025 16:21:12 -0000
@@ -9,6 +9,7 @@ bin/convert_hf_to_gguf.py
 @bin bin/llama-eval-callback
 @bin bin/llama-export-lora
 @bin bin/llama-gbnf-validator
+@bin bin/llama-gemma3-cli
 @bin bin/llama-gen-docs
 @bin bin/llama-gguf
 @bin bin/llama-gguf-hash
@@ -45,6 +46,7 @@ include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml-blas.h
 include/ggml-cann.h
+include/ggml-cpp.h
 include/ggml-cpu.h
 include/ggml-cuda.h
 include/ggml-kompute.h

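For anyone who wants to reproduce the mechanical parts of this bump, a rough
sketch of the usual sequence in the port directory (standard ports(7) /
bsd.port.mk targets, nothing specific to llama.cpp) would be something like:

    $ make makesum                 # fetch the new tag, regenerate SHA256/SIZE in distinfo
    $ make package                 # build and fake with the new GH_TAGNAME
    $ make update-plist            # pick up new files (llama-gemma3-cli, ggml-cpp.h)
    $ make port-lib-depends-check  # verify WANTLIB is still accurate

That is roughly what produced the distinfo and pkg/PLIST changes above.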