| author | Nicolas Graves <ngraves@ngraves.fr> | 2023-08-14 01:24:40 -0700 |
|---|---|---|
| committer | Ludovic Courtès <ludo@gnu.org> | 2023-08-15 00:33:51 +0200 |
| commit | bef4697d4b8a8998c2dc471b30c0c9d09e90208c (patch) | |
| tree | ae722fcd7eabff9c1c690349398cc42d24d7dc1f /gnu/packages/machine-learning.scm | |
| parent | 03bc7bee942cb14edd709dbd14725e5e2c0e289f (diff) | |
gnu: llama-cpp: Update to 0.0.0-0.f31b539.
* gnu/packages/machine-learning.scm (llama-cpp): Update to 0.0.0-0.f31b539.
[#:phases](install-python-scripts): Adapt python scripts.
(install): Remove deleted quantize script installation.
Co-authored-by: Andy Tai <atai@atai.org>
Signed-off-by: Ludovic Courtès <ludo@gnu.org>
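For readers unfamiliar with how Guix names VCS snapshots: the "0.0.0-0.f31b539" string in the subject is what `git-version` produces from the pinned commit and revision counter introduced in the diff below. A minimal sketch, not part of the patch; `git-version` is exported by `(guix git-download)`:

```scheme
;; Sketch: deriving the version string used in the commit subject.
;; The commit hash and revision are the ones added by this patch.
(use-modules (guix git-download))

(let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
      (revision "0"))
  (git-version "0.0.0" revision commit))
;; => "0.0.0-0.f31b539"
```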
Diffstat (limited to 'gnu/packages/machine-learning.scm')
-rw-r--r-- | gnu/packages/machine-learning.scm | 15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index f931626cd6..c32180615b 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -408,7 +408,7 @@ Performance is achieved by using the LLVM JIT compiler.")
   (deprecated-package "guile-aiscm-next" guile-aiscm))
 
 (define-public llama-cpp
-  (let ((commit "3cd8dde0d1357b7f11bdd25c45d5bf5e97e284a0")
+  (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
         (revision "0"))
     (package
       (name "llama-cpp")
@@ -421,7 +421,7 @@ Performance is achieved by using the LLVM JIT compiler.")
                (commit (string-append "master-" (string-take commit 7)))))
          (file-name (git-file-name name version))
          (sha256
-          (base32 "0i7c92cxqs31xklrn688978kk29agivgxjgvsb45wzm65gc6hm5c"))))
+          (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
       (build-system cmake-build-system)
       (arguments
        (list
@@ -449,18 +449,13 @@ Performance is achieved by using the LLVM JIT compiler.")
                     (chmod (string-append bin script) #o555)))
                   (mkdir-p bin)
                   (make-script "convert-pth-to-ggml")
-                  (make-script "convert-gptq-to-ggml")
-                  (make-script "quantize.py")
-                  (substitute* (string-append bin "quantize.py")
-                    (("os\\.getcwd\\(\\), quantize_script_binary")
-                     (string-append "\"" bin "\", quantize_script_binary"))))))
+                  (make-script "convert-lora-to-ggml")
+                  (make-script "convert"))))
             (add-after 'install-python-scripts 'wrap-python-scripts
               (assoc-ref python:%standard-phases 'wrap))
             (replace 'install
               (lambda _
-                (let ((bin (string-append #$output "/bin/")))
-                  (install-file "bin/quantize" bin)
-                  (copy-file "bin/main" (string-append bin "llama"))))))))
+                (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
     (inputs (list python))
     (propagated-inputs
      (list python-numpy python-pytorch python-sentencepiece))
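As a supplementary sketch, not part of the patch, the updated source origin can be read on its own as follows. The hash, commit, and the "master-xxxxxxx" ref come from the hunks above; the repository URL is an assumption based on the existing llama-cpp package definition, which these hunks do not show. Running `guix build llama-cpp` after applying the patch is the usual way to confirm that the new hash matches the pinned commit.

```scheme
;; Hedged sketch of the updated origin; field values are taken from the
;; diff above.  The upstream URL is an assumption, not shown in the hunks.
(use-modules (guix packages)
             (guix git-download))

(define %commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")

(define llama-cpp-source
  (origin
    (method git-fetch)
    (uri (git-reference
          (url "https://github.com/ggerganov/llama.cpp")
          ;; Upstream tags every master commit as "master-" plus the
          ;; first seven characters of the commit hash.
          (commit (string-append "master-" (string-take %commit 7)))))
    (sha256
     (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
```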