llama_cpp 0.5.2 → 0.5.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/ext/llama_cpp/llama_cpp.cpp +2 -2
- data/ext/llama_cpp/src/ggml-alloc.c +6 -6
- data/ext/llama_cpp/src/ggml-cuda.cu +99 -46
- data/ext/llama_cpp/src/ggml-metal.m +37 -10
- data/ext/llama_cpp/src/ggml-metal.metal +144 -45
- data/ext/llama_cpp/src/ggml-opencl.cpp +3 -3
- data/ext/llama_cpp/src/ggml.c +68 -40
- data/ext/llama_cpp/src/ggml.h +43 -33
- data/ext/llama_cpp/src/llama.cpp +420 -57
- data/ext/llama_cpp/src/llama.h +5 -1
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
data/ext/llama_cpp/src/llama.h
CHANGED
@@ -374,6 +374,7 @@ extern "C" {
|
|
374
374
|
LLAMA_API int llama_tokenize(
|
375
375
|
struct llama_context * ctx,
|
376
376
|
const char * text,
|
377
|
+
int text_len,
|
377
378
|
llama_token * tokens,
|
378
379
|
int n_max_tokens,
|
379
380
|
bool add_bos);
|
@@ -381,6 +382,7 @@ extern "C" {
|
|
381
382
|
LLAMA_API int llama_tokenize_with_model(
|
382
383
|
const struct llama_model * model,
|
383
384
|
const char * text,
|
385
|
+
int text_len,
|
384
386
|
llama_token * tokens,
|
385
387
|
int n_max_tokens,
|
386
388
|
bool add_bos);
|
@@ -540,7 +542,9 @@ extern "C" {
|
|
540
542
|
|
541
543
|
struct ggml_tensor;
|
542
544
|
|
543
|
-
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);
|
545
|
+
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
|
546
|
+
struct llama_context * ctx
|
547
|
+
);
|
544
548
|
|
545
549
|
#endif // LLAMA_API_INTERNAL
|
546
550
|
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LLaMACpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.5.2'
|
6
|
+
VERSION = '0.5.3'
|
7
7
|
|
8
8
|
# The version of llama.cpp bundled with llama_cpp.rb.
|
9
|
-
LLAMA_CPP_VERSION = '
|
9
|
+
LLAMA_CPP_VERSION = 'b1266'
|
10
10
|
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.5.2
|
4
|
+
version: 0.5.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2023-09-
|
11
|
+
date: 2023-09-22 00:00:00.000000000 Z
|
12
12
|
dependencies: []
|
13
13
|
description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
14
14
|
email:
|