llama_cpp 0.12.5 → 0.12.6
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/ext/llama_cpp/llama_cpp.cpp +46 -0
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +7 -0
- data/vendor/tmp/llama.cpp/Makefile +9 -1
- data/vendor/tmp/llama.cpp/ggml-alloc.c +563 -490
- data/vendor/tmp/llama.cpp/ggml-alloc.h +39 -65
- data/vendor/tmp/llama.cpp/ggml-backend.c +250 -262
- data/vendor/tmp/llama.cpp/ggml-backend.h +8 -12
- data/vendor/tmp/llama.cpp/ggml-metal.m +2 -0
- data/vendor/tmp/llama.cpp/ggml-quants.c +347 -40
- data/vendor/tmp/llama.cpp/ggml-quants.h +14 -14
- data/vendor/tmp/llama.cpp/ggml-sycl.cpp +14 -61
- data/vendor/tmp/llama.cpp/ggml-vulkan.cpp +89 -6
- data/vendor/tmp/llama.cpp/ggml.c +134 -60
- data/vendor/tmp/llama.cpp/ggml.h +26 -6
- data/vendor/tmp/llama.cpp/llama.cpp +654 -130
- data/vendor/tmp/llama.cpp/llama.h +6 -0
- data/vendor/tmp/llama.cpp/unicode.h +42 -30
- metadata +2 -2
data/vendor/tmp/llama.cpp/ggml-alloc.h

@@ -6,88 +6,62 @@
 extern "C" {
 #endif
 
-struct ggml_backend;
-struct ggml_backend_buffer;
-struct ggml_backend_buffer_type;
+typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
+typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+typedef struct ggml_backend * ggml_backend_t;
 
-//
-// Legacy API
-//
-
-typedef struct ggml_allocr * ggml_allocr_t;
-
-// initialize allocator for use with CPU backend only
-GGML_API ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment);
-GGML_API ggml_allocr_t ggml_allocr_new_measure(size_t alignment);
-
-// initialize allocator for use with ggml-backend
-GGML_API ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer);
-GGML_API ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
-GGML_API ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend);
-
-GGML_API struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc);
-
-// tell the allocator to parse nodes following the order described in the list
-// you should call this if your graph are optimized to execute out-of-order
-GGML_API void   ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n);
-
-GGML_API void   ggml_allocr_free       (ggml_allocr_t alloc);
-GGML_API bool   ggml_allocr_is_measure (ggml_allocr_t alloc);
-GGML_API void   ggml_allocr_reset      (ggml_allocr_t alloc);
-GGML_API void   ggml_allocr_alloc      (ggml_allocr_t alloc, struct ggml_tensor * tensor);
-GGML_API size_t ggml_allocr_max_size   (ggml_allocr_t alloc);
-
-GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph);
+// Tensor allocator
+typedef struct ggml_tallocr * ggml_tallocr_t;
 
-//
-// ggml-backend v2 API
-//
+GGML_API ggml_tallocr_t ggml_tallocr_new(ggml_backend_buffer_t buffer);
+GGML_API void           ggml_tallocr_free(ggml_tallocr_t talloc);
+GGML_API void           ggml_tallocr_alloc(ggml_tallocr_t talloc, struct ggml_tensor * tensor);
 
-// Separate tensor and graph allocator objects
-// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
-// The original API is kept as a wrapper around the new API
+// Graph allocator
+/*
+  Example usage:
+    ggml_gallocr_t galloc = ggml_gallocr_new(ggml_bacckend_cpu_buffer_type());
 
-// Tensor allocator
-typedef struct ggml_tallocr * ggml_tallocr_t;
+    // optional: create a worst-case graph and reserve the buffers to avoid reallocations
+    ggml_gallocr_reserve(galloc, build_graph(max_batch));
 
-GGML_API ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment);
-GGML_API ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment);
-GGML_API ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size);
-GGML_API ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
-GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer);
-GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft);
-GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend);
+    // allocate the graph
+    struct ggml_cgraph * graph = build_graph(batch);
+    ggml_gallocr_alloc_graph(galloc, graph);
 
-GGML_API struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t talloc);
+    printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0));
 
-GGML_API void   ggml_tallocr_free       (ggml_tallocr_t talloc);
-GGML_API bool   ggml_tallocr_is_measure (ggml_tallocr_t talloc);
-GGML_API void   ggml_tallocr_reset      (ggml_tallocr_t talloc);
-GGML_API void   ggml_tallocr_alloc      (ggml_tallocr_t talloc, struct ggml_tensor * tensor);
-GGML_API size_t ggml_tallocr_max_size   (ggml_tallocr_t talloc);
+    // evaluate the graph
+    ggml_backend_graph_compute(backend, graph);
+*/
 
+// special tensor flags for use with the graph allocator:
+//   ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses
+//   ggml_set_output(): output tensors are never freed and never overwritten
 
-// Graph allocator
 typedef struct ggml_gallocr * ggml_gallocr_t;
 
-GGML_API ggml_gallocr_t ggml_gallocr_new(void);
-GGML_API void   ggml_gallocr_free(ggml_gallocr_t galloc);
+GGML_API ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft);
+GGML_API ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs);
+GGML_API void           ggml_gallocr_free(ggml_gallocr_t galloc);
 
-GGML_API void   ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n);
-GGML_API size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph);
+// pre-allocate buffers from a measure graph - does not allocate or modify the graph
+// call with a worst-case graph to avoid buffer reallocations
+// not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed
+// returns false if the buffer allocation failed
+GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
+GGML_API bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids);
 
-// Allocate tensors from the allocators given by the hash table
-GGML_API void   ggml_gallocr_alloc_graph_n(
-                    ggml_gallocr_t galloc,
-                    struct ggml_cgraph * graph,
-                    struct ggml_hash_set hash_set,
-                    ggml_tallocr_t * hash_node_talloc);
+// automatic reallocation if the topology changes when using a single buffer
+// returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers)
+GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
 
+GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id);
 
 // Utils
 // Create a buffer and allocate all the tensors in a ggml_context
-GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, struct ggml_backend_buffer_type * buft);
-GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, struct ggml_backend * backend);
+GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft);
+GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend);
 
 #ifdef __cplusplus
 }
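
A minimal sketch of how the new allocator API in this hunk is meant to be used, assembled from the usage comment in the header above. This is not code from the gem: the toy y = x * x graph, the tensor size, and the variable names are illustrative; only the ggml_gallocr_*, ggml_backend_*, and ggml_* calls come from the vendored headers changed in this release.

#include <stdio.h>
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

int main(void) {
    const int n = 1024;

    // the context holds only tensor/graph metadata; tensor data comes from the graph allocator
    struct ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead() * 8 + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);

    // build a toy graph: y = x * x
    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n);
    ggml_set_name(x, "x");
    ggml_set_input(x);   // inputs get non-overlapping addresses at the start of the graph
    struct ggml_tensor * y = ggml_mul(ctx, x, x);
    ggml_set_output(y);  // outputs are never freed or overwritten by the allocator
    struct ggml_cgraph * graph = ggml_new_graph(ctx);
    ggml_build_forward_expand(graph, y);

    // new in this release: the graph allocator is created from a backend buffer type
    ggml_backend_t backend = ggml_backend_cpu_init();
    ggml_gallocr_t galloc  = ggml_gallocr_new(ggml_backend_cpu_buffer_type());

    // optionally reserve with a worst-case graph, then allocate the actual graph
    ggml_gallocr_reserve(galloc, graph);
    ggml_gallocr_alloc_graph(galloc, graph);
    printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0));

    // set the input and run the graph on the backend
    float x_data[1024];
    for (int i = 0; i < n; ++i) x_data[i] = (float) i;
    ggml_backend_tensor_set(ggml_graph_get_tensor(graph, "x"), x_data, 0, sizeof(x_data));
    ggml_backend_graph_compute(backend, graph);

    float y0;
    ggml_backend_tensor_get(y, &y0, 0, sizeof(y0));
    printf("y[0] = %f\n", y0);

    ggml_gallocr_free(galloc);
    ggml_backend_free(backend);
    ggml_free(ctx);
    return 0;
}

The visible API shift in the hunk: allocators are no longer created from a raw pointer, size, and alignment, and the separate "measure" allocators are gone; instead, ggml_gallocr_new takes a backend buffer type and ggml_gallocr_reserve on a worst-case graph replaces the measuring pass.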