cui-llama.rn 1.3.0 → 1.3.3

This diff shows the content of publicly released package versions as published to their public registry, and is provided for informational purposes only.
Files changed (76)
  1. package/android/src/main/CMakeLists.txt +6 -1
  2. package/android/src/main/jni.cpp +6 -6
  3. package/cpp/amx/amx.cpp +196 -0
  4. package/cpp/amx/amx.h +20 -0
  5. package/cpp/amx/common.h +101 -0
  6. package/cpp/amx/mmq.cpp +2524 -0
  7. package/cpp/amx/mmq.h +16 -0
  8. package/cpp/common.cpp +1981 -1682
  9. package/cpp/common.h +636 -600
  10. package/cpp/ggml-aarch64.c +129 -129
  11. package/cpp/ggml-aarch64.h +19 -19
  12. package/cpp/ggml-alloc.c +1038 -1040
  13. package/cpp/ggml-alloc.h +76 -76
  14. package/cpp/ggml-backend-impl.h +238 -216
  15. package/cpp/ggml-backend-reg.cpp +423 -195
  16. package/cpp/ggml-backend.cpp +1999 -1997
  17. package/cpp/ggml-backend.h +351 -328
  18. package/cpp/ggml-common.h +1859 -1853
  19. package/cpp/ggml-cpp.h +38 -38
  20. package/cpp/ggml-cpu-aarch64.c +3823 -3560
  21. package/cpp/ggml-cpu-aarch64.h +32 -30
  22. package/cpp/ggml-cpu-impl.h +386 -371
  23. package/cpp/ggml-cpu-quants.c +10835 -10822
  24. package/cpp/ggml-cpu-quants.h +63 -63
  25. package/cpp/ggml-cpu.c +99 -103
  26. package/cpp/ggml-cpu.cpp +69 -17
  27. package/cpp/ggml-cpu.h +152 -177
  28. package/cpp/ggml-impl.h +556 -550
  29. package/cpp/ggml-metal.h +66 -66
  30. package/cpp/ggml-metal.m +4426 -4294
  31. package/cpp/ggml-quants.c +5247 -5247
  32. package/cpp/ggml-quants.h +100 -100
  33. package/cpp/ggml-threading.cpp +12 -12
  34. package/cpp/ggml-threading.h +12 -12
  35. package/cpp/ggml.c +7618 -8180
  36. package/cpp/ggml.h +2255 -2411
  37. package/cpp/json-schema-to-grammar.cpp +1045 -0
  38. package/cpp/json-schema-to-grammar.h +8 -0
  39. package/cpp/json.hpp +24766 -0
  40. package/cpp/llama-grammar.cpp +1138 -1138
  41. package/cpp/llama-grammar.h +144 -144
  42. package/cpp/llama-impl.h +181 -181
  43. package/cpp/llama-sampling.cpp +2348 -2348
  44. package/cpp/llama-sampling.h +48 -48
  45. package/cpp/llama-vocab.cpp +1984 -1984
  46. package/cpp/llama-vocab.h +170 -170
  47. package/cpp/llama.cpp +22332 -22132
  48. package/cpp/llama.h +1259 -1253
  49. package/cpp/log.cpp +401 -401
  50. package/cpp/log.h +121 -121
  51. package/cpp/rn-llama.hpp +6 -6
  52. package/cpp/sampling.cpp +505 -466
  53. package/cpp/sampling.h +22 -1
  54. package/cpp/sgemm.cpp +1884 -1884
  55. package/cpp/speculative.cpp +270 -0
  56. package/cpp/speculative.h +28 -0
  57. package/cpp/unicode.cpp +11 -0
  58. package/ios/RNLlamaContext.mm +13 -0
  59. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  60. package/lib/commonjs/grammar.js +4 -2
  61. package/lib/commonjs/grammar.js.map +1 -1
  62. package/lib/commonjs/index.js.map +1 -1
  63. package/lib/module/NativeRNLlama.js.map +1 -1
  64. package/lib/module/grammar.js +2 -1
  65. package/lib/module/grammar.js.map +1 -1
  66. package/lib/module/index.js.map +1 -1
  67. package/lib/typescript/NativeRNLlama.d.ts +94 -4
  68. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  69. package/lib/typescript/grammar.d.ts +5 -6
  70. package/lib/typescript/grammar.d.ts.map +1 -1
  71. package/lib/typescript/index.d.ts +4 -2
  72. package/lib/typescript/index.d.ts.map +1 -1
  73. package/package.json +2 -1
  74. package/src/NativeRNLlama.ts +97 -10
  75. package/src/grammar.ts +10 -8
  76. package/src/index.ts +22 -1
package/android/src/main/CMakeLists.txt CHANGED
@@ -13,12 +13,17 @@ set(
  ${RNLLAMA_LIB_DIR}/llama-sampling.cpp
  ${RNLLAMA_LIB_DIR}/llama-vocab.cpp
  ${RNLLAMA_LIB_DIR}/log.cpp
+
+ ${RNLLAMA_LIB_DIR}/amx/amx.cpp
+ ${RNLLAMA_LIB_DIR}/amx/mmq.cpp

  ${RNLLAMA_LIB_DIR}/ggml-aarch64.c
  ${RNLLAMA_LIB_DIR}/llama-grammar.cpp
  ${RNLLAMA_LIB_DIR}/llama-sampling.cpp
  ${RNLLAMA_LIB_DIR}/llama-vocab.cpp
  ${RNLLAMA_LIB_DIR}/log.cpp
+ ${RNLLAMA_LIB_DIR}/json.hpp
+ ${RNLLAMA_LIB_DIR}/json-schema-to-grammar.cpp

  ${RNLLAMA_LIB_DIR}/ggml-aarch64.c
  ${RNLLAMA_LIB_DIR}/ggml-alloc.c
@@ -53,7 +58,7 @@ function(build_library target_name cpu_flags)

  target_link_libraries(${target_name} ${LOG_LIB} android)

- target_compile_options(${target_name} PRIVATE -pthread ${cpu_flags})
+ target_compile_options(${target_name} PRIVATE -pthread ${cpu_flags} -DLM_GGML_USE_CPU -DLM_GGML_USE_CPU_AARCH64)

  if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
  target_compile_options(${target_name} PRIVATE -DRNLLAMA_ANDROID_ENABLE_LOGGING)
package/android/src/main/jni.cpp CHANGED
@@ -259,7 +259,7 @@ Java_com_rnllama_LlamaContext_initContext(

  const char *model_path_chars = env->GetStringUTFChars(model_path_str, nullptr);
  defaultParams.model = model_path_chars;
-
+
  defaultParams.n_ctx = n_ctx;
  defaultParams.n_batch = n_batch;

@@ -281,7 +281,7 @@ Java_com_rnllama_LlamaContext_initContext(
  int default_n_threads = max_threads == 4 ? 2 : min(4, max_threads);
  defaultParams.cpuparams.n_threads = n_threads > 0 ? n_threads : default_n_threads;

- defaultParams.n_gpu_layers = n_gpu_layers;
+ // defaultParams.n_gpu_layers = n_gpu_layers;
  defaultParams.flash_attn = flash_attn;

  const char *cache_type_k_chars = env->GetStringUTFChars(cache_type_k, nullptr);
@@ -558,7 +558,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
  //llama_reset_timings(llama->ctx);

  llama->params.prompt = env->GetStringUTFChars(prompt, nullptr);
- llama->params.sparams.seed = (seed == -1) ? time(NULL) : seed;
+ llama->params.sampling.seed = (seed == -1) ? time(NULL) : seed;

  int max_threads = std::thread::hardware_concurrency();
  // Use 2 threads by default on 4-core devices, 4 threads on more cores
@@ -566,9 +566,9 @@ Java_com_rnllama_LlamaContext_doCompletion(
  llama->params.cpuparams.n_threads = n_threads > 0 ? n_threads : default_n_threads;

  llama->params.n_predict = n_predict;
- llama->params.sparams.ignore_eos = ignore_eos;
+ llama->params.sampling.ignore_eos = ignore_eos;

- auto & sparams = llama->params.sparams;
+ auto & sparams = llama->params.sampling;
  sparams.temp = temperature;
  sparams.penalty_last_n = penalty_last_n;
  sparams.penalty_repeat = penalty_repeat;
@@ -693,7 +693,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
  auto tokenResult = createWriteableMap(env);
  putString(env, tokenResult, "token", to_send.c_str());

- if (llama->params.sparams.n_probs > 0) {
+ if (llama->params.sampling.n_probs > 0) {
  const std::vector<llama_token> to_send_toks = common_tokenize(llama->ctx, to_send, false);
  size_t probs_pos = std::min(sent_token_probs_index, llama->generated_token_probs.size());
  size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama->generated_token_probs.size());
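Note: the sparams → sampling renames above track upstream llama.cpp, where the sampling options moved from common_params::sparams to common_params::sampling (see also the sampling.h/sampling.cpp changes in the file list). A minimal sketch of caller code after the rename, for illustration only — the helper name is hypothetical, and the fields shown are the ones touched in jni.cpp above:

    // Illustrative sketch only; mirrors the field accesses changed in jni.cpp above.
    #include "common.h"  // common_params / sampling params from llama.cpp's common library

    static void apply_basic_sampling_options(common_params & params,
                                             float temperature,
                                             int32_t seed,
                                             bool ignore_eos) {
        auto & sparams = params.sampling;   // was: params.sparams in 1.3.0
        sparams.temp       = temperature;
        sparams.seed       = seed;          // was: params.sparams.seed
        sparams.ignore_eos = ignore_eos;
    }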
package/cpp/amx/amx.cpp ADDED
@@ -0,0 +1,196 @@
+ #include "amx.h"
+ #include "common.h"
+ #include "mmq.h"
+ #include "ggml-backend-impl.h"
+ #include "ggml-backend.h"
+ #include "ggml-impl.h"
+ #include "ggml-cpu.h"
+
+ #if defined(__gnu_linux__)
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #endif
+
+ #include <cstdlib>
+ #include <cstring>
+ #include <memory>
+
+ #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+ // AMX buffer interface
+ static void lm_ggml_backend_amx_buffer_free_buffer(lm_ggml_backend_buffer_t buffer) {
+     free(buffer->context);
+ }
+
+ static void * lm_ggml_backend_amx_buffer_get_base(lm_ggml_backend_buffer_t buffer) {
+     return (void *)(buffer->context);
+ }
+
+ static void lm_ggml_backend_amx_buffer_memset_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+     memset((char *)tensor->data + offset, value, size);
+
+     LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_amx_buffer_set_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+     if (qtype_has_amx_kernels(tensor->type)) {
+         lm_ggml_backend_amx_convert_weight(tensor, data, offset, size);
+     } else {
+         memcpy((char *)tensor->data + offset, data, size);
+     }
+
+     LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_amx_buffer_get_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+     LM_GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
+     memcpy(data, (const char *)tensor->data + offset, size);
+
+     LM_GGML_UNUSED(buffer);
+ }
+
+ static bool lm_ggml_backend_amx_buffer_cpy_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
+     if (lm_ggml_backend_buffer_is_host(src->buffer)) {
+         if (qtype_has_amx_kernels(src->type)) {
+             lm_ggml_backend_amx_convert_weight(dst, src->data, 0, lm_ggml_nbytes(dst));
+         } else {
+             memcpy(dst->data, src->data, lm_ggml_nbytes(src));
+         }
+         return true;
+     }
+     return false;
+
+     LM_GGML_UNUSED(buffer);
+ }
+
+ static void lm_ggml_backend_amx_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
+     memset(buffer->context, value, buffer->size);
+ }
+
+ static lm_ggml_backend_buffer_i lm_ggml_backend_amx_buffer_interface = {
+     /* .free_buffer   = */ lm_ggml_backend_amx_buffer_free_buffer,
+     /* .get_base      = */ lm_ggml_backend_amx_buffer_get_base,
+     /* .init_tensor   = */ NULL, // no initialization required
+     /* .memset_tensor = */ lm_ggml_backend_amx_buffer_memset_tensor,
+     /* .set_tensor    = */ lm_ggml_backend_amx_buffer_set_tensor,
+     /* .get_tensor    = */ lm_ggml_backend_amx_buffer_get_tensor,
+     /* .cpy_tensor    = */ lm_ggml_backend_amx_buffer_cpy_tensor,
+     /* .clear         = */ lm_ggml_backend_amx_buffer_clear,
+     /* .reset         = */ NULL,
+ };
+
+ static const char * lm_ggml_backend_amx_buffer_type_get_name(lm_ggml_backend_buffer_type_t buft) {
+     return "AMX";
+
+     LM_GGML_UNUSED(buft);
+ }
+
+ static lm_ggml_backend_buffer_t lm_ggml_backend_amx_buffer_type_alloc_buffer(lm_ggml_backend_buffer_type_t buft, size_t size) {
+     void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
+     if (data == NULL) {
+         fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
+         return NULL;
+     }
+
+     return lm_ggml_backend_buffer_init(buft, lm_ggml_backend_amx_buffer_interface, data, size);
+ }
+
+ static size_t lm_ggml_backend_amx_buffer_type_get_alignment(lm_ggml_backend_buffer_type_t buft) {
+     return TENSOR_ALIGNMENT;
+
+     LM_GGML_UNUSED(buft);
+ }
+
+ static size_t lm_ggml_backend_amx_buffer_type_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const lm_ggml_tensor* tensor) {
+     return lm_ggml_backend_amx_get_alloc_size(tensor);
+
+     LM_GGML_UNUSED(buft);
+ }
+
+ static bool lm_ggml_backend_amx_buffer_type_is_host(lm_ggml_backend_buffer_type_t buft) {
+     return false;
+
+     LM_GGML_UNUSED(buft);
+ }
+
+ #define ARCH_GET_XCOMP_PERM 0x1022
+ #define ARCH_REQ_XCOMP_PERM 0x1023
+ #define XFEATURE_XTILECFG   17
+ #define XFEATURE_XTILEDATA  18
+
+ static bool lm_ggml_amx_init() {
+ #if defined(__gnu_linux__)
+     if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
+         fprintf(stderr, "AMX is not ready to be used!\n");
+         return false;
+     }
+     return true;
+ #elif defined(_WIN32)
+     return true;
+ #endif
+ }
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_amx_buffer_type() {
+     static struct lm_ggml_backend_buffer_type lm_ggml_backend_buffer_type_amx = {
+         /* .iface = */ {
+             /* .get_name       = */ lm_ggml_backend_amx_buffer_type_get_name,
+             /* .alloc_buffer   = */ lm_ggml_backend_amx_buffer_type_alloc_buffer,
+             /* .get_alignment  = */ lm_ggml_backend_amx_buffer_type_get_alignment,
+             /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
+             /* .get_alloc_size = */ lm_ggml_backend_amx_buffer_type_get_alloc_size,
+             /* .is_host        = */ lm_ggml_backend_amx_buffer_type_is_host,
+         },
+         /* .device  = */ lm_ggml_backend_reg_dev_get(lm_ggml_backend_cpu_reg(), 0),
+         /* .context = */ NULL,
+     };
+
+     if (!lm_ggml_amx_init()) {
+         return NULL;
+     }
+
+     return &lm_ggml_backend_buffer_type_amx;
+ }
+
+ bool lm_ggml_backend_amx_buft_is_amx(lm_ggml_backend_buffer_type_t buft) {
+     return buft->iface.get_name == lm_ggml_backend_amx_buffer_type_get_name;
+ }
+
+ bool lm_ggml_backend_amx_device_supports_op(const struct lm_ggml_tensor * op) {
+     // handle only 2d gemm for now
+     auto is_contiguous_2d = [](const struct lm_ggml_tensor * t) {
+         return lm_ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
+     };
+
+     switch (op->op) {
+         case LM_GGML_OP_NONE:
+         case LM_GGML_OP_RESHAPE:
+         case LM_GGML_OP_VIEW:
+         case LM_GGML_OP_PERMUTE:
+         case LM_GGML_OP_TRANSPOSE:
+             return true;
+
+         case LM_GGML_OP_MUL_MAT: {
+             const struct lm_ggml_tensor * src0 = op->src[0];
+             const struct lm_ggml_tensor * src1 = op->src[1];
+
+             const enum lm_ggml_type type = src0->type;
+             const int64_t ne0 = op->ne[0];
+
+             // amx kernels enables for Q4_0, Q4_1, Q8_0, F16
+             // Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256
+             bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == LM_GGML_TYPE_F16);
+
+             bool can_use_amx =
+                 is_contiguous_2d(src0) &&          // src0 must be contiguous
+                 is_contiguous_2d(src1) &&          // src1 must be contiguous
+                 src1->type == LM_GGML_TYPE_F32 &&  // src1 must be float32
+                 has_amx_kernels &&                 // with amx kernel impls
+                 ne0 % (TILE_N * 2) == 0;           // out_features is 32x
+
+             return can_use_amx;
+         }
+         default:
+             return false;
+     }
+ }
+
+ #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
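Note: in the LM_GGML_OP_MUL_MAT branch above, the check ne0 % (TILE_N * 2) == 0 with TILE_N = 16 (defined in amx/common.h below) means the output feature dimension must be a multiple of 32 for the AMX path to be eligible. A self-contained illustration of that constraint (not code from the package):

    #include <cstdint>
    #include <cstdio>

    constexpr int64_t TILE_N = 16;  // mirrors amx/common.h; two tiles per N block

    static bool out_features_ok(int64_t ne0) {
        return ne0 % (TILE_N * 2) == 0;  // out_features must be a multiple of 32
    }

    int main() {
        std::printf("ne0=4096 -> %d\n", (int) out_features_ok(4096));  // 1: eligible for the AMX mul_mat path
        std::printf("ne0=4100 -> %d\n", (int) out_features_ok(4100));  // 0: not eligible, handled by other CPU kernels
        return 0;
    }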
package/cpp/amx/amx.h ADDED
@@ -0,0 +1,20 @@
+ #include "ggml-backend.h"
+ #include "ggml-cpu-impl.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+ lm_ggml_backend_buffer_type_t lm_ggml_backend_amx_buffer_type(void);
+ bool lm_ggml_backend_amx_buft_is_amx(lm_ggml_backend_buffer_type_t buft);
+ bool lm_ggml_backend_amx_device_supports_op(const struct lm_ggml_tensor * op);
+ void lm_ggml_backend_amx_mul_mat(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+ size_t lm_ggml_backend_amx_desired_wsize(const struct lm_ggml_tensor * dst);
+
+ #endif
+
+ #ifdef __cplusplus
+ }
+ #endif
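Note: a minimal sketch of using the buffer type declared above through the generic backend API, assuming the lm_-prefixed wrappers mirror the upstream ggml_backend_buft_* functions; the helper below is hypothetical and not part of the package:

    #include "amx.h"
    #include "ggml-backend.h"

    #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
    // Guarded the same way as the declarations above.
    static lm_ggml_backend_buffer_t try_alloc_amx_buffer(size_t size) {
        lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_amx_buffer_type();
        if (buft == NULL) {
            // lm_ggml_amx_init() failed, e.g. the kernel refused XTILEDATA permission
            return NULL;
        }
        return lm_ggml_backend_buft_alloc_buffer(buft, size);  // aligned allocation + AMX weight layout handled by the buffer interface
    }
    #endif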
package/cpp/amx/common.h ADDED
@@ -0,0 +1,101 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-cpu-impl.h"
+
+ #include <algorithm>
+ #include <memory>
+ #include <type_traits>
+
+ #if defined(_OPENMP)
+ #include <omp.h>
+ #endif
+
+ #define TILE_M 16
+ #define TILE_N 16
+ #define TILE_K 32
+ #define VNNI_BLK 4
+
+ #define AMX_BLK_SIZE 32
+
+ #define TMM0 0
+ #define TMM1 1
+ #define TMM2 2
+ #define TMM3 3
+ #define TMM4 4
+ #define TMM5 5
+ #define TMM6 6
+ #define TMM7 7
+
+ // parallel routines
+ template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+ inline T div_up(T x, T y) { return (x + y - 1) / y; }
+
+ template <typename T>
+ inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
+ #if 0
+     // onednn partition pattern
+     T& n_my = n_end;
+     if (nth <= 1 || n == 0) {
+         n_start = 0;
+         n_my = n;
+     } else {
+         T n1 = div_up(n, nth);
+         T n2 = n1 - 1;
+         T T1 = n - n2 * nth;
+         n_my = ith < T1 ? n1 : n2;
+         n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
+     }
+     n_end += n_start;
+ #else
+     // pytorch aten partition pattern
+     T n_my = div_up(n, nth);
+     n_start = ith * n_my;
+     n_end = std::min(n_start + n_my, n);
+ #endif
+ }
+
+ template <typename func_t>
+ inline void parallel_for(int nth, int n, const func_t& f) {
+ #if defined(_OPENMP)
+ #pragma omp parallel num_threads(nth)
+ {
+     //int nth = omp_get_num_threads();
+     int ith = omp_get_thread_num();
+     int tbegin, tend;
+     balance211(n, nth, ith, tbegin, tend);
+     f(tbegin, tend);
+ }
+ #else
+     f(0, n);
+
+     LM_GGML_UNUSED(nth);
+ #endif
+ }
+
+ template <typename func_t>
+ inline void parallel_for_ggml(const lm_ggml_compute_params * params, int n, const func_t & f) {
+     int tbegin, tend;
+     balance211(n, params->nth, params->ith, tbegin, tend);
+     f(tbegin, tend);
+     lm_ggml_barrier(params->threadpool); // TODO: might not always be needed
+ }
+
+ // quantized types that have AMX support
+ inline bool qtype_has_amx_kernels(const enum lm_ggml_type type) {
+     // TODO: fix padding for vnni format
+     return (type == LM_GGML_TYPE_Q4_0) ||
+         (type == LM_GGML_TYPE_Q4_1) ||
+         (type == LM_GGML_TYPE_Q8_0) ||
+         (type == LM_GGML_TYPE_Q4_K) ||
+         (type == LM_GGML_TYPE_Q5_K) ||
+         (type == LM_GGML_TYPE_Q6_K) ||
+         (type == LM_GGML_TYPE_IQ4_XS);
+ }
+
+ // ggml backend context
+ struct lm_ggml_backend_amx_context {
+     int n_threads = LM_GGML_DEFAULT_N_THREADS;
+     std::unique_ptr<char[]> work_data;
+     size_t work_size = 0;
+ };
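Note: with the "pytorch aten" pattern active above, balance211 gives each of nth workers a contiguous chunk of div_up(n, nth) items, clamping the last chunk to n. A self-contained illustration of the resulting partition (not code from the package):

    #include <algorithm>
    #include <cstdio>

    template <typename T>
    static T div_up(T x, T y) { return (x + y - 1) / y; }

    int main() {
        const int n = 10, nth = 4;  // 10 work items split across 4 threads
        for (int ith = 0; ith < nth; ++ith) {
            const int chunk   = div_up(n, nth);               // 3 items per thread, rounded up
            const int n_start = ith * chunk;                  // 0, 3, 6, 9
            const int n_end   = std::min(n_start + chunk, n); // 3, 6, 9, 10
            std::printf("thread %d -> [%d, %d)\n", ith, n_start, n_end);
        }
        return 0;
    }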