cui-llama.rn 1.2.1 → 1.2.3
- package/android/src/main/CMakeLists.txt +2 -2
- package/android/src/main/java/com/rnllama/LlamaContext.java +32 -7
- package/cpp/common.cpp +36 -1
- package/cpp/common.h +5 -1
- package/cpp/ggml-aarch64.c +2 -11
- package/cpp/ggml-alloc.h +1 -1
- package/cpp/ggml-backend-impl.h +151 -78
- package/cpp/{ggml-backend.c → ggml-backend.cpp} +565 -269
- package/cpp/ggml-backend.h +147 -62
- package/cpp/ggml-impl.h +15 -0
- package/cpp/ggml-metal.h +8 -9
- package/cpp/ggml-metal.m +2428 -2111
- package/cpp/ggml-quants.c +2 -2
- package/cpp/ggml-quants.h +0 -4
- package/cpp/ggml.c +799 -1121
- package/cpp/ggml.h +79 -72
- package/cpp/llama-vocab.cpp +189 -106
- package/cpp/llama-vocab.h +18 -9
- package/cpp/llama.cpp +736 -341
- package/cpp/llama.h +9 -4
- package/cpp/unicode-data.cpp +6 -4
- package/cpp/unicode-data.h +4 -4
- package/cpp/unicode.cpp +14 -7
- package/package.json +1 -1
package/android/src/main/CMakeLists.txt
CHANGED
@@ -16,7 +16,7 @@ set(
 
     ${RNLLAMA_LIB_DIR}/ggml-aarch64.c
     ${RNLLAMA_LIB_DIR}/ggml-alloc.c
-    ${RNLLAMA_LIB_DIR}/ggml-backend.c
+    ${RNLLAMA_LIB_DIR}/ggml-backend.cpp
     ${RNLLAMA_LIB_DIR}/ggml.c
     ${RNLLAMA_LIB_DIR}/ggml-quants.c
     ${RNLLAMA_LIB_DIR}/common.cpp
@@ -55,7 +55,7 @@ function(build_library target_name cpu_flags)
     # NOTE: If you want to debug the native code, you can uncomment if and endif
     # Note that it will be extremely slow
     # if (NOT ${CMAKE_BUILD_TYPE} STREQUAL "Debug")
-    target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG)
+    target_compile_options(${target_name} PRIVATE -O3 -DNDEBUG -DRNLLAMA_USE_FD_FILE)
     target_compile_options(${target_name} PRIVATE -fvisibility=hidden -fvisibility-inlines-hidden)
     target_compile_options(${target_name} PRIVATE -ffunction-sections -fdata-sections)
package/android/src/main/java/com/rnllama/LlamaContext.java
CHANGED
@@ -10,6 +10,9 @@ import com.facebook.react.modules.core.DeviceEventManagerModule;
 
 import android.util.Log;
 import android.os.Build;
+import android.os.ParcelFileDescriptor;
+import android.net.Uri;
+import android.content.Intent;
 import android.content.res.AssetManager;
 
 import java.lang.StringBuilder;
@@ -17,6 +20,7 @@ import java.io.BufferedReader;
 import java.io.FileReader;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.FileInputStream;
 
 public class LlamaContext {
@@ -31,11 +35,18 @@ public class LlamaContext {
 
   private byte[] ggufHeader = {0x47, 0x47, 0x55, 0x46};
 
-  private boolean isGGUF(final String filepath) {
+  private boolean isGGUF(final String filepath, final ReactApplicationContext reactContext) {
     byte[] fileHeader = new byte[4];
-
+    InputStream fis = null;
     try {
-
+      if (filepath.startsWith("content")) {
+        Uri uri = Uri.parse(filepath);
+        reactContext.getApplicationContext().getContentResolver().takePersistableUriPermission(uri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
+        fis = reactContext.getApplicationContext().getContentResolver().openInputStream(uri);
+      } else {
+        fis = new FileInputStream(filepath);
+      }
+
       int bytesRead = fis.read(fileHeader);
       if(bytesRead < 4) {
         return false;
@@ -46,13 +57,14 @@ public class LlamaContext {
       }
       return true;
     } catch (Exception e) {
+      Log.e(NAME, "Failed to check GGUF: " + e.getMessage());
       return false;
     }finally {
       if (fis != null) {
         try {
           fis.close();
         } catch (Exception e) {
-          Log.d(NAME, "Closing
+          Log.d(NAME, "Closing InputStream failed.");
         }
       }
     }
@@ -65,16 +77,29 @@ public class LlamaContext {
     if (!params.hasKey("model")) {
       throw new IllegalArgumentException("Missing required parameter: model");
     }
-
-
+
+    String modelName = params.getString("model");
+
+    if(!isGGUF(modelName, reactContext)) {
       throw new IllegalArgumentException("File is not in GGUF format");
     }
 
+    if ( modelName.startsWith("content://")) {
+      Uri uri = Uri.parse(modelName);
+      try {
+        ParcelFileDescriptor pfd = reactContext.getApplicationContext().getContentResolver().openFileDescriptor(uri, "r");
+        modelName = "" + pfd.getFd();
+      } catch (Exception e) {
+        Log.e(NAME, "Failed to convert to FD!");
+      }
+    }
+
+    // Check if file has GGUF magic numbers
     this.id = id;
     eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);
     this.context = initContext(
       // String model,
-
+      modelName,
       // boolean embedding,
       params.hasKey("embedding") ? params.getBoolean("embedding") : false,
       // int n_ctx,
package/cpp/common.cpp
CHANGED
@@ -844,6 +844,31 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         return iparams;
     }
 
+    if (params.reranking) {
+        bool ok = true;
+
+        if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (!ok) {
+            llama_free_model(model);
+
+            return iparams;
+        }
+    }
+
     auto cparams = llama_context_params_from_gpt_params(params);
 
     llama_context * lctx = llama_new_context_with_model(model, cparams);
@@ -861,6 +886,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (cvec.n_embd == -1) {
             llama_free(lctx);
             llama_free_model(model);
+
             return iparams;
         }
 
@@ -873,6 +899,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
            if (err) {
                llama_free(lctx);
                llama_free_model(model);
+
                return iparams;
            }
        }
@@ -895,7 +922,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
        llama_lora_adapters_apply(lctx, iparams.lora_adapters);
    }
 
-    if (params.sparams.ignore_eos && llama_token_eos(model) ==
+    if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
        LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__);
        params.sparams.ignore_eos = false;
    }
@@ -936,6 +963,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
 
    iparams.model   = model;
    iparams.context = lctx;
+
    return iparams;
 }
 
@@ -1033,6 +1061,11 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
    cparams.flash_attn   = params.flash_attn;
    cparams.no_perf      = params.no_perf;
 
+    if (params.reranking) {
+        cparams.embeddings   = true;
+        cparams.pooling_type = LLAMA_POOLING_TYPE_RANK;
+    }
+
    cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
    cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
 
@@ -1442,6 +1475,8 @@ void llama_batch_add(
                          llama_pos   pos,
    const std::vector<llama_seq_id> & seq_ids,
                               bool   logits) {
+    LM_GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded");
+
    batch.token   [batch.n_tokens] = id;
    batch.pos     [batch.n_tokens] = pos;
    batch.n_seq_id[batch.n_tokens] = seq_ids.size();
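Taken together, the common.cpp hunks above gate reranking on the model having BOS, EOS and SEP tokens, and switch the context into embeddings mode with rank pooling. A minimal sketch of the same pattern, using the llama.cpp API names that appear in this diff (an illustration, not code from the package):

// Sketch: mirrors the reranking checks and context setup added above.
static bool model_can_rerank(const llama_model * model) {
    return llama_token_bos(model) != LLAMA_TOKEN_NULL &&
           llama_token_eos(model) != LLAMA_TOKEN_NULL &&
           llama_token_sep(model) != LLAMA_TOKEN_NULL;
}

static llama_context_params rerank_context_params(llama_context_params cparams) {
    cparams.embeddings   = true;                    // rank scores come out of the embeddings path
    cparams.pooling_type = LLAMA_POOLING_TYPE_RANK; // one relevance score per input sequence
    return cparams;
}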
package/cpp/common.h
CHANGED
@@ -290,6 +290,7 @@ struct gpt_params {
     int32_t embd_normalize = 2;    // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
     std::string embd_out   = "";   // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
     std::string embd_sep   = "\n"; // separator of embendings
+    bool reranking = false;        // enable reranking support on server
 
     // server params
     int32_t port = 8080; // server listens on this network port
@@ -308,7 +309,10 @@ struct gpt_params {
     std::string ssl_file_key  = ""; // NOLINT
     std::string ssl_file_cert = ""; // NOLINT
 
-
+    // "advanced" endpoints are disabled by default for better security
+    bool webui = true;
+    bool endpoint_slots   = false;
+    bool endpoint_props   = false; // only control POST requests, not GET
     bool endpoint_metrics = false;
 
     bool log_json = false;
package/cpp/ggml-aarch64.c
CHANGED
@@ -598,15 +598,6 @@ size_t quantize_q4_0_8x8(const float * restrict src, void * restrict dst, int64_
     return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 8, 8);
 }
 
-// Return the number of byte lanes in the SVE vector if SVE is supported; otherwise, returns 0 if SVE is not supported.
-static int sve_lane_count(void) {
-#if defined(__ARM_FEATURE_SVE)
-    return lm_ggml_sve_cnt_b;
-#else
-    return 0;
-#endif
-}
-
 void lm_ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
@@ -843,7 +834,7 @@ void lm_ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void
 
 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
 #if defined(__ARM_FEATURE_SVE)
-    if (lm_ggml_cpu_has_sve() &&
+    if (lm_ggml_cpu_has_sve() && lm_ggml_cpu_get_sve_cnt() == QK8_0) {
         const void * b_ptr = vx;
         const void * a_ptr = vy;
         float * res_ptr = s;
@@ -2020,7 +2011,7 @@ void lm_ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void
 
 #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
 #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
-    if (lm_ggml_cpu_has_sve() && lm_ggml_cpu_has_matmul_int8() &&
+    if (lm_ggml_cpu_has_sve() && lm_ggml_cpu_has_matmul_int8() && lm_ggml_cpu_get_sve_cnt() == QK8_0) {
         const void * b_ptr = vx;
         const void * a_ptr = vy;
         float * res_ptr = s;
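Both ggml-aarch64.c hunks replace the removed sve_lane_count() helper with lm_ggml_cpu_get_sve_cnt() while keeping the same runtime guard: the SVE kernels only run when the vector length matches the QK8_0 block size (32 bytes, i.e. 256-bit vectors). A hedged sketch of that dispatch shape; the two kernel calls are placeholders, not functions from this file:

// Illustration of the guard used by lm_ggml_gemv/gemm_q4_0_8x8_q8_0 above.
void gemv_q4_0_dispatch_sketch(void) {
#if defined(__ARM_FEATURE_SVE)
    if (lm_ggml_cpu_has_sve() && lm_ggml_cpu_get_sve_cnt() == QK8_0) {
        // 256-bit SVE: one vector register covers exactly one QK8_0 block
        // run_sve_kernel(...);      // placeholder
        return;
    }
#endif
    // run_fallback_kernel(...);     // placeholder: NEON/scalar path
}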
package/cpp/ggml-alloc.h
CHANGED
@@ -24,7 +24,7 @@ LM_GGML_API void lm_ggml_tallocr_alloc(struct lm_ggml_tallocr * t
 // Graph allocator
 /*
   Example usage:
-    lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(
+    lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());
 
     // optional: create a worst-case graph and reserve the buffers to avoid reallocations
     lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));
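The corrected line above restores the example in ggml-alloc.h's graph-allocator comment. Spelled out a little further, the documented usage pattern looks roughly like this; build_graph, max_batch and batch are the caller's own, as in the original comment, and lm_ggml_gallocr_alloc_graph/lm_ggml_gallocr_free are assumed to be the prefixed counterparts of the usual ggml allocator calls (a sketch, not code from the package):

// Sketch of the usage pattern documented in ggml-alloc.h.
lm_ggml_gallocr_t galloc = lm_ggml_gallocr_new(lm_ggml_backend_cpu_buffer_type());

// optional: reserve worst-case buffers once to avoid reallocations later
lm_ggml_gallocr_reserve(galloc, build_graph(max_batch));

// per evaluation: build the graph, allocate its tensors, then run it on a backend
struct lm_ggml_cgraph * graph = build_graph(batch);
lm_ggml_gallocr_alloc_graph(galloc, graph);
// ... evaluate the graph ...

lm_ggml_gallocr_free(galloc);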
package/cpp/ggml-backend-impl.h
CHANGED
@@ -9,145 +9,218 @@ extern "C" {
 #endif
 
 //
-// Backend buffer
+// Backend buffer type
 //
 
-// buffer type
-typedef void * lm_ggml_backend_buffer_type_context_t;
-
 struct lm_ggml_backend_buffer_type_i {
-    const char * (*
+    const char * (*get_name)      (lm_ggml_backend_buffer_type_t buft);
     // allocate a buffer of this type
-    lm_ggml_backend_buffer_t (*
+    lm_ggml_backend_buffer_t (*alloc_buffer)  (lm_ggml_backend_buffer_type_t buft, size_t size);
     // tensor alignment
-    size_t (*
-    // max buffer size that can be allocated
-    size_t (*
-    // data size needed to allocate the tensor, including padding
-    size_t (*
-    // check if tensor data is in host memory
-    bool (*
+    size_t (*get_alignment)  (lm_ggml_backend_buffer_type_t buft);
+    // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
+    size_t (*get_max_size)   (lm_ggml_backend_buffer_type_t buft);
+    // (optional) data size needed to allocate the tensor, including padding (defaults to lm_ggml_nbytes)
+    size_t (*get_alloc_size)(lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor);
+    // (optional) check if tensor data is in host memory (defaults to false)
+    bool (*is_host)       (lm_ggml_backend_buffer_type_t buft);
 };
 
 struct lm_ggml_backend_buffer_type {
     struct lm_ggml_backend_buffer_type_i iface;
-
+    lm_ggml_backend_dev_t device;
+    void * context;
 };
 
-//
-
+//
+// Backend buffer
+//
 
 struct lm_ggml_backend_buffer_i {
-    const char * (*
-
-    void
-
-    void
-
-    void (*
-
-    void (*
-    void (*
+    const char * (*get_name)     (lm_ggml_backend_buffer_t buffer);
+    // (optional) free the buffer
+    void         (*free_buffer)  (lm_ggml_backend_buffer_t buffer);
+    // base address of the buffer
+    void *       (*get_base)     (lm_ggml_backend_buffer_t buffer);
+    // (optional) initialize a tensor in the buffer (eg. add tensor extras)
+    void         (*init_tensor)  (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
+    // tensor data access
+    void         (*memset_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+    void         (*set_tensor)   (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void         (*get_tensor)   (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
+    bool         (*cpy_tensor)   (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
+    // clear the entire buffer
+    void         (*clear)        (lm_ggml_backend_buffer_t buffer, uint8_t value);
+    // (optional) reset any internal state due to tensor initialization, such as tensor extras
+    void         (*reset)        (lm_ggml_backend_buffer_t buffer);
 };
 
 struct lm_ggml_backend_buffer {
     struct lm_ggml_backend_buffer_i iface;
     lm_ggml_backend_buffer_type_t buft;
-
+    void * context;
     size_t size;
     enum lm_ggml_backend_buffer_usage usage;
 };
 
-
-    lm_ggml_backend_buffer_type_t
-    struct lm_ggml_backend_buffer_i
-
-    size_t
+lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
+        lm_ggml_backend_buffer_type_t buft,
+        struct lm_ggml_backend_buffer_i iface,
+        void * context,
+        size_t size);
 
 // do not use directly, use lm_ggml_backend_tensor_copy instead
 bool lm_ggml_backend_buffer_copy_tensor(const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
 
+// multi-buffer
 // buffer that contains a collection of buffers
-
-
-
+lm_ggml_backend_buffer_t lm_ggml_backend_multi_buffer_alloc_buffer(lm_ggml_backend_buffer_t * buffers, size_t n_buffers);
+bool lm_ggml_backend_buffer_is_multi_buffer(lm_ggml_backend_buffer_t buffer);
+void lm_ggml_backend_multi_buffer_set_usage(lm_ggml_backend_buffer_t buffer, enum lm_ggml_backend_buffer_usage usage);
 
 //
-// Backend
+// Backend (stream)
 //
 
-typedef void * lm_ggml_backend_context_t;
-
 struct lm_ggml_backend_i {
-    const char * (*
+    const char * (*get_name)(lm_ggml_backend_t backend);
 
-    void (*
+    void (*free)(lm_ggml_backend_t backend);
 
+    // Will be moved to the device interface
     // buffer allocation
-    lm_ggml_backend_buffer_type_t (*
+    lm_ggml_backend_buffer_type_t (*get_default_buffer_type)(lm_ggml_backend_t backend);
 
     // (optional) asynchronous tensor data access
-    void (*
-    void (*
-    bool (*
+    void (*set_tensor_async)(lm_ggml_backend_t backend,       struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor,       void * data, size_t offset, size_t size);
+    bool (*cpy_tensor_async)(lm_ggml_backend_t backend_src, lm_ggml_backend_t backend_dst, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
 
     // (optional) complete all pending operations
-    void (*
+    void (*synchronize)(lm_ggml_backend_t backend);
 
-    // compute graph with a plan (not used currently)
-
-
-    void (*LM_GGML_CALL graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
+    // (optional) compute graph with a plan (not used currently)
+    lm_ggml_backend_graph_plan_t (*graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
+    void                         (*graph_plan_free)   (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
     // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
-    void (*
+    void                         (*graph_plan_update) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan, const struct lm_ggml_cgraph * cgraph);
     // compute the graph with the plan
-    enum lm_ggml_status (*
-
-    // compute graph without a plan (async)
-    enum lm_ggml_status (*LM_GGML_CALL graph_compute) (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
-
-    // check if the backend can compute an operation
-    bool (*LM_GGML_CALL supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+    enum lm_ggml_status          (*graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
 
-    //
-
+    // compute graph (always async if supported by the backend)
+    enum lm_ggml_status          (*graph_compute)     (lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
 
-    //
-    //
-    //
-    bool (*
+    // IMPORTANT: these functions have been moved to the device interface and will be removed from the backend interface
+    // new backends should implement the device interface instead
+    // These functions are being moved to the device interface
+    bool (*supports_op) (lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
+    bool (*supports_buft)(lm_ggml_backend_t backend, lm_ggml_backend_buffer_type_t buft);
+    bool (*offload_op)  (lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
 
     // (optional) event synchronization
-    //
-
-
-
-    void (*LM_GGML_CALL event_record)      (lm_ggml_backend_event_t event);
-    // wait for an event on on a different backend instance
-    void (*LM_GGML_CALL event_wait)        (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
-    // block until an event is recorded
-    void (*LM_GGML_CALL event_synchronize) (lm_ggml_backend_event_t event);
+    // record an event on this stream
+    void (*event_record)(lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
+    // wait for an event on on a different stream
+    void (*event_wait)  (lm_ggml_backend_t backend, lm_ggml_backend_event_t event);
 };
 
 struct lm_ggml_backend {
     lm_ggml_guid_t guid;
-
     struct lm_ggml_backend_i iface;
-
+    lm_ggml_backend_dev_t device;
+    void * context;
 };
 
 struct lm_ggml_backend_event {
-
+    struct lm_ggml_backend_device * device;
+    void * context;
+};
+
+//
+// Backend device
+//
+
+// Note: if additional properties are needed, we should add a struct with all of them
+// the current functions to obtain the properties can remain, since they are more convenient for often used properties
+struct lm_ggml_backend_device_i {
+    // device name: short identifier for this device, such as "CPU" or "CUDA0"
+    const char * (*get_name)(lm_ggml_backend_dev_t dev);
+
+    // device description: short informative description of the device, could be the model name
+    const char * (*get_description)(lm_ggml_backend_dev_t dev);
+
+    // device memory in bytes
+    void (*get_memory)(lm_ggml_backend_dev_t dev, size_t * free, size_t * total);
+
+    // device type
+    enum lm_ggml_backend_dev_type (*get_type)(lm_ggml_backend_dev_t dev);
+
+    // device properties
+    void (*get_props)(lm_ggml_backend_dev_t dev, struct lm_ggml_backend_dev_props * props);
+
+    // backend (stream) initialization
+    lm_ggml_backend_t (*init_backend)(lm_ggml_backend_dev_t dev, const char * params);
+
+    // preferred buffer type
+    lm_ggml_backend_buffer_type_t (*get_buffer_type)(lm_ggml_backend_dev_t dev);
+
+    // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
+    lm_ggml_backend_buffer_type_t (*get_host_buffer_type)(lm_ggml_backend_dev_t dev);
+
+    // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
+    lm_ggml_backend_buffer_t (*buffer_from_host_ptr)(lm_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+    // check if the backend can compute an operation
+    bool (*supports_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
+
+    // check if the backend can use tensors allocated in a buffer type
+    bool (*supports_buft)(lm_ggml_backend_dev_t dev, lm_ggml_backend_buffer_type_t buft);
+
+    // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
+    // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
+    bool (*offload_op)(lm_ggml_backend_dev_t dev, const struct lm_ggml_tensor * op);
+
+    // (optional) event synchronization
+    lm_ggml_backend_event_t (*event_new)         (lm_ggml_backend_dev_t dev);
+    void                    (*event_free)        (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
+    void                    (*event_synchronize) (lm_ggml_backend_dev_t dev, lm_ggml_backend_event_t event);
+};
+
+struct lm_ggml_backend_device {
+    struct lm_ggml_backend_device_i iface;
+    lm_ggml_backend_reg_t reg;
     void * context;
 };
 
 //
-// Backend
+// Backend (reg)
 //
 
-
+struct lm_ggml_backend_reg_i {
+    const char * (*get_name)(lm_ggml_backend_reg_t reg);
+
+    // enumerate available devices
+    size_t               (*get_device_count)(lm_ggml_backend_reg_t reg);
+    lm_ggml_backend_dev_t (*get_device)(lm_ggml_backend_reg_t reg, size_t index);
+
+    // (optional) get a pointer to a function in the backend
+    // backends can add custom functions that are not part of the standard ggml-backend interface
+    void * (*get_proc_address)(lm_ggml_backend_reg_t reg, const char * name);
+};
+
+struct lm_ggml_backend_reg {
+    // int api_version; // TODO: for dynamic loading
+    struct lm_ggml_backend_reg_i iface;
+    void * context;
+};
+
 
-
+// Internal backend registry API
+void lm_ggml_backend_register(lm_ggml_backend_reg_t reg);
+void lm_ggml_backend_device_register(lm_ggml_backend_dev_t device);
+// TODO: backends can be loaded as a dynamic library, in which case it needs to export this function
+// typedef lm_ggml_backend_register_t * (*lm_ggml_backend_init)(void);
 
 #ifdef __cplusplus
 }