khmerns-0.0.3-cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
include/ggml-cpu.h ADDED
@@ -0,0 +1,146 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ // the compute plan that needs to be prepared for ggml_graph_compute()
+ // since https://github.com/ggml-org/ggml/issues/287
+ struct ggml_cplan {
+     size_t work_size;    // size of work buffer, calculated by `ggml_graph_plan()`
+     uint8_t * work_data; // work buffer, to be allocated by the caller before calling `ggml_graph_compute()`
+
+     int n_threads;
+     struct ggml_threadpool * threadpool;
+
+     // abort ggml_graph_compute when true
+     ggml_abort_callback abort_callback;
+     void * abort_callback_data;
+ };
+
+ // numa strategies
+ enum ggml_numa_strategy {
+     GGML_NUMA_STRATEGY_DISABLED = 0,
+     GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+     GGML_NUMA_STRATEGY_ISOLATE = 2,
+     GGML_NUMA_STRATEGY_NUMACTL = 3,
+     GGML_NUMA_STRATEGY_MIRROR = 4,
+     GGML_NUMA_STRATEGY_COUNT
+ };
+
+ GGML_BACKEND_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
+ GGML_BACKEND_API bool ggml_is_numa(void); // true if init detected that the system has >1 NUMA node
+
+ GGML_BACKEND_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
+ GGML_BACKEND_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
+
+ GGML_BACKEND_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
+ GGML_BACKEND_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
+
+ GGML_BACKEND_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
+ GGML_BACKEND_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
+
+ GGML_BACKEND_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ GGML_BACKEND_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+ GGML_BACKEND_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
+ GGML_BACKEND_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
+
+ GGML_BACKEND_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ GGML_BACKEND_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+ GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params);
+ GGML_BACKEND_API void ggml_threadpool_free (struct ggml_threadpool * threadpool);
+ GGML_BACKEND_API int ggml_threadpool_get_n_threads (struct ggml_threadpool * threadpool);
+ GGML_BACKEND_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool);
+ GGML_BACKEND_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool);
+
+ // ggml_graph_plan() has to be called before ggml_graph_compute()
+ // when plan.work_size > 0, the caller must allocate memory for plan.work_data
+ GGML_BACKEND_API struct ggml_cplan ggml_graph_plan(
+         const struct ggml_cgraph * cgraph,
+         int n_threads, /* = GGML_DEFAULT_N_THREADS */
+         struct ggml_threadpool * threadpool /* = NULL */ );
+ GGML_BACKEND_API enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
+
+ // same as ggml_graph_compute() but the work data is allocated as a part of the context
+ // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+ GGML_BACKEND_API enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
+
+ //
+ // system info
+ //
+
+ // x86
+ GGML_BACKEND_API int ggml_cpu_has_sse3 (void);
+ GGML_BACKEND_API int ggml_cpu_has_ssse3 (void);
+ GGML_BACKEND_API int ggml_cpu_has_avx (void);
+ GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void);
+ GGML_BACKEND_API int ggml_cpu_has_avx2 (void);
+ GGML_BACKEND_API int ggml_cpu_has_bmi2 (void);
+ GGML_BACKEND_API int ggml_cpu_has_f16c (void);
+ GGML_BACKEND_API int ggml_cpu_has_fma (void);
+ GGML_BACKEND_API int ggml_cpu_has_avx512 (void);
+ GGML_BACKEND_API int ggml_cpu_has_avx512_vbmi(void);
+ GGML_BACKEND_API int ggml_cpu_has_avx512_vnni(void);
+ GGML_BACKEND_API int ggml_cpu_has_avx512_bf16(void);
+ GGML_BACKEND_API int ggml_cpu_has_amx_int8 (void);
+ // ARM
+ GGML_BACKEND_API int ggml_cpu_has_neon (void);
+ GGML_BACKEND_API int ggml_cpu_has_arm_fma (void);
+ GGML_BACKEND_API int ggml_cpu_has_fp16_va (void);
+ GGML_BACKEND_API int ggml_cpu_has_dotprod (void);
+ GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void);
+ GGML_BACKEND_API int ggml_cpu_has_sve (void);
+ GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
+ GGML_BACKEND_API int ggml_cpu_has_sme (void);
+ // other
+ GGML_BACKEND_API int ggml_cpu_has_riscv_v (void);
+ GGML_BACKEND_API int ggml_cpu_get_rvv_vlen (void); // risc-v vector length in bytes
+ GGML_BACKEND_API int ggml_cpu_has_vsx (void);
+ GGML_BACKEND_API int ggml_cpu_has_vxe (void);
+ GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void);
+ GGML_BACKEND_API int ggml_cpu_has_llamafile (void);
+
+ // Internal types and functions exposed for tests and benchmarks
+
+ typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
+                                 const void * GGML_RESTRICT y, size_t by, int nrc);
+
+ struct ggml_type_traits_cpu {
+     ggml_from_float_t from_float;
+     ggml_vec_dot_t vec_dot;
+     enum ggml_type vec_dot_type;
+     int64_t nrows; // number of rows to process simultaneously
+ };
+
+ GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type);
+
+ GGML_BACKEND_API void ggml_cpu_init(void);
+
+ //
+ // CPU backend
+ //
+
+ GGML_BACKEND_API ggml_backend_t ggml_backend_cpu_init(void);
+
+ GGML_BACKEND_API bool ggml_backend_is_cpu (ggml_backend_t backend);
+ GGML_BACKEND_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
+ GGML_BACKEND_API void ggml_backend_cpu_set_threadpool (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
+ GGML_BACKEND_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void);
+
+ GGML_BACKEND_API void ggml_cpu_fp32_to_fp32(const float *, float *, int64_t);
+ GGML_BACKEND_API void ggml_cpu_fp32_to_i32 (const float *, int32_t *, int64_t);
+ GGML_BACKEND_API void ggml_cpu_fp32_to_fp16(const float *, ggml_fp16_t *, int64_t);
+ GGML_BACKEND_API void ggml_cpu_fp16_to_fp32(const ggml_fp16_t *, float *, int64_t);
+ GGML_BACKEND_API void ggml_cpu_fp32_to_bf16(const float *, ggml_bf16_t *, int64_t);
+ GGML_BACKEND_API void ggml_cpu_bf16_to_fp32(const ggml_bf16_t *, float *, int64_t);
+
+ #ifdef __cplusplus
+ }
+ #endif
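
Editor's note: a minimal usage sketch (not part of the package) of the plan/compute contract declared above. It assumes a ggml_cgraph `gf` built by surrounding user code; the plan is created first, the caller allocates the work buffer when `work_size > 0`, and only then is the graph computed.

    #include <stdlib.h>
    #include "ggml.h"
    #include "ggml-cpu.h"

    // Run a pre-built graph on the CPU backend; `gf` is assumed to be a valid ggml_cgraph.
    static enum ggml_status run_graph(struct ggml_cgraph * gf, int n_threads) {
        // 1. plan first: the required work buffer size depends on the graph contents
        struct ggml_cplan plan = ggml_graph_plan(gf, n_threads, /*threadpool=*/NULL);

        // 2. the caller owns the work buffer
        uint8_t * work = NULL;
        if (plan.work_size > 0) {
            work = malloc(plan.work_size); // error handling omitted in this sketch
            plan.work_data = work;
        }

        // 3. compute, then release the buffer
        enum ggml_status status = ggml_graph_compute(gf, &plan);
        free(work);
        return status;
    }
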
include/ggml-cuda.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #ifdef GGML_USE_HIP
+ #define GGML_CUDA_NAME "ROCm"
+ #define GGML_CUBLAS_NAME "hipBLAS"
+ #elif defined(GGML_USE_MUSA)
+ #define GGML_CUDA_NAME "MUSA"
+ #define GGML_CUBLAS_NAME "muBLAS"
+ #else
+ #define GGML_CUDA_NAME "CUDA"
+ #define GGML_CUBLAS_NAME "cuBLAS"
+ #endif
+ #define GGML_CUDA_MAX_DEVICES 16
+
+ // backend API
+ GGML_BACKEND_API ggml_backend_t ggml_backend_cuda_init(int device);
+
+ GGML_BACKEND_API bool ggml_backend_is_cuda(ggml_backend_t backend);
+
+ // device buffer
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
+
+ // split tensor buffer that splits matrices by rows across multiple devices
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split);
+
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
+
+ GGML_BACKEND_API int ggml_backend_cuda_get_device_count(void);
+ GGML_BACKEND_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
+ GGML_BACKEND_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
+
+ GGML_BACKEND_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
+ GGML_BACKEND_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cuda_reg(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
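
Editor's note: a hedged sketch (ours, not from the package) of the device-query functions above. It lists the visible devices and, if any exist, initializes a backend on device 0; `ggml_backend_free` comes from ggml-backend.h, which this header includes.

    #include <stdio.h>
    #include "ggml-cuda.h"

    int main(void) {
        const int n_devices = ggml_backend_cuda_get_device_count();
        for (int i = 0; i < n_devices; ++i) {
            char   desc[128];
            size_t free_mem, total_mem;
            ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
            ggml_backend_cuda_get_device_memory(i, &free_mem, &total_mem);
            printf("%s device %d: %s, %zu of %zu bytes free\n",
                   GGML_CUDA_NAME, i, desc, free_mem, total_mem);
        }

        if (n_devices > 0) {
            ggml_backend_t backend = ggml_backend_cuda_init(0); // device index 0
            // ... allocate buffers and compute graphs through the generic ggml-backend API ...
            ggml_backend_free(backend);
        }
        return 0;
    }
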
include/ggml-metal.h ADDED
@@ -0,0 +1,61 @@
+ // Note: this description is outdated
+ //
+ // An interface for computing a ggml_cgraph with Metal
+ //
+ // This is a fully functional interface that extends ggml with GPU support for Apple devices.
+ // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
+ //
+ // How does it work?
+ //
+ // As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
+ // interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
+ // use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
+ //
+ // You only need to make sure that all memory buffers that you used during the graph creation
+ // are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
+ // used during the graph evaluation to determine the arguments of the compute kernels.
+ //
+ // Synchronization between device and host memory (for example for input and output tensors)
+ // is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
+ //
+
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #include <stddef.h>
+ #include <stdbool.h>
+
+ struct ggml_tensor;
+ struct ggml_cgraph;
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ //
+ // backend API
+ // user-code should use only these functions
+ //
+
+ // TODO: remove in the future
+ GGML_BACKEND_API ggml_backend_t ggml_backend_metal_init(void);
+
+ GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend);
+
+ GGML_BACKEND_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);
+
+ // helper to check if the device supports a specific family
+ // ideally, the user code should be doing these checks
+ // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ GGML_BACKEND_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);
+
+ // capture all command buffers committed the next time `ggml_backend_graph_compute` is called
+ GGML_BACKEND_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_metal_reg(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
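
Editor's note: to make the abort-callback hook above concrete, here is a small sketch of our own (not from the package) that initializes the Metal backend and installs a cancellation flag. `ggml_abort_callback` is declared in ggml.h; returning true from it aborts the current graph computation.

    #include <stdbool.h>
    #include "ggml.h"
    #include "ggml-metal.h"

    static volatile bool g_cancel = false; // flipped from another thread to cancel

    static bool should_abort(void * user_data) {
        (void) user_data;
        return g_cancel; // true => abort the running graph computation
    }

    ggml_backend_t init_metal_with_cancel(void) {
        ggml_backend_t backend = ggml_backend_metal_init();
        if (backend != NULL && ggml_backend_is_metal(backend)) {
            ggml_backend_metal_set_abort_callback(backend, should_abort, NULL);
            // optional: capture the next committed command buffers for debugging
            // ggml_backend_metal_capture_next_compute(backend);
        }
        return backend;
    }
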
include/ggml-opt.h ADDED
@@ -0,0 +1,256 @@
+ // This file contains functionality for training models using GGML.
+ // It is not strictly needed vs. just vanilla GGML, but it provides a higher-level interface for common needs such as datasets.
+ // Especially at the bottom of this file there are relatively high-level functions that are suitable for use or adaptation in user code.
+ //
+ // Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)
+
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #include <stdint.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct ggml_opt_dataset;
+ struct ggml_opt_context;
+ struct ggml_opt_result;
+
+ typedef struct ggml_opt_dataset * ggml_opt_dataset_t;
+ typedef struct ggml_opt_context * ggml_opt_context_t;
+ typedef struct ggml_opt_result * ggml_opt_result_t;
+
+ // ====== Loss ======
+
+ // built-in loss types, i.e. the built-in quantities minimized by the optimizer
+ // custom loss types can be defined via mean or sum, which simply reduce the outputs for all datapoints to a single value
+ enum ggml_opt_loss_type {
+     GGML_OPT_LOSS_TYPE_MEAN,
+     GGML_OPT_LOSS_TYPE_SUM,
+     GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
+     GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR,
+ };
+
+ // ====== Dataset ======
+
+ GGML_API ggml_opt_dataset_t ggml_opt_dataset_init(
+         enum ggml_type type_data,    // the type for the internal data tensor
+         enum ggml_type type_label,   // the type for the internal labels tensor
+         int64_t ne_datapoint, // number of elements per datapoint
+         int64_t ne_label,     // number of elements per label
+         int64_t ndata,        // total number of datapoints/labels
+         int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
+ GGML_API void ggml_opt_dataset_free(ggml_opt_dataset_t dataset);
+
+ // get underlying tensors that store the data
+ GGML_API int64_t ggml_opt_dataset_ndata (ggml_opt_dataset_t dataset);
+ GGML_API struct ggml_tensor * ggml_opt_dataset_data (ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
+ GGML_API struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset); // shape = [ne_label, ndata]
+
+ // shuffle the first idata datapoints of the dataset with RNG from opt_ctx, shuffle all datapoints if idata is negative
+ GGML_API void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata);
+
+ // get batch at position ibatch from dataset and copy the data to data_batch and labels_batch
+ GGML_API void ggml_opt_dataset_get_batch(
+         ggml_opt_dataset_t dataset,
+         struct ggml_tensor * data_batch,   // shape = [ne_datapoint, ndata_batch]
+         struct ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
+         int64_t ibatch);
+ GGML_API void ggml_opt_dataset_get_batch_host(
+         ggml_opt_dataset_t dataset,
+         void * data_batch,
+         size_t nb_data_batch,
+         void * labels_batch,
+         int64_t ibatch);
+
+ // ====== Model / Context ======
+
+ enum ggml_opt_build_type {
+     GGML_OPT_BUILD_TYPE_FORWARD = 10,
+     GGML_OPT_BUILD_TYPE_GRAD = 20,
+     GGML_OPT_BUILD_TYPE_OPT = 30,
+ };
+
+ enum ggml_opt_optimizer_type {
+     GGML_OPT_OPTIMIZER_TYPE_ADAMW,
+     GGML_OPT_OPTIMIZER_TYPE_SGD,
+
+     GGML_OPT_OPTIMIZER_TYPE_COUNT
+ };
+
+ // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
+ struct ggml_opt_optimizer_params {
+     struct {
+         float alpha; // learning rate
+         float beta1; // first AdamW momentum
+         float beta2; // second AdamW momentum
+         float eps;   // epsilon for numerical stability
+         float wd;    // weight decay - 0.0f to disable
+     } adamw;
+     struct {
+         float alpha; // learning rate
+         float wd;    // weight decay
+     } sgd;
+ };
+
+ // callback to calculate optimizer parameters prior to a backward pass
+ // userdata can be used to pass arbitrary data
+ typedef struct ggml_opt_optimizer_params (*ggml_opt_get_optimizer_params)(void * userdata);
+
+ // returns the default optimizer params (constant, hard-coded values)
+ // userdata is not used
+ GGML_API struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata);
+
+ // casts userdata to ggml_opt_optimizer_params and returns it
+ GGML_API struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata);
+
+ // parameters for initializing a new optimization context
+ struct ggml_opt_params {
+     ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs
+
+     // by default the forward graph needs to be reconstructed for each eval
+     // if ctx_compute, inputs, and outputs are set the graphs are instead allocated statically
+     struct ggml_context * ctx_compute;
+     struct ggml_tensor * inputs;
+     struct ggml_tensor * outputs;
+
+     enum ggml_opt_loss_type loss_type;
+     enum ggml_opt_build_type build_type;
+
+     int32_t opt_period; // after how many gradient accumulation steps an optimizer step should be done
+
+     ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
+     void * get_opt_pars_ud;                     // userdata for calculating optimizer parameters
+
+     // only GGML_OPT_OPTIMIZER_TYPE_ADAMW needs m, v momenta per parameter tensor
+     enum ggml_opt_optimizer_type optimizer;
+ };
+
+ // get parameters for an optimization context with defaults set where possible
+ // parameters for which no sensible defaults exist are supplied as arguments to this function
+ GGML_API struct ggml_opt_params ggml_opt_default_params(
+         ggml_backend_sched_t backend_sched,
+         enum ggml_opt_loss_type loss_type);
+
+ GGML_API ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params);
+ GGML_API void ggml_opt_free(ggml_opt_context_t opt_ctx);
+
+ // set gradients to zero, initialize loss, and optionally reset the optimizer
+ GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer);
+
+ GGML_API bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx); // whether the graphs are allocated statically
+
+ // get underlying tensors that store data
+ // if not using static graphs these pointers become invalid with the next call to ggml_opt_alloc
+ GGML_API struct ggml_tensor * ggml_opt_inputs( ggml_opt_context_t opt_ctx); // forward graph input tensor
+ GGML_API struct ggml_tensor * ggml_opt_outputs( ggml_opt_context_t opt_ctx); // forward graph output tensor
+ GGML_API struct ggml_tensor * ggml_opt_labels( ggml_opt_context_t opt_ctx); // labels to compare outputs against
+ GGML_API struct ggml_tensor * ggml_opt_loss( ggml_opt_context_t opt_ctx); // scalar tensor that contains the loss
+ GGML_API struct ggml_tensor * ggml_opt_pred( ggml_opt_context_t opt_ctx); // predictions made by outputs
+ GGML_API struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels
+
+ // get the gradient accumulator for a node from the forward graph
+ GGML_API struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node);
+
+ GGML_API enum ggml_opt_optimizer_type ggml_opt_context_optimizer_type(ggml_opt_context_t); //TODO consistent naming scheme
+
+ GGML_API const char * ggml_opt_optimizer_name(enum ggml_opt_optimizer_type);
+
+ // ====== Optimization Result ======
+
+ GGML_API ggml_opt_result_t ggml_opt_result_init(void);
+ GGML_API void ggml_opt_result_free(ggml_opt_result_t result);
+ GGML_API void ggml_opt_result_reset(ggml_opt_result_t result);
+
+ // get data from result, uncertainties are optional and can be ignored by passing NULL
+ GGML_API void ggml_opt_result_ndata( ggml_opt_result_t result, int64_t * ndata); // writes 1 value, number of datapoints
+ GGML_API void ggml_opt_result_loss( ggml_opt_result_t result, double * loss, double * unc); // writes 1 value
+ GGML_API void ggml_opt_result_pred( ggml_opt_result_t result, int32_t * pred); // writes ndata values
+ GGML_API void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc); // writes 1 value
+
+ // ====== Computation ======
+
+ // if not using static graphs, this function must be called prior to ggml_opt_alloc
+ GGML_API void ggml_opt_prepare_alloc(
+         ggml_opt_context_t opt_ctx,
+         struct ggml_context * ctx_compute,
+         struct ggml_cgraph * gf,
+         struct ggml_tensor * inputs,
+         struct ggml_tensor * outputs);
+
+ // allocate the next graph for evaluation, either forward or forward + backward
+ // must be called exactly once prior to calling ggml_opt_eval
+ GGML_API void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward);
+
+ // do forward pass, increment result if not NULL, do backward pass if allocated
+ GGML_API void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);
+
+ // ############################################################################
+ // ## The high-level functions start here. They do not depend on any private ##
+ // ## functions or structs and can be copied to and adapted for user code.   ##
+ // ############################################################################
+
+ // ====== Intended Usage ======
+ //
+ // 1. Select the appropriate loss for your problem.
+ // 2. Create a dataset and set the data for the "data" tensor. Also set the "labels" tensor if your loss needs them.
+ //    Setting the shard size to 1 is fine; it is the granularity with which data is shuffled/loaded (bigger values are faster).
+ // 3. Create a GGML graph for your model with no_alloc == true. Use two separate contexts for the tensors.
+ //    The first context should contain the model parameters and inputs and be allocated statically in user code.
+ //    The second context should contain all other tensors and will be (re)allocated automatically.
+ //    Due to this automated allocation the data of the second context is not defined when accessed in user code.
+ //    Note that the second dimension of the inputs/outputs is interpreted as the number of datapoints in those tensors.
+ // 4. Call ggml_opt_fit. If you need more control you can use ggml_opt_epoch instead.
+
+ // signature for a callback while evaluating opt_ctx on dataset, called after an evaluation
+ typedef void (*ggml_opt_epoch_callback)(
+         bool train,                 // true after training evaluation, false after validation evaluation
+         ggml_opt_context_t opt_ctx,
+         ggml_opt_dataset_t dataset,
+         ggml_opt_result_t result,   // result associated with the dataset subsection
+         int64_t ibatch,             // number of batches that have been evaluated so far
+         int64_t ibatch_max,         // total number of batches in this dataset subsection
+         int64_t t_start_us);        // time at which the evaluation on the dataset subsection was started
+
+ // do training on the front of the dataset, do evaluation only on the back of the dataset
+ GGML_API void ggml_opt_epoch(
+         ggml_opt_context_t opt_ctx,
+         ggml_opt_dataset_t dataset,
+         ggml_opt_result_t result_train, // result to increment during training, ignored if NULL
+         ggml_opt_result_t result_eval,  // result to increment during evaluation, ignored if NULL
+         int64_t idata_split,            // data index at which to split training and evaluation
+         ggml_opt_epoch_callback callback_train,
+         ggml_opt_epoch_callback callback_eval);
+
+ // callback that prints a progress bar on stderr
+ GGML_API void ggml_opt_epoch_callback_progress_bar(
+         bool train,
+         ggml_opt_context_t opt_ctx,
+         ggml_opt_dataset_t dataset,
+         ggml_opt_result_t result,
+         int64_t ibatch,
+         int64_t ibatch_max,
+         int64_t t_start_us);
+
+ // fit model defined by inputs and outputs to dataset
+ GGML_API void ggml_opt_fit(
+         ggml_backend_sched_t backend_sched,         // backend scheduler for constructing the compute graphs
+         struct ggml_context * ctx_compute,          // context with temporarily allocated tensors to calculate the outputs
+         struct ggml_tensor * inputs,                // input tensor with shape [ne_datapoint, ndata_batch]
+         struct ggml_tensor * outputs,               // output tensor, must have shape [ne_label, ndata_batch] if labels are used
+         ggml_opt_dataset_t dataset,                 // dataset with data and optionally also labels
+         enum ggml_opt_loss_type loss_type,          // loss to minimize
+         enum ggml_opt_optimizer_type optimizer,     // sgd or adamw
+         ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
+         int64_t nepoch,                             // how many times the dataset should be iterated over
+         int64_t nbatch_logical,                     // datapoints per optimizer step, must be a multiple of ndata_batch in inputs/outputs
+         float val_split,                            // fraction of the dataset to use for validation, must be in [0.0f, 1.0f)
+         bool silent);                               // whether or not info prints to stderr should be suppressed
+
+
+ #ifdef __cplusplus
+ }
+ #endif
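
Editor's note: following the "Intended Usage" notes in this header, a hedged end-to-end sketch of steps 1-4 could look roughly as follows. `sched`, `ctx_compute`, `inputs`, and `outputs` are placeholders for the scheduler and model graph that user code must build, and the dataset dimensions are made-up examples.

    #include "ggml.h"
    #include "ggml-opt.h"

    void train_example(ggml_backend_sched_t sched, struct ggml_context * ctx_compute,
                       struct ggml_tensor * inputs, struct ggml_tensor * outputs) {
        const int64_t ne_datapoint = 28*28;  // e.g. one flattened image
        const int64_t ne_label     = 10;     // one-hot classes
        const int64_t ndata        = 60000;  // illustrative dataset size

        ggml_opt_dataset_t dataset = ggml_opt_dataset_init(
            GGML_TYPE_F32, GGML_TYPE_F32, ne_datapoint, ne_label, ndata, /*ndata_shard=*/1);

        // the dataset owns its backing tensors; fill them with the training data
        float * data   = ggml_get_data_f32(ggml_opt_dataset_data  (dataset));
        float * labels = ggml_get_data_f32(ggml_opt_dataset_labels(dataset));
        // ... write ndata*ne_datapoint floats to data and ndata*ne_label floats to labels ...
        (void) data; (void) labels;

        ggml_opt_fit(sched, ctx_compute, inputs, outputs, dataset,
                     GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
                     GGML_OPT_OPTIMIZER_TYPE_ADAMW,
                     ggml_opt_get_default_optimizer_params,
                     /*nepoch=*/30, /*nbatch_logical=*/500, /*val_split=*/0.05f, /*silent=*/false);

        ggml_opt_dataset_free(dataset);
    }
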
include/ggml-rpc.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define RPC_PROTO_MAJOR_VERSION 3
+ #define RPC_PROTO_MINOR_VERSION 6
+ #define RPC_PROTO_PATCH_VERSION 0
+ #define GGML_RPC_MAX_SERVERS 16
+
+ // backend API
+ GGML_BACKEND_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint, uint32_t device);
+ GGML_BACKEND_API bool ggml_backend_is_rpc(ggml_backend_t backend);
+
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint, uint32_t device);
+
+ GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total);
+
+ GGML_BACKEND_API void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir,
+                                                     size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void);
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_add_server(const char * endpoint);
+
+ #ifdef __cplusplus
+ }
+ #endif
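
Editor's note: a short client-side sketch of the API above (our own, with a made-up endpoint address). It connects to an RPC server, checks the remote device's memory, and releases the backend; the server side would call ggml_backend_rpc_start_server with the devices it wants to expose.

    #include <stdio.h>
    #include "ggml-rpc.h"

    int main(void) {
        const char * endpoint = "192.168.1.42:50052"; // placeholder "host:port"

        ggml_backend_t backend = ggml_backend_rpc_init(endpoint, /*device=*/0);
        if (backend == NULL || !ggml_backend_is_rpc(backend)) {
            fprintf(stderr, "failed to connect to %s\n", endpoint);
            return 1;
        }

        size_t free_mem = 0, total_mem = 0;
        ggml_backend_rpc_get_device_memory(endpoint, 0, &free_mem, &total_mem);
        printf("remote device: %zu of %zu bytes free\n", free_mem, total_mem);

        // ... allocate tensors via ggml_backend_rpc_buffer_type(endpoint, 0) and run graphs ...

        ggml_backend_free(backend);
        return 0;
    }
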
include/ggml-sycl.h ADDED
@@ -0,0 +1,49 @@
+ //
+ // MIT license
+ // Copyright (C) 2024 Intel Corporation
+ // SPDX-License-Identifier: MIT
+ //
+
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #define GGML_SYCL_NAME "SYCL"
+ #define GGML_SYCL_MAX_DEVICES 48
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ // backend API
+ GGML_BACKEND_API ggml_backend_t ggml_backend_sycl_init(int device);
+
+ GGML_BACKEND_API bool ggml_backend_is_sycl(ggml_backend_t backend);
+
+ // device buffer
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
+
+ // split tensor buffer that splits matrices by rows across multiple devices
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
+
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
+
+ GGML_BACKEND_API void ggml_backend_sycl_print_sycl_devices(void);
+ GGML_BACKEND_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len);
+ GGML_BACKEND_API void ggml_backend_sycl_get_device_description(int device,
+                                                                char *description,
+                                                                size_t description_size);
+ GGML_BACKEND_API int ggml_backend_sycl_get_device_count();
+ GGML_BACKEND_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+
+ // SYCL doesn't support registering host memory, keep here for reference
+ // GGML_BACKEND_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
+ // GGML_BACKEND_API void ggml_backend_sycl_unregister_host_buffer(void * buffer);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_sycl_reg(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
include/ggml-virtgpu.h ADDED
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_REMOTING_FRONTEND_NAME "RemotingFrontend"
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_virtgpu_reg();
+
+ #ifdef __cplusplus
+ }
+ #endif
include/ggml-vulkan.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_VK_NAME "Vulkan"
+ #define GGML_VK_MAX_DEVICES 16
+
+ // backend API
+ GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
+
+ GGML_BACKEND_API bool ggml_backend_is_vk(ggml_backend_t backend);
+ GGML_BACKEND_API int ggml_backend_vk_get_device_count(void);
+ GGML_BACKEND_API void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
+ GGML_BACKEND_API void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
+
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_vk_reg(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
include/ggml-webgpu.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_WEBGPU_NAME "WebGPU"
+
+ // Needed for examples in ggml
+ GGML_BACKEND_API ggml_backend_t ggml_backend_webgpu_init(void);
+
+ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_webgpu_reg(void);
+
+ #ifdef __cplusplus
+ }
+ #endif