whispercpp 1.2.0.2 → 1.3.1

Files changed (135)
  1. checksums.yaml +4 -4
  2. data/.gitignore +5 -0
  3. data/LICENSE +1 -1
  4. data/README.md +165 -434
  5. data/Rakefile +46 -86
  6. data/ext/.gitignore +13 -0
  7. data/ext/cpu.mk +9 -0
  8. data/ext/{dr_wav.h → examples/dr_wav.h} +3560 -1179
  9. data/ext/extconf.rb +185 -7
  10. data/ext/ggml/include/ggml-alloc.h +76 -0
  11. data/ext/ggml/include/ggml-backend.h +352 -0
  12. data/ext/ggml/include/ggml-blas.h +25 -0
  13. data/ext/ggml/include/ggml-cann.h +123 -0
  14. data/ext/ggml/include/ggml-cpp.h +38 -0
  15. data/ext/ggml/include/ggml-cpu.h +135 -0
  16. data/ext/ggml/include/ggml-cuda.h +47 -0
  17. data/ext/ggml/include/ggml-kompute.h +50 -0
  18. data/ext/ggml/include/ggml-metal.h +66 -0
  19. data/ext/ggml/include/ggml-opencl.h +26 -0
  20. data/ext/ggml/include/ggml-opt.h +216 -0
  21. data/ext/ggml/include/ggml-rpc.h +28 -0
  22. data/ext/ggml/include/ggml-sycl.h +49 -0
  23. data/ext/ggml/include/ggml-vulkan.h +31 -0
  24. data/ext/ggml/include/ggml.h +2285 -0
  25. data/ext/ggml/src/ggml-alloc.c +1037 -0
  26. data/ext/ggml/src/ggml-amx/common.h +94 -0
  27. data/ext/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
  28. data/ext/ggml/src/ggml-amx/mmq.cpp +2510 -0
  29. data/ext/ggml/src/ggml-amx/mmq.h +17 -0
  30. data/ext/ggml/src/ggml-backend-impl.h +256 -0
  31. data/ext/ggml/src/ggml-backend-reg.cpp +552 -0
  32. data/ext/ggml/src/ggml-backend.cpp +1999 -0
  33. data/ext/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
  34. data/ext/ggml/src/ggml-cann/acl_tensor.cpp +175 -0
  35. data/ext/ggml/src/ggml-cann/acl_tensor.h +258 -0
  36. data/ext/ggml/src/ggml-cann/aclnn_ops.cpp +3427 -0
  37. data/ext/ggml/src/ggml-cann/aclnn_ops.h +592 -0
  38. data/ext/ggml/src/ggml-cann/common.h +286 -0
  39. data/ext/ggml/src/ggml-cann/ggml-cann.cpp +2188 -0
  40. data/ext/ggml/src/ggml-cann/kernels/ascendc_kernels.h +19 -0
  41. data/ext/ggml/src/ggml-cann/kernels/dup.cpp +236 -0
  42. data/ext/ggml/src/ggml-cann/kernels/get_row_f16.cpp +197 -0
  43. data/ext/ggml/src/ggml-cann/kernels/get_row_f32.cpp +190 -0
  44. data/ext/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +204 -0
  45. data/ext/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +191 -0
  46. data/ext/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +218 -0
  47. data/ext/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +216 -0
  48. data/ext/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +295 -0
  49. data/ext/ggml/src/ggml-common.h +1853 -0
  50. data/ext/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
  51. data/ext/ggml/src/ggml-cpu/amx/amx.h +8 -0
  52. data/ext/ggml/src/ggml-cpu/amx/common.h +91 -0
  53. data/ext/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
  54. data/ext/ggml/src/ggml-cpu/amx/mmq.h +10 -0
  55. data/ext/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
  56. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +4262 -0
  57. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
  58. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
  59. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
  60. data/ext/ggml/src/ggml-cpu/ggml-cpu-impl.h +386 -0
  61. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
  62. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  63. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
  64. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
  65. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
  66. data/ext/ggml/src/ggml-cpu/ggml-cpu.cpp +622 -0
  67. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1884 -0
  68. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  69. data/ext/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
  70. data/ext/ggml/src/ggml-cuda/vendors/hip.h +186 -0
  71. data/ext/ggml/src/ggml-cuda/vendors/musa.h +134 -0
  72. data/ext/ggml/src/ggml-impl.h +556 -0
  73. data/ext/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
  74. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
  75. data/ext/ggml/src/ggml-metal/ggml-metal.m +4884 -0
  76. data/ext/ggml/src/ggml-metal/ggml-metal.metal +6732 -0
  77. data/ext/ggml/src/ggml-opt.cpp +854 -0
  78. data/ext/ggml/src/ggml-quants.c +5238 -0
  79. data/ext/ggml/src/ggml-quants.h +100 -0
  80. data/ext/ggml/src/ggml-rpc/ggml-rpc.cpp +1406 -0
  81. data/ext/ggml/src/ggml-sycl/common.cpp +95 -0
  82. data/ext/ggml/src/ggml-sycl/concat.cpp +196 -0
  83. data/ext/ggml/src/ggml-sycl/conv.cpp +99 -0
  84. data/ext/ggml/src/ggml-sycl/convert.cpp +547 -0
  85. data/ext/ggml/src/ggml-sycl/dmmv.cpp +1023 -0
  86. data/ext/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
  87. data/ext/ggml/src/ggml-sycl/ggml-sycl.cpp +4729 -0
  88. data/ext/ggml/src/ggml-sycl/im2col.cpp +126 -0
  89. data/ext/ggml/src/ggml-sycl/mmq.cpp +3031 -0
  90. data/ext/ggml/src/ggml-sycl/mmvq.cpp +1015 -0
  91. data/ext/ggml/src/ggml-sycl/norm.cpp +378 -0
  92. data/ext/ggml/src/ggml-sycl/outprod.cpp +56 -0
  93. data/ext/ggml/src/ggml-sycl/rope.cpp +276 -0
  94. data/ext/ggml/src/ggml-sycl/softmax.cpp +251 -0
  95. data/ext/ggml/src/ggml-sycl/tsembd.cpp +72 -0
  96. data/ext/ggml/src/ggml-sycl/wkv6.cpp +141 -0
  97. data/ext/ggml/src/ggml-threading.cpp +12 -0
  98. data/ext/ggml/src/ggml-threading.h +14 -0
  99. data/ext/ggml/src/ggml-vulkan/ggml-vulkan.cpp +8657 -0
  100. data/ext/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
  101. data/ext/ggml/src/ggml.c +7694 -0
  102. data/ext/include/whisper.h +672 -0
  103. data/ext/metal-embed.mk +17 -0
  104. data/ext/metal.mk +6 -0
  105. data/ext/ruby_whisper.cpp +1608 -159
  106. data/ext/ruby_whisper.h +10 -0
  107. data/ext/scripts/get-flags.mk +38 -0
  108. data/ext/src/coreml/whisper-decoder-impl.h +146 -0
  109. data/ext/src/coreml/whisper-decoder-impl.m +201 -0
  110. data/ext/src/coreml/whisper-encoder-impl.h +142 -0
  111. data/ext/src/coreml/whisper-encoder-impl.m +197 -0
  112. data/ext/src/coreml/whisper-encoder.h +26 -0
  113. data/ext/src/openvino/whisper-openvino-encoder.cpp +108 -0
  114. data/ext/src/openvino/whisper-openvino-encoder.h +31 -0
  115. data/ext/src/whisper.cpp +7393 -0
  116. data/extsources.rb +6 -0
  117. data/lib/whisper/model/uri.rb +157 -0
  118. data/lib/whisper.rb +2 -0
  119. data/tests/helper.rb +7 -0
  120. data/tests/jfk_reader/.gitignore +5 -0
  121. data/tests/jfk_reader/extconf.rb +3 -0
  122. data/tests/jfk_reader/jfk_reader.c +68 -0
  123. data/tests/test_callback.rb +160 -0
  124. data/tests/test_error.rb +20 -0
  125. data/tests/test_model.rb +71 -0
  126. data/tests/test_package.rb +31 -0
  127. data/tests/test_params.rb +160 -0
  128. data/tests/test_segment.rb +83 -0
  129. data/tests/test_whisper.rb +211 -123
  130. data/whispercpp.gemspec +36 -0
  131. metadata +137 -11
  132. data/ext/ggml.c +0 -8616
  133. data/ext/ggml.h +0 -748
  134. data/ext/whisper.cpp +0 -4829
  135. data/ext/whisper.h +0 -402
data/ext/ggml/src/ggml-opt.cpp
@@ -0,0 +1,854 @@
+ #include "ggml-opt.h"
+
+ #include "ggml.h"
+ #include "ggml-alloc.h"
+ #include "ggml-backend.h"
+ #include "ggml-impl.h"
+
+ #include <algorithm>
+ #include <cmath>
+ #include <cstdint>
+ #include <cinttypes>
+ #include <map>
+ #include <random>
+ #include <vector>
+
+ struct ggml_opt_dataset {
+     struct ggml_context * ctx = nullptr;
+     ggml_backend_buffer_t buf = nullptr;
+     struct ggml_tensor * data = nullptr;
+     struct ggml_tensor * labels = nullptr;
+
+     int64_t ndata = -1;
+     int64_t ndata_shard = -1;
+     size_t nbs_data = -1;
+     size_t nbs_labels = -1;
+
+     std::vector<int64_t> permutation;
+ };
+
+ struct ggml_opt_context {
+     ggml_backend_sched_t backend_sched = nullptr;
+     ggml_cgraph * allocated_graph = nullptr;
+     ggml_cgraph * allocated_graph_copy = nullptr;
+     struct ggml_context * ctx_static = nullptr;
+     struct ggml_context * ctx_static_cpu = nullptr;
+     struct ggml_context * ctx_compute = nullptr;
+     struct ggml_context * ctx_copy = nullptr;
+     ggml_backend_buffer_t buf_static = nullptr;
+     ggml_backend_buffer_t buf_static_cpu = nullptr;
+     std::mt19937 rng;
+
+     struct ggml_tensor * inputs = nullptr;
+     struct ggml_tensor * outputs = nullptr;
+     struct ggml_tensor * labels = nullptr;
+
+     struct ggml_tensor * loss = nullptr;
+     struct ggml_tensor * pred = nullptr;
+     struct ggml_tensor * ncorrect = nullptr;
+
+     struct ggml_cgraph * gf = nullptr;
+     struct ggml_cgraph * gb_grad = nullptr;
+     struct ggml_cgraph * gb_opt = nullptr;
+
+     int64_t iter = 1;
+     int32_t opt_period = 1;
+     int32_t opt_i = 0;
+     bool loss_per_datapoint = false;
+
+     ggml_opt_get_optimizer_params get_opt_pars = nullptr;
+     void * get_opt_pars_ud = nullptr;
+     struct ggml_tensor * adamw_params = nullptr;
+ };
+
+ struct ggml_opt_result {
+     int64_t ndata = 0;
+     std::vector<float> loss;
+     std::vector<int32_t> pred;
+     int64_t ncorrect = 0;
+
+     int64_t opt_period = -1;
+     bool loss_per_datapoint = false;
+ };
+
+ // ====== Dataset ======
+
+ ggml_opt_dataset_t ggml_opt_dataset_init(int64_t ne_datapoint, int64_t ne_label, int64_t ndata, int64_t ndata_shard) {
+     GGML_ASSERT(ne_datapoint > 0);
+     GGML_ASSERT(ne_label >= 0);
+     GGML_ASSERT(ndata > 0);
+     GGML_ASSERT(ndata_shard > 0);
+
+     ggml_opt_dataset_t result = new ggml_opt_dataset;
+     result->ndata = ndata;
+     result->ndata_shard = ndata_shard;
+
+     {
+         struct ggml_init_params params = {
+             /*.mem_size =*/ 2*ggml_tensor_overhead(),
+             /*.mem_buffer =*/ nullptr,
+             /*.no_alloc =*/ true,
+         };
+         result->ctx = ggml_init(params);
+     }
+
+     result->data = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_datapoint, ndata);
+     result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata;
+
+     if (ne_label > 0) {
+         result->labels = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_label, ndata);
+         result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata;
+     } else {
+         result->labels = nullptr;
+         result->nbs_labels = 0;
+     }
+
+     result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type());
+
+     const int64_t nshards = ndata/ndata_shard;
+     result->permutation.resize(nshards);
+     for (int64_t i = 0; i < nshards; ++i) {
+         result->permutation[i] = i;
+     }
+     return result;
+ }
+
+ void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) {
+     ggml_backend_buffer_free(dataset->buf);
+     ggml_free(dataset->ctx);
+     delete dataset;
+ }
+
+ struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) {
+     return dataset->data;
+ }
+
+ struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) {
+     return dataset->labels;
+ }
+
+ void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) {
+     GGML_ASSERT(idata <= dataset->ndata);
+
+     if (idata < 0) {
+         std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng);
+         return;
+     }
+
+     GGML_ASSERT(idata % dataset->ndata_shard == 0);
+     const int64_t ishard_max = idata / dataset->ndata_shard;
+     std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng);
+ }
+
+ void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) {
+     GGML_ASSERT( data_batch && ggml_is_contiguous(data_batch));
+     GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch));
+     GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr));
+
+     const size_t nb_data_batch = ggml_nbytes(data_batch);
+     GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0);
+     const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data;
+
+     if (labels_batch) {
+         const size_t nb_labels_batch = ggml_nbytes(labels_batch);
+         GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels);
+     }
+
+     GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size()));
+
+     for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) {
+         const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch];
+
+         const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data;
+         ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data);
+
+         if (!labels_batch) {
+             continue;
+         }
+
+         const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels;
+         ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels);
+     }
+ }
+
+ // ====== Model / Context ======
+
+ struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) {
+     GGML_UNUSED(userdata);
+
+     ggml_opt_optimizer_params result;
+
+     result.adamw.alpha = 0.001f;
+     result.adamw.beta1 = 0.9f;
+     result.adamw.beta2 = 0.999f;
+     result.adamw.eps = 1e-8f;
+     result.adamw.wd = 0.0f;
+
+     return result;
+ }
+
+ struct ggml_opt_params ggml_opt_default_params(
+         ggml_backend_sched_t backend_sched,
+         struct ggml_context * ctx_compute,
+         struct ggml_tensor * inputs,
+         struct ggml_tensor * outputs,
+         enum ggml_opt_loss_type loss_type) {
+     return {
+         /*backend_sched =*/ backend_sched,
+         /*ctx_compute =*/ ctx_compute,
+         /*inputs =*/ inputs,
+         /*logits =*/ outputs,
+         /*loss_type =*/ loss_type,
+         /*build_type =*/ GGML_OPT_BUILD_TYPE_OPT,
+         /*opt_period =*/ 1,
+         /*get_opt_pars =*/ ggml_opt_get_default_optimizer_params,
+         /*get_opt_pars_ud =*/ nullptr,
+     };
+ }
+
+ static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) {
+     if (!tensor) {
+         return nullptr;
+     }
+
+     if (tensor_map.find(tensor) != tensor_map.end()) {
+         return tensor_map[tensor];
+     }
+
+     ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor);
+     tensor_map[tensor] = new_tensor;
+
+     new_tensor->op = tensor->op;
+     for (int i = 0; i < GGML_MAX_DIMS; i++) {
+         new_tensor->nb[i] = tensor->nb[i];
+     }
+     new_tensor->flags = tensor->flags;
+     memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params));
+     strcpy(new_tensor->name, tensor->name);
+     new_tensor->data = tensor->data;
+     new_tensor->buffer = tensor->buffer;
+     new_tensor->extra = tensor->extra;
+     new_tensor->view_offs = tensor->view_offs;
+     new_tensor->view_src = map_tensor(tensor_map, ctx, tensor->view_src);
+     for (int i = 0; i < GGML_MAX_SRC; i++) {
+         new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]);
+     }
+
+     return new_tensor;
+ }
+
+ static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) {
+     std::map<ggml_tensor *, ggml_tensor *> tensor_map;
+
+     ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true);
+
+     for (int i = 0; i < src->n_leafs; i++) {
+         ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i]));
+     }
+     GGML_ASSERT(dst->n_leafs == src->n_leafs);
+     for (int i = 0; i < src->n_nodes; i++) {
+         ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i]));
+     }
+     GGML_ASSERT(dst->n_nodes == src->n_nodes);
+     for (int i = 0; i < src->n_nodes; ++i) {
+         const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]);
+         const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]);
+
+         GGML_ASSERT(igrad_src != GGML_HASHSET_FULL);
+         GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src));
+         GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL);
+         GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst));
+
+         dst->grads[igrad_dst] = src->grads[igrad_src];
+         dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src];
+     }
+
+     return dst;
+ }
+
+ static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph) {
+     GGML_ASSERT(graph);
+     if (opt_ctx->allocated_graph == graph) {
+         return;
+     }
+
+     ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph
+
+     {
+         ggml_init_params params = {
+             /*.mem_size =*/ ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE,
+             /*.mem_buffer =*/ nullptr,
+             /*.no_alloc =*/ true,
+         };
+         ggml_free(opt_ctx->ctx_copy);
+         opt_ctx->ctx_copy = ggml_init(params);
+     }
+
+     opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph);
+
+     ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
+     opt_ctx->allocated_graph = graph;
+ }
+
+ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) {
+     ggml_opt_context_t result = new struct ggml_opt_context;
+     result->backend_sched = params.backend_sched;
+     result->ctx_compute = params.ctx_compute;
+     result->inputs = params.inputs;
+     result->outputs = params.outputs;
+     result->opt_period = params.opt_period;
+     result->get_opt_pars = params.get_opt_pars;
+     result->get_opt_pars_ud = params.get_opt_pars_ud;
+
+     GGML_ASSERT(result->inputs->data && "the inputs must be allocated statically");
+     GGML_ASSERT(result->opt_period >= 1);
+
+     const bool accumulate = params.build_type == GGML_OPT_BUILD_TYPE_GRAD ||
+         (params.build_type == GGML_OPT_BUILD_TYPE_OPT && result->opt_period > 1);
+
+     ggml_set_input(result->inputs);
+     ggml_set_output(result->outputs);
+
+     result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass.
+     ggml_build_forward_expand(result->gf, result->outputs);
+
+     int n_param = 0;
+     for (int i = 0; i < result->gf->n_nodes; ++i) {
+         if (result->gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
+             n_param++;
+         }
+     }
+
+     {
+         // The static context is used for:
+         //   - gradients (1 tensor per param if using gradient accumulation)
+         //   - optimizer momenta (2 tensors per param)
+         //   - labels
+         //   - loss + its gradient (up to 5 tensors)
+         //   - pred
+         //   - ncorrect (2 tensors).
+         const size_t tensors_per_param = (accumulate ? 1 : 0) + (params.build_type == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0);
+         const size_t size_meta = (tensors_per_param*n_param + 9) * ggml_tensor_overhead();
+         struct ggml_init_params params = {
+             /*.mem_size =*/ size_meta,
+             /*.mem_buffer =*/ nullptr,
+             /*.no_alloc =*/ true,
+         };
+         result->ctx_static = ggml_init(params);
+     }
+     {
+         // The static cpu context is used for:
+         //   - optimizer parameters (1 for the entire context)
+         const size_t size_meta = 1 * ggml_tensor_overhead();
+         struct ggml_init_params params = {
+             /*.mem_size =*/ size_meta,
+             /*.mem_buffer =*/ nullptr,
+             /*.no_alloc =*/ true,
+         };
+         result->ctx_static_cpu = ggml_init(params);
+     }
+
+
+     switch (params.loss_type) {
+         case GGML_OPT_LOSS_TYPE_MEAN: {
+             result->loss = ggml_sum(result->ctx_static, result->outputs);
+             ggml_set_name(result->loss, "loss_sum");
+             const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
+             result->loss = ggml_scale(result->ctx_static, result->loss, scale);
+             ggml_set_name(result->loss, "loss_mean");
+             result->loss_per_datapoint = true;
+             break;
+         }
+         case GGML_OPT_LOSS_TYPE_SUM: {
+             result->loss = ggml_sum(result->ctx_static, result->outputs);
+             ggml_set_name(result->loss, "loss_sum");
+             result->loss_per_datapoint = false;
+             break;
+         }
+         case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: {
+             result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
+             ggml_set_input(result->labels);
+             ggml_set_name(result->labels, "labels");
+             result->loss = ggml_cross_entropy_loss(result->ctx_static, result->outputs, result->labels);
+             ggml_set_name(result->loss, "loss_cross_entropy");
+             if (result->opt_period > 1) {
+                 result->loss = ggml_scale(result->ctx_static, result->loss, 1.0f / result->opt_period);
+                 ggml_set_name(result->loss, "loss_cross_entropy_scaled");
+             }
+             result->loss_per_datapoint = true;
+             break;
+         }
+         case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: {
+             result->labels = ggml_dup_tensor(result->ctx_static, result->outputs);
+             ggml_set_input(result->labels);
+             ggml_set_name(result->labels, "labels");
+             result->loss = ggml_sub(result->ctx_static, result->outputs, result->labels);
+             ggml_set_name(result->loss, "loss_error");
+             result->loss = ggml_sqr(result->ctx_static, result->loss);
+             ggml_set_name(result->loss, "loss_squared_error");
+             result->loss = ggml_sum(result->ctx_static, result->loss);
+             ggml_set_name(result->loss, "loss_sum_squared_error");
+             const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs));
+             result->loss = ggml_scale(result->ctx_static, result->loss, scale);
+             ggml_set_name(result->loss, "loss_mean_squared_error");
+             result->loss_per_datapoint = true;
+             break;
+         }
+     }
+     ggml_set_output(result->loss);
+     ggml_set_loss(result->loss);
+     ggml_build_forward_expand(result->gf, result->loss);
+
+     result->pred = ggml_argmax(result->ctx_static, result->outputs);
+     ggml_set_name(result->pred, "pred");
+     ggml_set_output(result->pred);
+     ggml_build_forward_expand(result->gf, result->pred);
+
+     if (result->labels) {
+         result->ncorrect = ggml_count_equal(result->ctx_static, result->pred, ggml_argmax(result->ctx_static, result->labels));
+         ggml_set_name(result->ncorrect, "ncorrect");
+         ggml_set_output(result->ncorrect);
+         ggml_build_forward_expand(result->gf, result->ncorrect);
+     } else {
+         result->ncorrect = nullptr;
+     }
+
+     if (params.build_type == GGML_OPT_BUILD_TYPE_FORWARD) {
+         result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+         return result;
+     }
+
+     // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients.
+     result->gb_grad = ggml_graph_dup(result->ctx_compute, result->gf);
+     ggml_build_backward_expand(result->ctx_static, result->ctx_compute, result->gb_grad, accumulate);
+
+     if (params.build_type == GGML_OPT_BUILD_TYPE_GRAD) {
+         result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+         ggml_graph_reset(result->gb_grad);
+         return result;
+     }
+
+     GGML_ASSERT(params.build_type == GGML_OPT_BUILD_TYPE_OPT);
+
+     // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step.
+     result->gb_opt = ggml_graph_dup(result->ctx_compute, result->gb_grad);
+
+     result->adamw_params = ggml_new_tensor_1d(result->ctx_static_cpu, GGML_TYPE_F32, 7);
+     ggml_set_input(result->adamw_params);
+     ggml_set_name(result->adamw_params, "adamw_params");
+
+     for (int i = result->gf->n_nodes-1; i >= 0; --i) {
+         struct ggml_tensor * node = result->gb_opt->nodes[i];
+         struct ggml_tensor * grad = ggml_graph_get_grad(result->gb_opt, node);
+
+         if (node->flags & GGML_TENSOR_FLAG_PARAM) {
+             struct ggml_tensor * m = ggml_dup_tensor(result->ctx_static, node);
+             struct ggml_tensor * v = ggml_dup_tensor(result->ctx_static, node);
+             struct ggml_tensor * opt_step = ggml_opt_step_adamw(result->ctx_compute, node, grad, m, v, result->adamw_params);
+             ggml_build_forward_expand(result->gb_opt, opt_step);
+         }
+     }
+
+     result->buf_static = ggml_backend_alloc_ctx_tensors(
+         result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0));
+
+     result->buf_static_cpu = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx_static_cpu, ggml_backend_cpu_buffer_type());
+
+     ggml_graph_reset(result->gb_opt);
+
+     return result;
+ }
+
+ void ggml_opt_free(ggml_opt_context_t opt_ctx) {
+     if (opt_ctx == nullptr) {
+         return;
+     }
+     ggml_backend_buffer_free(opt_ctx->buf_static);
+     ggml_backend_buffer_free(opt_ctx->buf_static_cpu);
+     ggml_free(opt_ctx->ctx_static);
+     ggml_free(opt_ctx->ctx_static_cpu);
+     delete opt_ctx;
+ }
+
+ void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) {
+     if (optimizer) {
+         ggml_graph_reset(opt_ctx->gb_opt);
+         opt_ctx->iter = 1;
+     } else {
+         ggml_graph_reset(opt_ctx->gb_grad);
+     }
+ }
+
+ struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->inputs;
+ }
+
+ struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->outputs;
+ }
+
+ struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->labels;
+ }
+
+ struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->loss;
+ }
+
+ struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->pred;
+ }
+
+ struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) {
+     return opt_ctx->ncorrect;
+ }
+
+ struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) {
+     return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node);
+ }
+
+ // ====== Optimization Result ======
+
+ ggml_opt_result_t ggml_opt_result_init() {
+     return new ggml_opt_result;
+ }
+
+ void ggml_opt_result_free(ggml_opt_result_t result) {
+     delete result;
+ }
+
+ void ggml_opt_result_reset(ggml_opt_result_t result) {
+     result->ndata = 0;
+     result->loss.clear();
+     result->pred.clear();
+     result->ncorrect = 0;
+ }
+
+ void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) {
+     *ndata = result->ndata;
+ }
+
+ void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) {
+     const int64_t nbatches = result->loss.size(); // Number of physical batches.
+
+     if (nbatches == 0) {
+         *loss = 0.0;
+         *unc = NAN;
+         return;
+     }
+
+     double sum = 0.0;
+     double sum_squared = 0.0;
+
+     for (const float & loss : result->loss) {
+         // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch.
+         const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss;
+         sum += loss_scaled;
+         sum_squared += loss_scaled*loss_scaled;
+     }
+
+     const double mean = sum/nbatches;
+     *loss = result->loss_per_datapoint ? mean : sum;
+
+     if (!unc) {
+         return;
+     }
+
+     if (nbatches < 2) {
+         *unc = NAN;
+         return;
+     }
+
+     const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1)
+     *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1));
+ }
+
+ void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) {
+     for (size_t i = 0; i < result->pred.size(); ++i) {
+         pred[i] = result->pred[i];
+     }
+ }
+
+ void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) {
+     *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN;
+
+     if (!unc) {
+         return;
+     }
+
+     *unc = result->ncorrect >= 0 && result->ndata >= 2 ?
+         sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN;
+ }
+
+ // ====== Computation ======
+
+ static void ggml_opt_eval_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph, ggml_opt_result * result) {
+     if (graph != opt_ctx->gf) {
+         struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud);
+
+         GGML_ASSERT(opt_pars.adamw.alpha > 0.0f);
+         GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f);
+         GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f);
+         GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f);
+         GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f);
+         GGML_ASSERT(opt_pars.adamw.eps >= 0.0f);
+         GGML_ASSERT(opt_pars.adamw.wd >= 0.0f);
+         GGML_ASSERT(opt_pars.adamw.wd <= 1.0f);
+
+         // beta1, beta2 after applying warmup
+         const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
+         const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));
+
+         float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
+         adamw_par_data[0] = opt_pars.adamw.alpha;
+         adamw_par_data[1] = opt_pars.adamw.beta1;
+         adamw_par_data[2] = opt_pars.adamw.beta2;
+         adamw_par_data[3] = opt_pars.adamw.eps;
+         adamw_par_data[4] = opt_pars.adamw.wd;
+         adamw_par_data[5] = beta1h;
+         adamw_par_data[6] = beta2h;
+     }
+
+     ggml_opt_alloc_graph(opt_ctx, graph);
+     ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy);
+     opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt;
+
+     if (!result) {
+         return;
+     }
+
+     if (result->ndata == 0) {
+         result->loss_per_datapoint = opt_ctx->loss_per_datapoint;
+         result->opt_period = opt_ctx->opt_period;
+     } else {
+         GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint);
+         GGML_ASSERT(result->opt_period == opt_ctx->opt_period);
+     }
+
+     const int64_t ndata = opt_ctx->outputs->ne[1];
+     GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported");
+     result->ndata += ndata;
+
+     GGML_ASSERT(ggml_is_scalar(opt_ctx->loss));
+     GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32);
+     float loss;
+     ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss));
+     result->loss.push_back(loss);
+
+     GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32);
+     std::vector<int32_t> pred(ndata);
+     ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred));
+     result->pred.insert(result->pred.end(), pred.begin(), pred.end());
+
+     if (!opt_ctx->labels || result->ncorrect < 0) {
+         result->ncorrect = -1;
+         return;
+     }
+
+     GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect));
+     GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64);
+     int64_t ncorrect;
+     ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect));
+     result->ncorrect += ncorrect;
+ }
+
+ void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
+     ggml_opt_eval_graph(opt_ctx, opt_ctx->gf, result);
+ }
+
+ void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) {
+     if (opt_ctx->opt_period == 1) {
+         ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
+         return;
+     }
+
+     const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period;
+     if (opt_i_next == 0) {
+         ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result);
+         ggml_opt_reset(opt_ctx, /*optimizer =*/ false);
+     } else {
+         ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_grad, result);
+     }
+     opt_ctx->opt_i = opt_i_next;
+ }
+
+ // ====== High-Level Functions ======
+
+ void ggml_opt_epoch(
+         ggml_opt_context_t opt_ctx,
+         ggml_opt_dataset_t dataset,
+         ggml_opt_result_t result_train,
+         ggml_opt_result_t result_eval,
+         int64_t idata_split,
+         ggml_opt_epoch_callback callback_train,
+         ggml_opt_epoch_callback callback_eval) {
+     struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx);
+     struct ggml_tensor * labels = ggml_opt_labels(opt_ctx);
+     struct ggml_tensor * data = ggml_opt_dataset_data(dataset);
+     GGML_ASSERT(data->ne[0] == inputs->ne[0]);
+
+     const int64_t ndata = data->ne[1];
+     const int64_t ndata_batch = inputs->ne[1];
+
+     GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0);
+     const int64_t nbatches = ndata/ndata_batch;
+
+     idata_split = idata_split < 0 ? ndata : idata_split;
+     GGML_ASSERT(idata_split % ndata_batch == 0);
+     const int64_t ibatch_split = idata_split / ndata_batch;
+
+     int64_t ibatch = 0;
+     int64_t t_loop_start = ggml_time_us();
+     for (; ibatch < ibatch_split; ++ibatch) {
+         ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
+         ggml_opt_forward_backward(opt_ctx, result_train);
+         if (callback_train) {
+             callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start);
+         }
+     }
+     t_loop_start = ggml_time_us();
+     for (; ibatch < nbatches; ++ibatch) {
+         ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch);
+         ggml_opt_forward(opt_ctx, result_eval);
+         if (callback_eval) {
+             callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start);
+         }
+     }
+ }
+
+ void ggml_opt_epoch_callback_progress_bar(
+         bool train,
+         ggml_opt_context_t opt_ctx,
+         ggml_opt_dataset_t dataset,
+         ggml_opt_result_t result,
+         int64_t ibatch,
+         int64_t ibatch_max,
+         int64_t t_start_us) {
+     fprintf(stderr, "%s[", train ? "train: " : "val: ");
+
+     constexpr int64_t bar_length = 25;
+     for (int64_t j = 0; j < bar_length; ++j) {
+         const int64_t ibatch_j = ibatch_max * j/bar_length;
+         if (ibatch_j < ibatch) {
+             fprintf(stderr, "=");
+         } else if (ibatch_max * (j - 1)/bar_length < ibatch) {
+             fprintf(stderr, ">");
+         } else {
+             fprintf(stderr, " ");
+         }
+     }
+
+     const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1];
+     const int64_t idata = ibatch*batch_size;
+     const int64_t idata_max = ibatch_max*batch_size;
+
+     double loss;
+     double loss_unc;
+     ggml_opt_result_loss(result, &loss, &loss_unc);
+
+     double accuracy;
+     double accuracy_unc;
+     ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc);
+
+     const int64_t t_ibatch_us = ggml_time_us() - t_start_us;
+     int64_t t_ibatch_s = t_ibatch_us / 1000000;
+     const int64_t t_ibatch_h = t_ibatch_s / 3600;
+     t_ibatch_s -= t_ibatch_h * 3600;
+     const int64_t t_ibatch_m = t_ibatch_s / 60;
+     t_ibatch_s -= t_ibatch_m * 60;
+
+     const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch;
+     int64_t t_eta_s = t_eta_us / 1000000;
+     const int64_t t_eta_h = t_eta_s / 3600;
+     t_eta_s -= t_eta_h * 3600;
+     const int64_t t_eta_m = t_eta_s / 60;
+     t_eta_s -= t_eta_m * 60;
+
+     fprintf(stderr, "| data=%06" PRId64 "/%06" PRId64 ", loss=%.6lf+-%.6lf, accuracy=%.2lf+-%.2lf%%, "
+             "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 ", ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 "]\r",
+             idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc,
+             t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s);
+     if (ibatch == ibatch_max) {
+         fprintf(stderr, "\n");
+     }
+     fflush(stderr);
+
+     GGML_UNUSED(dataset);
+ }
+
+ void ggml_opt_fit(
+         ggml_backend_sched_t backend_sched,
+         ggml_context * ctx_compute,
+         ggml_tensor * inputs,
+         ggml_tensor * outputs,
+         ggml_opt_dataset_t dataset,
+         enum ggml_opt_loss_type loss_type,
+         ggml_opt_get_optimizer_params get_opt_pars,
+         int64_t nepoch,
+         int64_t nbatch_logical,
+         float val_split,
+         bool silent) {
+     ggml_time_init();
+     const int64_t t_start_us = ggml_time_us();
+
+     const int64_t ndata = ggml_opt_dataset_data(dataset)->ne[1];
+     const int64_t nbatch_physical = inputs->ne[1];
+     GGML_ASSERT(ndata % nbatch_logical == 0);
+     GGML_ASSERT(nbatch_logical % nbatch_physical == 0);
+
+     const int64_t opt_period = nbatch_logical / nbatch_physical;
+     const int64_t nbatches_logical = ndata / nbatch_logical;
+
+     GGML_ASSERT(val_split >= 0.0f);
+     GGML_ASSERT(val_split < 1.0f);
+     const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical)
+     const int64_t idata_split = ibatch_split * nbatch_physical;
+
+     int64_t epoch = 1;
+
+     ggml_opt_params params = ggml_opt_default_params(backend_sched, ctx_compute, inputs, outputs, loss_type);
+     params.opt_period = opt_period;
+     params.get_opt_pars = get_opt_pars;
+     params.get_opt_pars_ud = &epoch;
+     ggml_opt_context_t opt_ctx = ggml_opt_init(params);
+
+     // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
+     if (nbatch_logical < ndata) {
+         ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
+     }
+
+     ggml_opt_result_t result_train = ggml_opt_result_init();
+     ggml_opt_result_t result_val = ggml_opt_result_init();
+
+     ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;
+
+     for (; epoch <= nepoch; ++epoch) {
+         if (nbatch_logical < idata_split) {
+             ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
+         }
+
+         ggml_opt_result_reset(result_train);
+         ggml_opt_result_reset(result_val);
+
+         if (!silent) {
+             fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
+         }
+         ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
+         if (!silent) {
+             fprintf(stderr, "\n");
+         }
+     }
+
+     if (!silent) {
+         int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
+         const int64_t t_total_h = t_total_s / 3600;
+         t_total_s -= t_total_h * 3600;
+         const int64_t t_total_m = t_total_s / 60;
+         t_total_s -= t_total_m * 60;
+         fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
+     }
+
+     ggml_opt_free(opt_ctx);
+     ggml_opt_result_free(result_train);
+     ggml_opt_result_free(result_val);
+ }