whispercpp 1.2.0.2 → 1.3.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (135) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +5 -0
  3. data/LICENSE +1 -1
  4. data/README.md +165 -434
  5. data/Rakefile +46 -86
  6. data/ext/.gitignore +13 -0
  7. data/ext/cpu.mk +9 -0
  8. data/ext/{dr_wav.h → examples/dr_wav.h} +3560 -1179
  9. data/ext/extconf.rb +185 -7
  10. data/ext/ggml/include/ggml-alloc.h +76 -0
  11. data/ext/ggml/include/ggml-backend.h +352 -0
  12. data/ext/ggml/include/ggml-blas.h +25 -0
  13. data/ext/ggml/include/ggml-cann.h +123 -0
  14. data/ext/ggml/include/ggml-cpp.h +38 -0
  15. data/ext/ggml/include/ggml-cpu.h +135 -0
  16. data/ext/ggml/include/ggml-cuda.h +47 -0
  17. data/ext/ggml/include/ggml-kompute.h +50 -0
  18. data/ext/ggml/include/ggml-metal.h +66 -0
  19. data/ext/ggml/include/ggml-opencl.h +26 -0
  20. data/ext/ggml/include/ggml-opt.h +216 -0
  21. data/ext/ggml/include/ggml-rpc.h +28 -0
  22. data/ext/ggml/include/ggml-sycl.h +49 -0
  23. data/ext/ggml/include/ggml-vulkan.h +31 -0
  24. data/ext/ggml/include/ggml.h +2285 -0
  25. data/ext/ggml/src/ggml-alloc.c +1037 -0
  26. data/ext/ggml/src/ggml-amx/common.h +94 -0
  27. data/ext/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
  28. data/ext/ggml/src/ggml-amx/mmq.cpp +2510 -0
  29. data/ext/ggml/src/ggml-amx/mmq.h +17 -0
  30. data/ext/ggml/src/ggml-backend-impl.h +256 -0
  31. data/ext/ggml/src/ggml-backend-reg.cpp +552 -0
  32. data/ext/ggml/src/ggml-backend.cpp +1999 -0
  33. data/ext/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
  34. data/ext/ggml/src/ggml-cann/acl_tensor.cpp +175 -0
  35. data/ext/ggml/src/ggml-cann/acl_tensor.h +258 -0
  36. data/ext/ggml/src/ggml-cann/aclnn_ops.cpp +3427 -0
  37. data/ext/ggml/src/ggml-cann/aclnn_ops.h +592 -0
  38. data/ext/ggml/src/ggml-cann/common.h +286 -0
  39. data/ext/ggml/src/ggml-cann/ggml-cann.cpp +2188 -0
  40. data/ext/ggml/src/ggml-cann/kernels/ascendc_kernels.h +19 -0
  41. data/ext/ggml/src/ggml-cann/kernels/dup.cpp +236 -0
  42. data/ext/ggml/src/ggml-cann/kernels/get_row_f16.cpp +197 -0
  43. data/ext/ggml/src/ggml-cann/kernels/get_row_f32.cpp +190 -0
  44. data/ext/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +204 -0
  45. data/ext/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +191 -0
  46. data/ext/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +218 -0
  47. data/ext/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +216 -0
  48. data/ext/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +295 -0
  49. data/ext/ggml/src/ggml-common.h +1853 -0
  50. data/ext/ggml/src/ggml-cpu/amx/amx.cpp +220 -0
  51. data/ext/ggml/src/ggml-cpu/amx/amx.h +8 -0
  52. data/ext/ggml/src/ggml-cpu/amx/common.h +91 -0
  53. data/ext/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
  54. data/ext/ggml/src/ggml-cpu/amx/mmq.h +10 -0
  55. data/ext/ggml/src/ggml-cpu/cpu-feats-x86.cpp +323 -0
  56. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +4262 -0
  57. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
  58. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
  59. data/ext/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
  60. data/ext/ggml/src/ggml-cpu/ggml-cpu-impl.h +386 -0
  61. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.c +10835 -0
  62. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  63. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
  64. data/ext/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
  65. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +14123 -0
  66. data/ext/ggml/src/ggml-cpu/ggml-cpu.cpp +622 -0
  67. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1884 -0
  68. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  69. data/ext/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
  70. data/ext/ggml/src/ggml-cuda/vendors/hip.h +186 -0
  71. data/ext/ggml/src/ggml-cuda/vendors/musa.h +134 -0
  72. data/ext/ggml/src/ggml-impl.h +556 -0
  73. data/ext/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
  74. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +288 -0
  75. data/ext/ggml/src/ggml-metal/ggml-metal.m +4884 -0
  76. data/ext/ggml/src/ggml-metal/ggml-metal.metal +6732 -0
  77. data/ext/ggml/src/ggml-opt.cpp +854 -0
  78. data/ext/ggml/src/ggml-quants.c +5238 -0
  79. data/ext/ggml/src/ggml-quants.h +100 -0
  80. data/ext/ggml/src/ggml-rpc/ggml-rpc.cpp +1406 -0
  81. data/ext/ggml/src/ggml-sycl/common.cpp +95 -0
  82. data/ext/ggml/src/ggml-sycl/concat.cpp +196 -0
  83. data/ext/ggml/src/ggml-sycl/conv.cpp +99 -0
  84. data/ext/ggml/src/ggml-sycl/convert.cpp +547 -0
  85. data/ext/ggml/src/ggml-sycl/dmmv.cpp +1023 -0
  86. data/ext/ggml/src/ggml-sycl/element_wise.cpp +1030 -0
  87. data/ext/ggml/src/ggml-sycl/ggml-sycl.cpp +4729 -0
  88. data/ext/ggml/src/ggml-sycl/im2col.cpp +126 -0
  89. data/ext/ggml/src/ggml-sycl/mmq.cpp +3031 -0
  90. data/ext/ggml/src/ggml-sycl/mmvq.cpp +1015 -0
  91. data/ext/ggml/src/ggml-sycl/norm.cpp +378 -0
  92. data/ext/ggml/src/ggml-sycl/outprod.cpp +56 -0
  93. data/ext/ggml/src/ggml-sycl/rope.cpp +276 -0
  94. data/ext/ggml/src/ggml-sycl/softmax.cpp +251 -0
  95. data/ext/ggml/src/ggml-sycl/tsembd.cpp +72 -0
  96. data/ext/ggml/src/ggml-sycl/wkv6.cpp +141 -0
  97. data/ext/ggml/src/ggml-threading.cpp +12 -0
  98. data/ext/ggml/src/ggml-threading.h +14 -0
  99. data/ext/ggml/src/ggml-vulkan/ggml-vulkan.cpp +8657 -0
  100. data/ext/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +593 -0
  101. data/ext/ggml/src/ggml.c +7694 -0
  102. data/ext/include/whisper.h +672 -0
  103. data/ext/metal-embed.mk +17 -0
  104. data/ext/metal.mk +6 -0
  105. data/ext/ruby_whisper.cpp +1608 -159
  106. data/ext/ruby_whisper.h +10 -0
  107. data/ext/scripts/get-flags.mk +38 -0
  108. data/ext/src/coreml/whisper-decoder-impl.h +146 -0
  109. data/ext/src/coreml/whisper-decoder-impl.m +201 -0
  110. data/ext/src/coreml/whisper-encoder-impl.h +142 -0
  111. data/ext/src/coreml/whisper-encoder-impl.m +197 -0
  112. data/ext/src/coreml/whisper-encoder.h +26 -0
  113. data/ext/src/openvino/whisper-openvino-encoder.cpp +108 -0
  114. data/ext/src/openvino/whisper-openvino-encoder.h +31 -0
  115. data/ext/src/whisper.cpp +7393 -0
  116. data/extsources.rb +6 -0
  117. data/lib/whisper/model/uri.rb +157 -0
  118. data/lib/whisper.rb +2 -0
  119. data/tests/helper.rb +7 -0
  120. data/tests/jfk_reader/.gitignore +5 -0
  121. data/tests/jfk_reader/extconf.rb +3 -0
  122. data/tests/jfk_reader/jfk_reader.c +68 -0
  123. data/tests/test_callback.rb +160 -0
  124. data/tests/test_error.rb +20 -0
  125. data/tests/test_model.rb +71 -0
  126. data/tests/test_package.rb +31 -0
  127. data/tests/test_params.rb +160 -0
  128. data/tests/test_segment.rb +83 -0
  129. data/tests/test_whisper.rb +211 -123
  130. data/whispercpp.gemspec +36 -0
  131. metadata +137 -11
  132. data/ext/ggml.c +0 -8616
  133. data/ext/ggml.h +0 -748
  134. data/ext/whisper.cpp +0 -4829
  135. data/ext/whisper.h +0 -402
@@ -0,0 +1,220 @@
1
+ #include "amx.h"
2
+ #include "common.h"
3
+ #include "mmq.h"
4
+ #include "ggml-backend-impl.h"
5
+ #include "ggml-backend.h"
6
+ #include "ggml-impl.h"
7
+ #include "ggml-cpu.h"
8
+ #include "ggml-cpu-traits.h"
9
+
10
+ #if defined(__gnu_linux__)
11
+ #include <sys/syscall.h>
12
+ #include <unistd.h>
13
+ #endif
14
+
15
+ #include <cstdlib>
16
+ #include <cstring>
17
+ #include <memory>
18
+
19
+ #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
20
+
21
+ // AMX type_trais
22
+ namespace ggml::cpu::amx {
23
+ class tensor_traits : public ggml::cpu::tensor_traits {
24
+ bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
25
+ size = ggml_backend_amx_desired_wsize(op);
26
+ return true;
27
+ }
28
+
29
+ bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
30
+ if (op->op == GGML_OP_MUL_MAT) {
31
+ ggml_backend_amx_mul_mat(params, op);
32
+ return true;
33
+ }
34
+ return false;
35
+ }
36
+ };
37
+
38
+ static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
39
+ static tensor_traits traits;
40
+ return &traits;
41
+ }
42
+ } // namespace ggml::cpu::amx
43
+
44
+ // AMX buffer interface
45
+ static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
46
+ free(buffer->context);
47
+ }
48
+
49
+ static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
50
+ return (void *) (buffer->context);
51
+ }
52
+
53
+ static void ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
54
+ tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
55
+
56
+ GGML_UNUSED(buffer);
57
+ }
58
+
59
+ static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
60
+ uint8_t value, size_t offset, size_t size) {
61
+ memset((char *) tensor->data + offset, value, size);
62
+
63
+ GGML_UNUSED(buffer);
64
+ }
65
+
66
+ static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
67
+ const void * data, size_t offset, size_t size) {
68
+ if (qtype_has_amx_kernels(tensor->type)) {
69
+ GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type));
70
+ ggml_backend_amx_convert_weight(tensor, data, offset, size);
71
+ } else {
72
+ memcpy((char *) tensor->data + offset, data, size);
73
+ }
74
+
75
+ GGML_UNUSED(buffer);
76
+ }
77
+
78
+ /*
79
+ // need to figure what we need to do with buffer->extra.
80
+ static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
81
+ GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
82
+ memcpy(data, (const char *)tensor->data + offset, size);
83
+
84
+ GGML_UNUSED(buffer);
85
+ }
86
+
87
+ static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
88
+ if (ggml_backend_buffer_is_host(src->buffer)) {
89
+ if (qtype_has_amx_kernels(src->type)) {
90
+ ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst));
91
+ } else {
92
+ memcpy(dst->data, src->data, ggml_nbytes(src));
93
+ }
94
+ return true;
95
+ }
96
+ return false;
97
+
98
+ GGML_UNUSED(buffer);
99
+ }
100
+ */
101
+
102
+ static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
103
+ memset(buffer->context, value, buffer->size);
104
+ }
105
+
106
// vtable for AMX buffers. get_tensor/cpy_tensor are left unset (see the
// commented-out draft implementations above); reset is unnecessary because
// tensor->extra points at a shared static traits object, not per-buffer state.
static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
    /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer,
    /* .get_base = */ ggml_backend_amx_buffer_get_base,
    /* .init_tensor = */ ggml_backend_amx_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
    /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor,
    /* .get_tensor = */ nullptr,
    /* .cpy_tensor = */ nullptr,
    /* .clear = */ ggml_backend_amx_buffer_clear,
    /* .reset = */ nullptr,
};
117
+
118
+ static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
119
+ return "AMX";
120
+
121
+ GGML_UNUSED(buft);
122
+ }
123
+
124
+ static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
125
+ void * data = ggml_aligned_malloc(size);
126
+ if (data == NULL) {
127
+ fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
128
+ return NULL;
129
+ }
130
+
131
+ return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
132
+ }
133
+
134
+ static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
135
+ return TENSOR_ALIGNMENT;
136
+
137
+ GGML_UNUSED(buft);
138
+ }
139
+
140
namespace ggml::cpu::amx {
// Extra-buffer-type hook for the CPU backend: decides which ops the AMX path
// may take over and exposes the traits stored on AMX-resident weights.
class extra_buffer_type : ggml::cpu::extra_buffer_type {
    // An op is supported when it is a 2-D contiguous MUL_MAT whose weight
    // (src0) lives in the AMX buffer type, whose output width is a multiple
    // of 2*TILE_N, and whose activation (src1) is F32 in a host buffer.
    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
        // handle only 2d gemm for now
        auto is_contiguous_2d = [](const struct ggml_tensor * t) {
            return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
        };

        if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) && // src0 must be contiguous
            is_contiguous_2d(op->src[1]) && // src1 must be contiguous
            op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() &&
            op->ne[0] % (TILE_N * 2) == 0 && // out_features is 32x
            (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) {
            // src1 must be host buffer
            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
                return false;
            }
            // src1 must be float32
            if (op->src[1]->type == GGML_TYPE_F32) {
                return true;
            }
        }
        return false;
    }

    // Weights placed in an AMX buffer carry the shared tensor_traits pointer
    // in tensor->extra (set by ggml_backend_amx_buffer_init_tensor).
    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
        if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer &&
            op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) {
            return (ggml::cpu::tensor_traits *) op->src[0]->extra;
        }

        return nullptr;
    }
};
} // namespace ggml::cpu::amx
175
+
176
+ static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
177
+ return ggml_backend_amx_get_alloc_size(tensor);
178
+
179
+ GGML_UNUSED(buft);
180
+ }
181
+
182
// Linux arch_prctl(2) sub-commands and XSAVE feature numbers used below in
// ggml_amx_init() to request permission for the AMX tile-data state.
#define ARCH_GET_XCOMP_PERM 0x1022
#define ARCH_REQ_XCOMP_PERM 0x1023
#define XFEATURE_XTILECFG 17
#define XFEATURE_XTILEDATA 18
186
+
187
+ static bool ggml_amx_init() {
188
+ #if defined(__gnu_linux__)
189
+ if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
190
+ fprintf(stderr, "AMX is not ready to be used!\n");
191
+ return false;
192
+ }
193
+ return true;
194
+ #elif defined(_WIN32)
195
+ return true;
196
+ #endif
197
+ }
198
+
199
+ ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
200
+ static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
201
+ /* .iface = */ {
202
+ /* .get_name = */ ggml_backend_amx_buffer_type_get_name,
203
+ /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer,
204
+ /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment,
205
+ /* .get_max_size = */ nullptr, // defaults to SIZE_MAX
206
+ /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size,
207
+ /* .is_host = */ nullptr,
208
+ },
209
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
210
+ /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
211
+ };
212
+
213
+ if (!ggml_amx_init()) {
214
+ return nullptr;
215
+ }
216
+
217
+ return &ggml_backend_buffer_type_amx;
218
+ }
219
+
220
+ #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
@@ -0,0 +1,8 @@
1
+ #include "ggml-backend.h"
2
+ #include "ggml-cpu-impl.h"
3
+
4
// GGML internal header

// Buffer type holding weights repacked for the AMX matmul kernels; only
// available when the translation unit is built with AMX-INT8 + AVX512-VNNI.
#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
#endif
@@ -0,0 +1,91 @@
1
+ #pragma once
2
+
3
+ #include "ggml.h"
4
+ #include "ggml-cpu-impl.h"
5
+
6
+ #include <algorithm>
7
+ #include <memory>
8
+ #include <type_traits>
9
+
10
+ #if defined(GGML_USE_OPENMP)
11
+ #include <omp.h>
12
+ #endif
13
+
14
// Tile geometry used by the AMX kernels.
// NOTE(review): presumably these mirror the hardware tile registers
// (16 rows x 64 bytes) with VNNI packing 4 bytes per lane — confirm against
// the kernel code in mmq.cpp.
#define TILE_M 16
#define TILE_N 16
#define TILE_K 32
#define VNNI_BLK 4

#define AMX_BLK_SIZE 32

// Symbolic indices for the 8 AMX tile registers tmm0..tmm7.
#define TMM0 0
#define TMM1 1
#define TMM2 2
#define TMM3 3
#define TMM4 4
#define TMM5 5
#define TMM6 6
#define TMM7 7
29
+
30
// parallel routines

// Integer ceiling division: smallest q such that q * y >= x (for positive y).
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
inline T div_up(T x, T y) {
    const T biased = x + y - 1;
    return biased / y;
}
33
+
34
// Split [0, n) into nth contiguous chunks and return chunk ith as
// [n_start, n_end). Follows the PyTorch ATen partition pattern: every worker
// receives ceil(n / nth) items and the final chunk is clamped to n, so late
// workers may get an empty range.
template <typename T>
inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
    const T chunk = div_up(n, nth);
    n_start = ith * chunk;
    n_end   = std::min(n_start + chunk, n);
}
57
+
58
// Invoke f(begin, end) over a partition of [0, n): one chunk per OpenMP
// thread when OpenMP is enabled, otherwise a single chunk [0, n) on the
// calling thread.
template <typename func_t>
inline void parallel_for(int n, const func_t& f) {
#if defined(GGML_USE_OPENMP)
#pragma omp parallel
    {
        const int nth = omp_get_num_threads();
        const int ith = omp_get_thread_num();
        int begin = 0;
        int end   = 0;
        balance211(n, nth, ith, begin, end);
        f(begin, end);
    }
#else
    // serial fallback: the whole range in one call
    f(0, n);
#endif
}
73
+
74
+ template <typename func_t>
75
+ inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) {
76
+ int tbegin, tend;
77
+ balance211(n, params->nth, params->ith, tbegin, tend);
78
+ f(tbegin, tend);
79
+ }
80
+
81
+ // quantized types that have AMX support
82
+ inline bool qtype_has_amx_kernels(const enum ggml_type type) {
83
+ // TODO: fix padding for vnni format
84
+ return (type == GGML_TYPE_Q4_0) ||
85
+ (type == GGML_TYPE_Q4_1) ||
86
+ (type == GGML_TYPE_Q8_0) ||
87
+ (type == GGML_TYPE_Q4_K) ||
88
+ (type == GGML_TYPE_Q5_K) ||
89
+ (type == GGML_TYPE_Q6_K) ||
90
+ (type == GGML_TYPE_IQ4_XS);
91
+ }