gpt_neox_client 0.1.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +84 -0
- data/LICENSE.txt +21 -0
- data/README.md +68 -0
- data/ext/gpt_neox_client/extconf.rb +25 -0
- data/ext/gpt_neox_client/gpt_neox_client.cpp +316 -0
- data/ext/gpt_neox_client/gpt_neox_client.h +10 -0
- data/ext/gpt_neox_client/src/LICENSE +21 -0
- data/ext/gpt_neox_client/src/common-ggml.cpp +246 -0
- data/ext/gpt_neox_client/src/common-ggml.h +18 -0
- data/ext/gpt_neox_client/src/common.cpp +809 -0
- data/ext/gpt_neox_client/src/common.h +176 -0
- data/ext/gpt_neox_client/src/dr_wav.h +6434 -0
- data/ext/gpt_neox_client/src/ggml/ggml-alloc.c +594 -0
- data/ext/gpt_neox_client/src/ggml/ggml-alloc.h +26 -0
- data/ext/gpt_neox_client/src/ggml/ggml-cuda.cu +6756 -0
- data/ext/gpt_neox_client/src/ggml/ggml-cuda.h +46 -0
- data/ext/gpt_neox_client/src/ggml/ggml-metal.h +85 -0
- data/ext/gpt_neox_client/src/ggml/ggml-metal.m +1195 -0
- data/ext/gpt_neox_client/src/ggml/ggml-metal.metal +2049 -0
- data/ext/gpt_neox_client/src/ggml/ggml-opencl.cpp +1865 -0
- data/ext/gpt_neox_client/src/ggml/ggml-opencl.h +25 -0
- data/ext/gpt_neox_client/src/ggml/ggml.c +20632 -0
- data/ext/gpt_neox_client/src/ggml/ggml.h +1997 -0
- data/ext/gpt_neox_client/src/main.cpp +814 -0
- data/lib/gpt_neox_client/version.rb +7 -0
- data/lib/gpt_neox_client.rb +4 -0
- metadata +75 -0
--- /dev/null
+++ b/data/ext/gpt_neox_client/src/ggml/ggml-opencl.cpp
@@ -0,0 +1,1865 @@
#include "ggml-opencl.h"

#include <array>
#include <atomic>
#include <sstream>
#include <vector>
#include <limits>

#define CL_TARGET_OPENCL_VERSION 110
#include <clblast.h>

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "ggml.h"

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#define CL_DMMV_BLOCK_SIZE 32

#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif

#define MULTILINE_QUOTE(...) #__VA_ARGS__
static std::string program_source = MULTILINE_QUOTE(

typedef char int8_t;
typedef uchar uint8_t;
typedef short int16_t;
typedef ushort uint16_t;
typedef int int32_t;
typedef uint uint32_t;

struct __attribute__ ((packed)) block_q4_0
{
    half d;
    uint8_t qs[QK4_0 / 2];
};

struct __attribute__ ((packed)) block_q4_1
{
    half d;
    half m;
    uint8_t qs[QK4_1 / 2];
};

struct __attribute__ ((packed)) block_q5_0
{
    half d;
    uint32_t qh;
    uint8_t qs[QK5_0 / 2];
};

struct __attribute__ ((packed)) block_q5_1
{
    half d;
    half m;
    uint32_t qh;
    uint8_t qs[QK5_1 / 2];
};

struct __attribute__ ((packed)) block_q8_0
{
    half d;
    int8_t qs[QK8_0];
};

struct __attribute__((packed)) block_q2_K
{
    uint8_t scales[16];
    uint8_t qs[64];
    half d;
    half dmin;
};

struct __attribute__((packed)) block_q3_K
{
    uint8_t hmask[32];
    uint8_t qs[64];
    uint8_t scales[12];
    half d;
};

struct __attribute__((packed)) block_q4_K
{
    half d;
    half dmin;
    uint8_t scales[12];
    uint8_t qs[128];
};

struct __attribute__((packed)) block_q5_K
{
    half d;
    half dmin;
    uint8_t scales[12];
    uint8_t qh[32];
    uint8_t qs[128];
};

struct __attribute__((packed)) block_q6_K
{
    uint8_t ql[128];
    uint8_t qh[64];
    int8_t scales[16];
    half d;
};

__kernel void convert_fp16_to_fp32(__global half* x, __global float* y) {
    const uint i = get_global_id(0);

    y[i] = vload_half(0, &x[i]);
}

void dequantize_q4_0(__global const struct block_q4_0* x, const int ib, const int iqs, float* v0, float* v1) {
    const float d = vload_half(0, &x[ib].d);

    const uint8_t vui = x[ib].qs[iqs];

    const int8_t vi0 = vui & 0xF;
    const int8_t vi1 = vui >> 4;

    *v0 = (vi0 - 8)*d;
    *v1 = (vi1 - 8)*d;
}
void dequantize_q4_1(__global const struct block_q4_1* x, const int ib, const int iqs, float* v0, float* v1) {
    const float d = vload_half(0, &x[ib].d);
    const float m = vload_half(0, &x[ib].m);

    const uint8_t vui = x[ib].qs[iqs];

    const int8_t vi0 = vui & 0xF;
    const int8_t vi1 = vui >> 4;

    *v0 = vi0*d + m;
    *v1 = vi1*d + m;
}
void dequantize_q5_0(__global const struct block_q5_0* x, const int ib, const int iqs, float* v0, float* v1) {
    const float d = vload_half(0, &x[ib].d);

    uint32_t qh = x[ib].qh;

    const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
    const uint8_t xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0) - 16;
    const int32_t x1 = ((x[ib].qs[iqs] >>  4) | xh_1) - 16;

    *v0 = x0*d;
    *v1 = x1*d;
}
void dequantize_q5_1(__global const struct block_q5_1* x, const int ib, const int iqs, float* v0, float* v1) {
    const float d = vload_half(0, &x[ib].d);
    const float m = vload_half(0, &x[ib].m);

    uint32_t qh = x[ib].qh;

    const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
    const uint8_t xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0);
    const int32_t x1 = ((x[ib].qs[iqs] >>  4) | xh_1);

    *v0 = x0*d + m;
    *v1 = x1*d + m;
}
void dequantize_q8_0(__global const struct block_q8_0* x, const int ib, const int iqs, float* v0, float* v1) {
    const float d = vload_half(0, &x[ib].d);

    const int8_t vi0 = x[ib].qs[iqs + 0];
    const int8_t vi1 = x[ib].qs[iqs + 1];

    *v0 = vi0*d;
    *v1 = vi1*d;
}
void convert_f16(__global half* x, const int ib, const int iqs, float* v0, float* v1){
    *v0 = vload_half(0, &x[ib + 0]);
    *v1 = vload_half(0, &x[ib + 1]);
}
);
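// Reading aid for the 4-bit formats above: a block_q4_0 packs 32 weights
// (QK4_0 is defined as 32 in the compile options further down) as one fp16
// scale d plus 16 bytes of nibbles, and dequantize_q4_0 maps a nibble q to
// (q - 8)*d. For example, with d = 0.5 the nibble 0x3 decodes to
// (3 - 8)*0.5 = -2.5 and 0xF to (15 - 8)*0.5 = 3.5. block_q4_1 carries an
// extra per-block offset m, so its nibbles decode as q*d + m instead.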

static std::string k_quants_source = MULTILINE_QUOTE(
inline void get_scale_min_k4(int j, const __global uint8_t *q, uint8_t *d, uint8_t *m)
{
    if (j < 4)
    {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    }
    else
    {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
    }
}

__kernel void dequantize_block_q2_K(__global const struct block_q2_K *x, __global float *yy)
{
    const int i = get_group_id(0);
    const int tid = get_local_id(0);
    const int n = tid / 32;
    const int l = tid - 32 * n;
    const int is = 8 * n + l / 16;

    const uint8_t q = x[i].qs[32 * n + l];
    __global float *y = yy + i * QK_K + 128 * n;

    const float dall = vload_half(0, &x[i].d);
    const float dmin = vload_half(0, &x[i].dmin);

    y[l + 0] = dall * (x[i].scales[is + 0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is + 0] >> 4);
    y[l + 32] = dall * (x[i].scales[is + 2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is + 2] >> 4);
    y[l + 64] = dall * (x[i].scales[is + 4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is + 4] >> 4);
    y[l + 96] = dall * (x[i].scales[is + 6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is + 6] >> 4);
}

__kernel void dequantize_block_q3_K(__global const struct block_q3_K *x, __global float *yy)
{
    int r = get_local_id(0) / 4;
    int i = get_group_id(0);
    int tid = r / 2;
    int is0 = r % 2;
    int l0 = 16 * is0 + 4 * (get_local_id(0) % 4);
    int n = tid / 4;
    int j = tid - 4 * n;

    uint8_t m = 1 << (4 * n + j);
    int is = 8 * n + 2 * j + is0;
    int shift = 2 * j;

    int8_t us = is < 4 ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 8] >> 0) & 3) << 4)
              : is < 8 ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 4] >> 2) & 3) << 4)
              : is < 12 ? (x[i].scales[is - 8] >> 4) | (((x[i].scales[is + 0] >> 4) & 3) << 4)
              : (x[i].scales[is - 8] >> 4) | (((x[i].scales[is - 4] >> 6) & 3) << 4);
    float d_all = vload_half(0, &x[i].d);
    float dl = d_all * (us - 32);

    __global float *y = yy + i * QK_K + 128 * n + 32 * j;
    const __global uint8_t *q = x[i].qs + 32 * n;
    const __global uint8_t *hm = x[i].hmask;

    for (int l = l0; l < l0 + 4; ++l)
        y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
}

__kernel void dequantize_block_q4_K(__global const struct block_q4_K *x, __global float *yy)
{
    const int i = get_group_id(0);
    const int tid = get_local_id(0);
    const int il = tid / 8;
    const int ir = tid % 8;
    const int is = 2 * il;
    const int n = 4;

    __global float *y = yy + i * QK_K + 64 * il + n * ir;

    const float dall = vload_half(0, &x[i].d);
    const float dmin = vload_half(0, &x[i].dmin);

    __global const uint8_t *q = x[i].qs + 32 * il + n * ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
    float d1 = dall * sc;
    float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
    float d2 = dall * sc;
    float m2 = dmin * m;
    for (int l = 0; l < n; ++l)
    {
        y[l + 0] = d1 * (q[l] & 0xF) - m1;
        y[l + 32] = d2 * (q[l] >> 4) - m2;
    }
}

__kernel void dequantize_block_q5_K(__global const struct block_q5_K *x, __global float *yy)
{
    const int i = get_group_id(0);
    const int tid = get_local_id(0);
    const int il = tid / 16;
    const int ir = tid % 16;
    const int is = 2 * il;

    __global float *y = yy + i * QK_K + 64 * il + 2 * ir;

    const float dall = vload_half(0, &x[i].d);
    const float dmin = vload_half(0, &x[i].dmin);

    __global const uint8_t *ql = x[i].qs + 32 * il + 2 * ir;
    __global const uint8_t *qh = x[i].qh + 2 * ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
    const float d1 = dall * sc;
    const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
    const float d2 = dall * sc;
    const float m2 = dmin * m;

    uint8_t hm = 1 << (2 * il);
    y[0] = d1 * ((ql[0] & 0xF) + (qh[0] & hm ? 16 : 0)) - m1;
    y[1] = d1 * ((ql[1] & 0xF) + (qh[1] & hm ? 16 : 0)) - m1;
    hm <<= 1;
    y[32] = d2 * ((ql[0] >> 4) + (qh[0] & hm ? 16 : 0)) - m2;
    y[33] = d2 * ((ql[1] >> 4) + (qh[1] & hm ? 16 : 0)) - m2;
}

__kernel void dequantize_block_q6_K(__global const struct block_q6_K *x, __global float *yy)
{
    const int i = get_group_id(0);
    const int tid = get_local_id(0);
    const int ip = tid / 32;
    const int il = tid - 32 * ip;
    const int is = 8 * ip + il / 16;

    __global float *y = yy + i * QK_K + 128 * ip + il;

    const float d = vload_half(0, &x[i].d);

    __global const uint8_t *ql = x[i].ql + 64 * ip + il;
    const uint8_t qh = x[i].qh[32 * ip + il];
    __global const int8_t *sc = x[i].scales + is;

    y[0] = d * sc[0] * ((int8_t)((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
    y[64] = d * sc[4] * ((int8_t)((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
    y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
}

__kernel void dequantize_mul_mat_vec_q2_K(__global const struct block_q2_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {

    const int row = get_group_id(0);

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    __global const struct block_q2_K * x = xx + ib0;

    const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION;  // 0...31 or 0...15
    const int ix  = get_local_id(0)%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 16/K_QUANTS_PER_ITERATION;

    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int s_offset = 8*im;
    const int y_offset = 128*im + l0;

    tmp[16 * ix + tid] = 0;

    uint32_t aux[4];
    const uint8_t * d = (const uint8_t *)aux;
    const uint8_t * m = (const uint8_t *)(aux + 2);

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        __global const float   * y = yy + i * QK_K + y_offset;
        __global const uint8_t * q = x[i].qs + q_offset;

        const float dall = vload_half(0, &x[i].d);
        const float dmin = vload_half(0, &x[i].dmin);

        __global const uint32_t * a = (__global const uint32_t *)(x[i].scales + s_offset);
        aux[0] = a[0] & 0x0f0f0f0f;
        aux[1] = a[1] & 0x0f0f0f0f;
        aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
        aux[3] = (a[1] >> 4) & 0x0f0f0f0f;

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
                  + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
                  + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
                  + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
                  + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
                  + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
                  + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
                  + y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
            sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[l+96] * m[6]
                  + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];

        }
        tmp[16 * ix + tid] += dall * sum1 - dmin * sum2;

    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=16; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}

__kernel void dequantize_mul_mat_vec_q3_K(__global const struct block_q3_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {
    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int row = get_group_id(0);

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    __global const struct block_q3_K * x = xx + ib0;

    const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = get_local_id(0)%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int n = K_QUANTS_PER_ITERATION;          // iterations in the inner loop
    const int step = 16/K_QUANTS_PER_ITERATION;
    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0....15 or 0...7

    const uint8_t m = 1 << (4*im);

    const int l0 = n*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int y_offset = 128*im + l0;

    uint16_t utmp[4];
    const int8_t * s = (const int8_t *)utmp;

    const uint16_t s_shift = 4*im;

    tmp[16 * ix + tid] = 0;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        __global const float   * y = yy + i * QK_K + y_offset;
        __global const uint8_t * q = x[i].qs + q_offset;
        __global const uint8_t * h = x[i].hmask + l0;

        __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
        utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
        utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
        utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
        utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);

        const float d = vload_half(0, &x[i].d);

        float sum = 0;
        for (int l = 0; l < n; ++l) {
            sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
                 + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
                 + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
                 + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
            sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
                 + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
                 + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
                 + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
        }
        tmp[16 * ix + tid] += d * sum;

    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=16; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}

__kernel void dequantize_mul_mat_vec_q4_K(__global const struct block_q4_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {

    //to rename it later, just to test now
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int row = get_group_id(0);
    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION;  // 0...15
    const int ix  = get_local_id(0)%K_QUANTS_PER_ITERATION;

    const int step = 8/K_QUANTS_PER_ITERATION;

    const int il = tid/step;     // 0...3
    const int ir = tid - step*il;// 0...3
    const int n = 2*K_QUANTS_PER_ITERATION;

    const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

    __global const struct block_q4_K * x = xx + ib0;

    tmp[16 * ix + tid] = 0;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        __global const uint8_t * q1 = x[i].qs + q_offset;
        __global const uint8_t * q2 = q1 + 64;
        __global const float   * y1 = yy + i*QK_K + y_offset;
        __global const float   * y2 = y1 + 128;

        const float dall = vload_half(0, &x[i].d);
        const float dmin = vload_half(0, &x[i].dmin);

        __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

        float4 s = (float4)(0.f);
        float smin = 0;
        for (int l = 0; l < n; ++l) {
            s.x += y1[l] * (q1[l] & 0xF); s.y += y1[l+32] * (q1[l] >> 4);
            s.z += y2[l] * (q2[l] & 0xF); s.w += y2[l+32] * (q2[l] >> 4);
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp[16 * ix + tid] += dall * (s.x * sc[0] + s.y * sc[1] + s.z * sc[4] + s.w * sc[5]) - dmin * smin;

    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=16; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}

__kernel void dequantize_mul_mat_vec_q5_K(__global const struct block_q5_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int row = get_group_id(0);
    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const int tid = get_local_id(0)/2;  // 0...15
    const int ix  = get_local_id(0)%2;

    const int il = tid/4;     // 0...3
    const int ir = tid - 4*il;// 0...3
    const int n = 2;

    const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    const uint8_t hm1 = 1 << (2*im);
    const uint8_t hm2 = hm1 << 4;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

    __global const struct block_q5_K * x = xx + ib0;

    tmp[16 * ix + tid] = 0;

    for (int i = ix; i < num_blocks_per_row; i += 2) {

        __global const uint8_t * ql1 = x[i].qs + q_offset;
        __global const uint8_t * ql2 = ql1 + 64;
        __global const uint8_t * qh  = x[i].qh + l0;
        __global const float   * y1  = yy + i*QK_K + y_offset;
        __global const float   * y2  = y1 + 128;

        const float dall = vload_half(0, &x[i].d);
        const float dmin = vload_half(0, &x[i].dmin);

        __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

        float4 sum = (float4)(0.f);
        float smin = 0;
        for (int l = 0; l < n; ++l) {
            sum.x += y1[l+ 0] * ((ql1[l+ 0] & 0xF) + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
                   + y1[l+16] * ((ql1[l+16] & 0xF) + (qh[l+16] & (hm1 << 0) ? 16 : 0));
            sum.y += y1[l+32] * ((ql1[l+ 0] >>  4) + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
                   + y1[l+48] * ((ql1[l+16] >>  4) + (qh[l+16] & (hm1 << 1) ? 16 : 0));
            sum.z += y2[l+ 0] * ((ql2[l+ 0] & 0xF) + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
                   + y2[l+16] * ((ql2[l+16] & 0xF) + (qh[l+16] & (hm2 << 0) ? 16 : 0));
            sum.w += y2[l+32] * ((ql2[l+ 0] >>  4) + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
                   + y2[l+48] * ((ql2[l+16] >>  4) + (qh[l+16] & (hm2 << 1) ? 16 : 0));
            smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
                  + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
        }
        tmp[16 * ix + tid] += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;

    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=16; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}

__kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx, __local float* tmp, __global const float * yy, __global float * dst, const int ncols) {

    const int row = get_group_id(0);

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    __global const struct block_q6_K * x = xx + ib0;

    const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = get_local_id(0)%K_QUANTS_PER_ITERATION;  // 0 or 0, 1

    const int step = 16/K_QUANTS_PER_ITERATION;  // 16 or 8

    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

    \n#if K_QUANTS_PER_ITERATION == 1\n
    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
    const int is = 0;

    \n#else\n

    const int l0 = 4 * in; // 0, 4, 8, ..., 28
    const int is = in / 4;

    \n#endif\n

    const int ql_offset = 64*im + l0;
    const int qh_offset = 32*im + l0;
    const int s_offset = 8*im + is;
    const int y_offset = 128*im + l0;

    tmp[16 * ix + tid] = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        __global const float   * y = yy + i * QK_K + y_offset;
        __global const uint8_t * ql = x[i].ql + ql_offset;
        __global const uint8_t * qh = x[i].qh + qh_offset;
        __global const int8_t  * s = x[i].scales + s_offset;

        const float d = vload_half(0, &x[i].d);

        \n#if K_QUANTS_PER_ITERATION == 1\n
        float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
                  + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
                  + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
                  + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
                  + y[64] * s[4] * d * ((int8_t)((ql[ 0] >>  4) | ((qh[ 0] & 0x30) >> 0)) - 32)
                  + y[80] * s[5] * d * ((int8_t)((ql[16] >>  4) | ((qh[16] & 0x30) >> 0)) - 32)
                  + y[96] * s[6] * d * ((int8_t)((ql[32] >>  4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
                  + y[112] * s[7] * d * ((int8_t)((ql[48] >>  4) | ((qh[16] & 0xc0) >> 2)) - 32);
        tmp[16 * ix + tid] += sum;
        \n#else\n
        float sum = 0;
        for (int l = 0; l < 4; ++l) {
            sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
                 + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
                 + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32)
                 + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32);
        }
        tmp[16 * ix + tid] += sum;
        \n#endif\n

    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=16; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}

);
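// The k-quant kernels above operate on super-blocks of QK_K = 256 weights
// (set via -DQK_K=256 below). In block_q2_K, for instance, each of the 16
// scale bytes packs a 4-bit scale in the low nibble and a 4-bit min in the
// high nibble, and dequantize_block_q2_K reconstructs
// y = dall*scale*q - dmin*min from the two per-super-block fp16 factors
// dall and dmin.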


std::string dequant_template = MULTILINE_QUOTE(
__kernel void KERNEL_NAME(__global X_TYPE* x, __global float* y) {
    const int i = get_group_id(0)*get_local_size(0) + get_local_id(0)*2;

    if (i >= get_global_size(0)) {
        return;
    }

    const uint qk = QUANT_K;
    const uint qr = QUANT_R;

    const int ib = i/qk;       // block index
    const int iqs = (i%qk)/qr; // quant index
    const int iybs = i - i%qk; // y block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    float v0, v1;
    DEQUANT_FUNC(x, ib, iqs, &v0, &v1);
    y[iybs + iqs + 0] = v0;
    y[iybs + iqs + y_offset] = v1;
}
);
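// This template is specialized textually: generate_kernels() further down
// substitutes KERNEL_NAME, X_TYPE, QUANT_K, QUANT_R and DEQUANT_FUNC from
// the dequant_str_values table. For q4_0 that gives QUANT_K = 32 and
// QUANT_R = 2, so ib = i/32, iqs = (i%32)/2 and y_offset = 16: each
// work-item dequantizes one packed byte into two outputs 16 floats apart.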

std::string dequant_mul_mat_vec_template = MULTILINE_QUOTE(
__kernel void KERNEL_NAME(__global X_TYPE* x, __local float* tmp, __global float* y, __global float* dst, const int ncols) {
    const int block_size = get_local_size(0);
    const int row = get_group_id(0);
    const int tid = get_local_id(0);

    const uint qk = QUANT_K;
    const uint qr = QUANT_R;

    const int y_offset = qr == 1 ? 1 : qk/2;

    tmp[tid] = 0;

    for (int i = 0; i < ncols/block_size; i += 2) {
        const int col = i*block_size + 2*tid;
        const int ib = (row*ncols + col)/qk; // block index
        const int iqs = (col%qk)/qr; // quant index
        const int iybs = col - col%qk; // y block start index

        // dequantize
        float v0, v1;
        DEQUANT_FUNC(x, ib, iqs, &v0, &v1);

        // matrix multiplication
        tmp[tid] += v0 * y[iybs + iqs + 0];
        tmp[tid] += v1 * y[iybs + iqs + y_offset];
    }

    // sum up partial sums and write back result
    barrier(CLK_LOCAL_MEM_FENCE);
    for (int s=block_size/2; s>0; s>>=1) {
        if (tid < s) {
            tmp[tid] += tmp[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    if (tid == 0) {
        dst[row] = tmp[0];
    }
}
);
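// The reduction at the end of this template is the same pattern used by all
// the dequantize_mul_mat_vec_* kernels: each work-item accumulates a private
// partial dot product into __local tmp[], the group then halves the live
// range each step (tmp[tid] += tmp[tid + s] for s = block_size/2, ..., 1)
// with a barrier between steps, and work-item 0 writes the row result.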


std::string mul_template = MULTILINE_QUOTE(
__kernel void KERNEL_NAME(__global TYPE* x, const int x_offset, __global TYPE* y, const int y_offset, __global TYPE* dst, const int dst_offset, const int ky) {
    const int i = get_group_id(0)*get_local_size(0) + get_local_id(0);

    if (i >= get_global_size(0)) {
        return;
    }

    dst[dst_offset + i] = x[x_offset + i] * y[y_offset + i%ky];
}
);

#define CL_CHECK(err)                                               \
    do {                                                            \
        cl_int err_ = (err);                                        \
        if (err_ != CL_SUCCESS) {                                   \
            fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n",  \
                #err, err_, __FILE__, __LINE__);                    \
            exit(1);                                                \
        }                                                           \
    } while (0)

#define CLBLAST_CHECK(err)                                          \
    do {                                                            \
        CLBlastStatusCode err_ = (err);                             \
        if (err_ != CLBlastSuccess) {                               \
            fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n",  \
                #err, err_, __FILE__, __LINE__);                    \
            exit(1);                                                \
        }                                                           \
    } while (0)
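// CL_CHECK also accepts comma expressions, which is how out-parameter style
// OpenCL calls are checked inline later in this file. A minimal sketch of
// the idiom (the kernel name here is just an example):
//
//     cl_int err;
//     cl_kernel k;
//     CL_CHECK((k = clCreateKernel(program, "mul_f32", &err), err));
//
// The assignment runs first; the comma expression then yields err, which is
// the value the macro tests against CL_SUCCESS.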

std::array<std::string, 5> dequant_str_keys = {
    "KERNEL_NAME", "X_TYPE", "QUANT_K", "QUANT_R", "DEQUANT_FUNC"
};

std::array<std::string, 30> dequant_str_values = {
    "dequantize_row_q4_0", "struct block_q4_0", "QK4_0", "QR4_0", "dequantize_q4_0",
    "dequantize_row_q4_1", "struct block_q4_1", "QK4_1", "QR4_1", "dequantize_q4_1",
    "dequantize_row_q5_0", "struct block_q5_0", "QK5_0", "QR5_0", "dequantize_q5_0",
    "dequantize_row_q5_1", "struct block_q5_1", "QK5_1", "QR5_1", "dequantize_q5_1",
    "dequantize_row_q8_0", "struct block_q8_0", "QK8_0", "QR8_0", "dequantize_q8_0",
    "convert_row_f16", "half", "1", "1", "convert_f16"
};

std::array<std::string, 30> dequant_mul_mat_vec_str_values = {
    "dequantize_mul_mat_vec_q4_0", "struct block_q4_0", "QK4_0", "QR4_0", "dequantize_q4_0",
    "dequantize_mul_mat_vec_q4_1", "struct block_q4_1", "QK4_1", "QR4_1", "dequantize_q4_1",
    "dequantize_mul_mat_vec_q5_0", "struct block_q5_0", "QK5_0", "QR5_0", "dequantize_q5_0",
    "dequantize_mul_mat_vec_q5_1", "struct block_q5_1", "QK5_1", "QR5_1", "dequantize_q5_1",
    "dequantize_mul_mat_vec_q8_0", "struct block_q8_0", "QK8_0", "QR8_0", "dequantize_q8_0",
    "convert_mul_mat_vec_f16", "half", "1", "1", "convert_f16"
};

std::array<std::string, 2> mul_str_keys = {
    "KERNEL_NAME", "TYPE"
};
std::array<std::string, 2> mul_str_values = {
    "mul_f32", "float"
};

std::string& replace(std::string& s, const std::string& from, const std::string& to) {
    size_t pos = 0;
    while ((pos = s.find(from, pos)) != std::string::npos) {
        s.replace(pos, from.length(), to);
        pos += to.length();
    }
    return s;
}

std::string generate_kernels() {
    std::stringstream src;
    src << program_source << '\n';
    src << k_quants_source << '\n';
    for (size_t i = 0; i < dequant_str_values.size(); i += dequant_str_keys.size()) {
        std::string dequant_kernel = dequant_template;
        std::string dmmv_kernel = dequant_mul_mat_vec_template;
        for (size_t j = 0; j < dequant_str_keys.size(); j++) {
            replace(dequant_kernel, dequant_str_keys[j], dequant_str_values[i + j]);
            replace(dmmv_kernel, dequant_str_keys[j], dequant_mul_mat_vec_str_values[i + j]);
        }
        src << dequant_kernel << '\n';
        src << dmmv_kernel << '\n';
    }
    for (size_t i = 0; i < mul_str_values.size(); i += mul_str_keys.size()) {
        std::string mul_kernel = mul_template;
        for (size_t j = 0; j < mul_str_keys.size(); j++) {
            replace(mul_kernel, mul_str_keys[j], mul_str_values[i + j]);
        }
        src << mul_kernel << '\n';
    }

    return src.str();
}
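// dequant_str_keys and the two *_str_values tables are parallel arrays with
// a stride of dequant_str_keys.size() == 5: each group of five values in
// dequant_str_values supplies the replacements for one specialization, so
// the first loop above emits a dequantize_row_* and a
// dequantize_mul_mat_vec_* kernel per quant type (q4_0, q4_1, q5_0, q5_1,
// q8_0) plus the two f16 converters.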

static cl_platform_id platform;
static cl_device_id device;
static cl_context context;
static cl_command_queue queue;
static cl_program program;
static cl_kernel convert_row_f16_cl;
static cl_kernel dequantize_row_q4_0_cl, dequantize_row_q4_1_cl, dequantize_row_q5_0_cl, dequantize_row_q5_1_cl, dequantize_row_q8_0_cl;
static cl_kernel dequantize_mul_mat_vec_q4_0_cl, dequantize_mul_mat_vec_q4_1_cl, dequantize_mul_mat_vec_q5_0_cl, dequantize_mul_mat_vec_q5_1_cl, dequantize_mul_mat_vec_q8_0_cl, convert_mul_mat_vec_f16_cl;
static cl_kernel dequantize_block_q2_k_cl, dequantize_block_q3_k_cl, dequantize_block_q4_k_cl, dequantize_block_q5_k_cl, dequantize_block_q6_k_cl;
static cl_kernel dequantize_mul_mat_vec_q2_K_cl, dequantize_mul_mat_vec_q3_K_cl, dequantize_mul_mat_vec_q4_K_cl, dequantize_mul_mat_vec_q5_K_cl, dequantize_mul_mat_vec_q6_K_cl;
static cl_kernel mul_f32_cl;
static bool fp16_support;

static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer) {
    cl_program p;
    char *program_log;
    size_t program_size;
    size_t log_size;
    int err;

    program_size = strlen(program_buffer);

    p = clCreateProgramWithSource(ctx, 1, (const char**)&program_buffer, &program_size, &err);
    if(err < 0) {
        fprintf(stderr, "OpenCL error creating program");
        exit(1);
    }

    std::string compile_opts = "-cl-mad-enable -cl-unsafe-math-optimizations -cl-finite-math-only -cl-fast-relaxed-math "
                               "-DQK4_0=32 -DQR4_0=2 -DQK4_1=32 -DQR4_1=2 -DQK5_0=32 -DQR5_0=2 -DQK5_1=32 -DQR5_1=2 -DQK8_0=32 -DQR8_0=1 "
                               "-DQK_K=256 -DK_QUANTS_PER_ITERATION=" + std::to_string(K_QUANTS_PER_ITERATION);

    err = clBuildProgram(p, 0, NULL, compile_opts.c_str(), NULL, NULL);
    if(err < 0) {

        clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
        program_log = (char*) malloc(log_size + 1);
        program_log[log_size] = '\0';
        clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
        fprintf(stderr, "ggml_opencl: kernel compile error:\n\n%s\n", program_log);
        free(program_log);
        exit(1);
    }

    return p;
}

void ggml_cl_init(void) {
    cl_int err;

    struct cl_device;
    struct cl_platform {
        cl_platform_id id;
        unsigned number;
        char name[128];
        char vendor[128];
        struct cl_device * devices;
        unsigned n_devices;
        struct cl_device * default_device;
    };

    struct cl_device {
        struct cl_platform * platform;
        cl_device_id id;
        unsigned number;
        cl_device_type type;
        char name[128];
    };

    enum { NPLAT = 16, NDEV = 16 };

    struct cl_platform platforms[NPLAT];
    unsigned n_platforms = 0;
    struct cl_device devices[NDEV];
    unsigned n_devices = 0;
    struct cl_device * default_device = NULL;

    platform = NULL;
    device = NULL;

    cl_platform_id platform_ids[NPLAT];
    CL_CHECK(clGetPlatformIDs(NPLAT, platform_ids, &n_platforms));

    for (unsigned i = 0; i < n_platforms; i++) {
        struct cl_platform * p = &platforms[i];
        p->number = i;
        p->id = platform_ids[i];
        CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_NAME, sizeof(p->name), &p->name, NULL));
        CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_VENDOR, sizeof(p->vendor), &p->vendor, NULL));

        cl_device_id device_ids[NDEV];
        cl_int clGetDeviceIDsError = clGetDeviceIDs(p->id, CL_DEVICE_TYPE_ALL, NDEV, device_ids, &p->n_devices);
        if (clGetDeviceIDsError == CL_DEVICE_NOT_FOUND) {
            p->n_devices = 0;
        } else {
            CL_CHECK(clGetDeviceIDsError);
        }
        p->devices = p->n_devices > 0 ? &devices[n_devices] : NULL;
        p->default_device = NULL;

        for (unsigned j = 0; j < p->n_devices; j++) {
            struct cl_device * d = &devices[n_devices];
            d->number = n_devices++;
            d->id = device_ids[j];
            d->platform = p;
            CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_NAME, sizeof(d->name), &d->name, NULL));
            CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_TYPE, sizeof(d->type), &d->type, NULL));

            if (p->default_device == NULL && d->type == CL_DEVICE_TYPE_GPU) {
                p->default_device = d;
            }
        }

        if (default_device == NULL && p->default_device != NULL) {
            default_device = p->default_device;
        }
    }

    if (n_devices == 0) {
        fprintf(stderr, "ggml_opencl: could not find any OpenCL devices.\n");
        exit(1);
    }

    char * user_platform_string = getenv("GGML_OPENCL_PLATFORM");
    char * user_device_string = getenv("GGML_OPENCL_DEVICE");
    int user_platform_number = -1;
    int user_device_number = -1;

    unsigned n;
    if (user_platform_string != NULL && sscanf(user_platform_string, " %u", &n) == 1 && n < n_platforms) {
        user_platform_number = (int)n;
    }
    if (user_device_string != NULL && sscanf(user_device_string, " %u", &n) == 1 && n < n_devices) {
        user_device_number = (int)n;
    }
    if (user_platform_number != -1 && user_device_number != -1) {
        cl_platform* platform = &platforms[user_platform_number];
        if ((unsigned)user_device_number >= platform->n_devices) {
            fprintf(stderr, "ggml_opencl: invalid device number %d\n", user_device_number);
            exit(1);
        }
        default_device = &platform->devices[user_device_number];
    } else {

        struct cl_device * selected_devices = devices;
        unsigned n_selected_devices = n_devices;

        if (user_platform_number == -1 && user_platform_string != NULL && user_platform_string[0] != 0) {
            for (unsigned i = 0; i < n_platforms; i++) {
                struct cl_platform * p = &platforms[i];
                if (strstr(p->name, user_platform_string) != NULL ||
                    strstr(p->vendor, user_platform_string) != NULL) {
                    user_platform_number = (int)i;
                    break;
                }
            }
            if (user_platform_number == -1) {
                fprintf(stderr, "ggml_opencl: no platform matching '%s' was found.\n", user_platform_string);
                exit(1);
            }
        }
        if (user_platform_number != -1) {
            struct cl_platform * p = &platforms[user_platform_number];
            selected_devices = p->devices;
            n_selected_devices = p->n_devices;
            default_device = p->default_device;
            if (n_selected_devices == 0) {
                fprintf(stderr, "ggml_opencl: selected platform '%s' does not have any devices.\n", p->name);
                exit(1);
            }
        }

        if (user_device_number == -1 && user_device_string != NULL && user_device_string[0] != 0) {
            for (unsigned i = 0; i < n_selected_devices; i++) {
                struct cl_device * d = &selected_devices[i];
                if (strstr(d->name, user_device_string) != NULL) {
                    user_device_number = d->number;
                    break;
                }
            }
            if (user_device_number == -1) {
                fprintf(stderr, "ggml_opencl: no device matching '%s' was found.\n", user_device_string);
                exit(1);
            }
        }
        if (user_device_number != -1) {
            selected_devices = &devices[user_device_number];
            n_selected_devices = 1;
            default_device = &selected_devices[0];
        }

        GGML_ASSERT(n_selected_devices > 0);

        if (default_device == NULL) {
            default_device = &selected_devices[0];
        }
    }

    fprintf(stderr, "ggml_opencl: selecting platform: '%s'\n", default_device->platform->name);
    fprintf(stderr, "ggml_opencl: selecting device: '%s'\n", default_device->name);
    if (default_device->type != CL_DEVICE_TYPE_GPU) {
        fprintf(stderr, "ggml_opencl: warning, not a GPU: '%s'.\n", default_device->name);
    }

    platform = default_device->platform->id;
    device = default_device->id;

    size_t ext_str_size;
    clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, NULL, &ext_str_size);
    char *ext_buffer = (char *)alloca(ext_str_size + 1);
    clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, ext_str_size, ext_buffer, NULL);
    ext_buffer[ext_str_size] = '\0'; // ensure it is null terminated
    // Check if ext_buffer contains cl_khr_fp16
    fp16_support = strstr(ext_buffer, "cl_khr_fp16") != NULL;
    fprintf(stderr, "ggml_opencl: device FP16 support: %s\n", fp16_support ? "true" : "false");

    cl_context_properties properties[] = {
        (intptr_t)CL_CONTEXT_PLATFORM, (intptr_t)platform, 0
    };

    CL_CHECK((context = clCreateContext(properties, 1, &device, NULL, NULL, &err), err));

    CL_CHECK((queue = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err),
        (err != CL_INVALID_QUEUE_PROPERTIES && err != CL_INVALID_VALUE ? err :
            (queue = clCreateCommandQueue(context, device, 0, &err), err)
        )));

    const std::string kernel_src = generate_kernels();

    program = build_program_from_source(context, device, kernel_src.c_str());

    // FP16 to FP32 kernel
    CL_CHECK((convert_row_f16_cl = clCreateKernel(program, "convert_row_f16", &err), err));

    // Dequantize kernels
    CL_CHECK((dequantize_row_q4_0_cl = clCreateKernel(program, "dequantize_row_q4_0", &err), err));
    CL_CHECK((dequantize_row_q4_1_cl = clCreateKernel(program, "dequantize_row_q4_1", &err), err));
    CL_CHECK((dequantize_row_q5_0_cl = clCreateKernel(program, "dequantize_row_q5_0", &err), err));
    CL_CHECK((dequantize_row_q5_1_cl = clCreateKernel(program, "dequantize_row_q5_1", &err), err));
    CL_CHECK((dequantize_row_q8_0_cl = clCreateKernel(program, "dequantize_row_q8_0", &err), err));
    CL_CHECK((dequantize_block_q2_k_cl = clCreateKernel(program, "dequantize_block_q2_K", &err), err));
    CL_CHECK((dequantize_block_q3_k_cl = clCreateKernel(program, "dequantize_block_q3_K", &err), err));
    CL_CHECK((dequantize_block_q4_k_cl = clCreateKernel(program, "dequantize_block_q4_K", &err), err));
    CL_CHECK((dequantize_block_q5_k_cl = clCreateKernel(program, "dequantize_block_q5_K", &err), err));
    CL_CHECK((dequantize_block_q6_k_cl = clCreateKernel(program, "dequantize_block_q6_K", &err), err));

    // dequant mul mat kernel
    CL_CHECK((dequantize_mul_mat_vec_q4_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_0", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q4_1_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_1", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q5_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_0", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q5_1_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_1", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q8_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q8_0", &err), err));
    CL_CHECK((convert_mul_mat_vec_f16_cl = clCreateKernel(program, "convert_mul_mat_vec_f16", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q2_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q2_K", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q3_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q3_K", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q4_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_K", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q5_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_K", &err), err));
    CL_CHECK((dequantize_mul_mat_vec_q6_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q6_K", &err), err));

    // mul kernel
    CL_CHECK((mul_f32_cl = clCreateKernel(program, "mul_f32", &err), err));
}
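// Device selection above is steered by two environment variables read with
// getenv(): GGML_OPENCL_PLATFORM and GGML_OPENCL_DEVICE. Each accepts
// either a numeric index or a substring of the platform/device name, e.g.
// (shell, with an illustrative binary name):
//
//     GGML_OPENCL_PLATFORM=0 GGML_OPENCL_DEVICE=1 ./app
//     GGML_OPENCL_PLATFORM=AMD ./app
//
// With neither set, the first GPU device found across all platforms wins.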

static cl_kernel* ggml_get_to_fp32_cl(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return &dequantize_row_q4_0_cl;
        case GGML_TYPE_Q4_1:
            return &dequantize_row_q4_1_cl;
        case GGML_TYPE_Q5_0:
            return &dequantize_row_q5_0_cl;
        case GGML_TYPE_Q5_1:
            return &dequantize_row_q5_1_cl;
        case GGML_TYPE_Q8_0:
            return &dequantize_row_q8_0_cl;
        case GGML_TYPE_Q2_K:
            return &dequantize_block_q2_k_cl;
        case GGML_TYPE_Q3_K:
            return &dequantize_block_q3_k_cl;
        case GGML_TYPE_Q4_K:
            return &dequantize_block_q4_k_cl;
        case GGML_TYPE_Q5_K:
            return &dequantize_block_q5_k_cl;
        case GGML_TYPE_Q6_K:
            return &dequantize_block_q6_k_cl;
        case GGML_TYPE_F16:
            return &convert_row_f16_cl;
        default:
            return nullptr;
    }
}

static size_t ggml_cl_global_denom(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
            return 1;
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
            return 4;
        case GGML_TYPE_Q4_K:
            return 8;
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            return 4;
        case GGML_TYPE_F16:
        default:
            return 1;
    }
}

static size_t ggml_cl_local_size(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
            return 0;
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
            return 64;
        case GGML_TYPE_Q4_K:
            return 32;
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            return 64;
        case GGML_TYPE_F16:
        default:
            return 0;
    }
}

static cl_kernel* ggml_get_dequantize_mul_mat_vec_cl(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return &dequantize_mul_mat_vec_q4_0_cl;
        case GGML_TYPE_Q4_1:
            return &dequantize_mul_mat_vec_q4_1_cl;
        case GGML_TYPE_Q5_0:
            return &dequantize_mul_mat_vec_q5_0_cl;
        case GGML_TYPE_Q5_1:
            return &dequantize_mul_mat_vec_q5_1_cl;
        case GGML_TYPE_Q8_0:
            return &dequantize_mul_mat_vec_q8_0_cl;
        case GGML_TYPE_F16:
            return &convert_mul_mat_vec_f16_cl;
        case GGML_TYPE_Q2_K:
            return &dequantize_mul_mat_vec_q2_K_cl;
        case GGML_TYPE_Q3_K:
            return &dequantize_mul_mat_vec_q3_K_cl;
        case GGML_TYPE_Q4_K:
            return &dequantize_mul_mat_vec_q4_K_cl;
        case GGML_TYPE_Q5_K:
            return &dequantize_mul_mat_vec_q5_K_cl;
        case GGML_TYPE_Q6_K:
            return &dequantize_mul_mat_vec_q6_K_cl;
        default:
            return nullptr;
    }
}

// buffer pool for cl
#define MAX_CL_BUFFERS 256

struct scoped_spin_lock {
    std::atomic_flag& lock;
    scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
        while (lock.test_and_set(std::memory_order_acquire)) {
            ; // spin
        }
    }
    ~scoped_spin_lock() {
        lock.clear(std::memory_order_release);
    }
    scoped_spin_lock(const scoped_spin_lock&) = delete;
    scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
};
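// scoped_spin_lock is a minimal RAII guard over a std::atomic_flag: the
// constructor spins on test_and_set(memory_order_acquire) until it wins the
// flag, and the destructor releases it with clear(memory_order_release).
// It serializes access to the g_cl_buffer_pool defined below.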

struct cl_buffer {
    cl_mem mem;
    size_t size = 0;
};

static cl_buffer g_cl_buffer_pool[MAX_CL_BUFFERS];
static std::atomic_flag g_cl_pool_lock = ATOMIC_FLAG_INIT;

static cl_mem ggml_cl_pool_malloc(size_t size, size_t * actual_size) {
    scoped_spin_lock lock(g_cl_pool_lock);
    cl_int err;

    int best_i = -1;
    size_t best_size = std::numeric_limits<size_t>::max(); // smallest unused buffer that fits our needs
    int worst_i = -1;
    size_t worst_size = 0; // largest unused buffer seen so far
    for (int i = 0; i < MAX_CL_BUFFERS; ++i) {
        cl_buffer &b = g_cl_buffer_pool[i];
        if (b.size > 0 && b.size >= size && b.size < best_size)
        {
            best_i = i;
            best_size = b.size;
        }
        if (b.size > 0 && b.size > worst_size)
        {
            worst_i = i;
            worst_size = b.size;
        }
    }
    if (best_i != -1) // found the smallest buffer that fits our needs
    {
        cl_buffer& b = g_cl_buffer_pool[best_i];
        cl_mem mem = b.mem;
        *actual_size = b.size;
        b.size = 0;
        return mem;
    }
    if (worst_i != -1) // no buffer that fits our needs, resize largest one to save memory
    {
        cl_buffer& b = g_cl_buffer_pool[worst_i];
        cl_mem mem = b.mem;
        b.size = 0;
        clReleaseMemObject(mem);
    }
    cl_mem mem;
    CL_CHECK((mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err), err));
    *actual_size = size;
    return mem;
}
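// Pool policy: reuse the best-fitting pooled buffer first (the smallest one
// whose capacity is at least `size`); if nothing fits, release the largest
// pooled buffer to cap memory, then clCreateBuffer exactly `size` bytes.
// *actual_size reports the real capacity, which can exceed the request when
// a pooled buffer is reused.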
|
1316
|
+
|
1317
|
+
static void ggml_cl_pool_free(cl_mem mem, size_t size) {
|
1318
|
+
scoped_spin_lock lock(g_cl_pool_lock);
|
1319
|
+
|
1320
|
+
for (int i = 0; i < MAX_CL_BUFFERS; ++i) {
|
1321
|
+
cl_buffer& b = g_cl_buffer_pool[i];
|
1322
|
+
if (b.size == 0) {
|
1323
|
+
b.mem = mem;
|
1324
|
+
b.size = size;
|
1325
|
+
return;
|
1326
|
+
}
|
1327
|
+
}
|
1328
|
+
fprintf(stderr, "WARNING: cl buffer pool full, increase MAX_CL_BUFFERS\n");
|
1329
|
+
clReleaseMemObject(mem);
|
1330
|
+
}
|
1331
|
+
|
1332
|
+
void ggml_cl_free_data(const struct ggml_tensor* tensor) {
|
1333
|
+
if (tensor->backend != GGML_BACKEND_GPU) {
|
1334
|
+
return;
|
1335
|
+
}
|
1336
|
+
|
1337
|
+
cl_mem mem = (cl_mem)tensor->data;
|
1338
|
+
clReleaseMemObject(mem);
|
1339
|
+
}

static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) {
    cl_int err;
    const uint64_t ne0 = src->ne[0];
    const uint64_t ne1 = src->ne[1];
    const uint64_t nb0 = src->nb[0];
    const uint64_t nb1 = src->nb[1];
    const uint64_t nb2 = src->nb[2];
    const uint64_t nb3 = src->nb[3];
    const enum ggml_type type = src->type;
    const size_t ts = ggml_type_size(type);
    const size_t bs = ggml_blck_size(type);

    const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
    if (nb0 == ts && nb1 == ts*ne0/bs) {
        err = clEnqueueWriteBuffer(queue, dst, CL_FALSE, offset, ne1*nb1, x, 0, NULL, ev);
        return err;
    }
    if (nb0 == ts) {
        const size_t buffer_origin[3] = { offset, 0, 0 };
        const size_t host_origin[3] = { 0, 0, 0 };
        const size_t region[3] = { ts*ne0/bs, ne1, 1 };
        err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, ts*ne0/bs, 0, nb1, 0, x, 0, NULL, ev);
        return err;
    }
    for (uint64_t i1 = 0; i1 < ne1; i1++) {
        // pretend the row is a matrix with cols=1
        const size_t buffer_origin[3] = { offset, i1, 0 };
        const size_t host_origin[3] = { 0, 0, 0 };
        const size_t region[3] = { ts/bs, ne0, 1 };
        err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, 0, 0, nb0, 0, ((const char *)x) + i1*nb0, 0, NULL, ev);
        if (err != CL_SUCCESS) {
            break;
        }
    }
    return err;
}
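
// The helper above picks the cheapest host-to-device copy that the source
// strides allow: a fully contiguous plane (nb0 equals the element size and
// nb1 equals the packed row size) goes out as a single clEnqueueWriteBuffer;
// rows that are internally contiguous but padded between one another use one
// strided clEnqueueWriteBufferRect; anything else falls back to a rect copy
// per row. For a packed f32 matrix, for example, ts == 4, bs == 1 and
// nb1 == 4*ne0, so the first branch is taken.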

static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src1->backend == GGML_BACKEND_GPU);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];
    const int64_t ne0 = ne00 * ne01 * ne02 * ne03;
    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];
    const int64_t nb10 = src1->nb[0];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];
    size_t x_size;
    size_t d_size;

    cl_mem d_X = ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size); // src0
    cl_mem d_Y = (cl_mem) src1->data; // src1 is already on device, broadcasted.
    cl_mem d_D = ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size); // dst

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            const int i0 = i03*ne02 + i02;

            cl_event ev;

            // copy src0 to device
            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, i0, src0, i03, i02, &ev));

            if (nb10 == sizeof(float)) {
                // Contiguous, avoid overhead from queueing many kernel runs
                const int64_t i13 = i03%ne13;
                const int64_t i12 = i02%ne12;
                const int i1 = i13*ne12*ne11 + i12*ne11;

                cl_int x_offset = 0;
                cl_int y_offset = i1*ne10;
                cl_int d_offset = 0;

                size_t global = ne00 * ne01;
                cl_int ky = ne10;
                CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
                CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
                CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
            } else {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const int64_t i13 = i03%ne13;
                    const int64_t i12 = i02%ne12;
                    const int64_t i11 = i01%ne11;
                    const int i1 = i13*ne12*ne11 + i12*ne11 + i11;

                    cl_int x_offset = i01*ne00;
                    cl_int y_offset = i1*ne10;
                    cl_int d_offset = i01*ne00;

                    // compute
                    size_t global = ne00;
                    cl_int ky = ne10;
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
                    CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
                    CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
                }
            }

            CL_CHECK(clReleaseEvent(ev));
            CL_CHECK(clFinish(queue));

            // copy dst to host
            float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * ne00*ne01, d, 0, NULL, NULL));
        }
    }
    ggml_cl_pool_free(d_X, x_size);
    ggml_cl_pool_free(d_D, d_size);
}
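
// Broadcasting in the element-wise multiply above follows ggml's usual rule:
// src1 may have fewer rows/planes than src0, and each src0 index is mapped
// onto src1 by a modulo (i13 = i03 % ne13, i12 = i02 % ne12, i11 = i01 % ne11),
// so a one-row scale vector multiplies every row of src0 without being
// materialized at full size. src1 is expected to be GPU-resident already
// (asserted at the top), while src0 and dst are streamed per 2D plane.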

void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cl_mul_f32(src0, src1, dst);
}

static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];

    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const float alpha = 1.0f;
    const float beta = 0.0f;
    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;

    size_t x_size;
    size_t y_size;
    size_t d_size;
    cl_mem d_X;
    if (src0->backend == GGML_BACKEND_GPU) { // NOLINT
        d_X = (cl_mem) src0->data;
    } else {
        // d_X holds fp32 data in this path; sizing it as ggml_fp16_t would under-allocate by half
        d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
    }
    cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
    cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            // copy data to device
            if (src0->backend != GGML_BACKEND_GPU) {
                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
            }
            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));

            CL_CHECK(clFinish(queue));

            // compute
            cl_event ev_sgemm;
            clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
                                                       clblast::Transpose::kYes, clblast::Transpose::kNo,
                                                       ne01, ne11, ne10,
                                                       alpha,
                                                       d_X, 0, ne00,
                                                       d_Y, 0, ne10,
                                                       beta,
                                                       d_D, 0, ne01,
                                                       &queue, &ev_sgemm);

            if (status != clblast::StatusCode::kSuccess) {
                GGML_ASSERT(false);
            }

            // copy dst to host
            float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
        }
    }

    if (src0->backend != GGML_BACKEND_GPU) {
        ggml_cl_pool_free(d_X, x_size);
    }
    ggml_cl_pool_free(d_Y, y_size);
    ggml_cl_pool_free(d_D, d_size);
}
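
// CLBlast is driven in column-major mode although ggml stores tensors
// row-major: the same bytes read column-major are the transpose of the
// row-major matrix. Passing d_X (src0) as A with Transpose::kYes and d_Y
// (src1) untransposed therefore yields a column-major ne01 x ne11 result
// whose memory layout is exactly the row-major dst that ggml expects:
//
//     dst[i1][i0] = sum_k src0[i0][k] * src1[i1][k]
//
// so both operands are contracted over their innermost dimension with no
// physical transposition or extra copies.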

static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t /* wsize */) {
    GGML_ASSERT(fp16_support);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const ggml_fp16_t alpha = ggml_fp32_to_fp16(1.0f);
    const ggml_fp16_t beta = ggml_fp32_to_fp16(0.0f);
    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;

    size_t x_size;
    size_t y_size;
    size_t d_size;
    cl_mem d_X;
    if (src0->backend == GGML_BACKEND_GPU) { // NOLINT
        d_X = (cl_mem) src0->data;
    } else {
        d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size);
    }
    cl_mem d_Y = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * y_ne, &y_size);
    cl_mem d_D = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * d_ne, &d_size);

    bool src1_cont_rows = nb10 == sizeof(float);
    bool src1_cont_cols = (size_t)nb11 == ne10*sizeof(float); // a packed plane has a row stride of one full row of floats (ne10 elements)

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            // copy src0 to device
            if (src0->backend != GGML_BACKEND_GPU) {
                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
            }

            // convert src1 to fp16
            // TODO: use multiple threads
            ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata + (ne11 * ne10) * (i03 * ne02 + i02);
            char * src1i = (char *) src1->data + i03*nb13 + i02*nb12;
            if (src1_cont_rows) {
                if (src1_cont_cols) {
                    ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11);
                }
                else {
                    for (int64_t i01 = 0; i01 < ne11; i01++) {
                        ggml_fp32_to_fp16_row((float *) (src1i + i01*nb11), tmp + i01*ne10, ne10);
                    }
                }
            }
            else {
                for (int64_t i01 = 0; i01 < ne11; i01++) {
                    for (int64_t i00 = 0; i00 < ne10; i00++) {
                        // very slow due to no inlining
                        tmp[i01*ne10 + i00] = ggml_fp32_to_fp16(*(float *) (src1i + i01*nb11 + i00*nb10));
                    }
                }
            }

            // copy src1 to device
            CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL));

            CL_CHECK(clFinish(queue));

            // compute
            cl_event ev_sgemm;
            clblast::StatusCode status = clblast::Gemm<cl_half>(clblast::Layout::kColMajor,
                                                       clblast::Transpose::kYes, clblast::Transpose::kNo,
                                                       ne01, ne11, ne10,
                                                       alpha,
                                                       d_X, 0, ne00,
                                                       d_Y, 0, ne10,
                                                       beta,
                                                       d_D, 0, ne01,
                                                       &queue, &ev_sgemm);

            if (status != clblast::StatusCode::kSuccess) {
                GGML_ASSERT(false);
            }

            // copy dst to host, then convert to float
            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));

            float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

            ggml_fp16_to_fp32_row(tmp, d, d_ne);
        }
    }

    if (src0->backend != GGML_BACKEND_GPU) {
        ggml_cl_pool_free(d_X, x_size);
    }
    ggml_cl_pool_free(d_Y, y_size);
    ggml_cl_pool_free(d_D, d_size);
}
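
// The fp16 path trades host-side conversion work for smaller bus transfers:
// src1 is packed to half precision into the wdata scratch area (sized by
// ggml_cl_mul_mat_get_wsize later in this file), the GEMM runs entirely in
// fp16 via clblast::Gemm<cl_half>, and the fp16 result is expanded back to
// fp32 on the host. The assertion at the top enforces the precondition: the
// device must report fp16 support (typically via the cl_khr_fp16 extension)
// for this path to be valid.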

static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];

    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];
    const ggml_type type = src0->type;
    const bool mul_mat_vec = ne11 == 1;

    const float alpha = 1.0f;
    const float beta = 0.0f;
    const int x_ne = ne01 * ne00;
    const int y_ne = ne11 * ne10;
    const int d_ne = ne11 * ne01;
    const size_t q_sz = ggml_type_size(type) * x_ne / ggml_blck_size(type);

    size_t x_size;
    size_t y_size;
    size_t d_size;
    size_t q_size;
    cl_mem d_X;
    if (!mul_mat_vec) {
        d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
    }
    cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
    cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
    cl_mem d_Q;
    if (src0->backend == GGML_BACKEND_CPU) {
        d_Q = ggml_cl_pool_malloc(q_sz, &q_size);
    }

    cl_kernel* to_fp32_cl = ggml_get_to_fp32_cl(type);
    cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
    GGML_ASSERT(to_fp32_cl != nullptr);

    const size_t global_denom = ggml_cl_global_denom(type);
    const size_t local = ggml_cl_local_size(type);

    size_t ev_idx = 0;
    std::vector<cl_event> events;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            // copy src0 to device if necessary
            if (src0->backend == GGML_BACKEND_CPU) {
                events.emplace_back();
                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
            } else if (src0->backend == GGML_BACKEND_GPU) {
                d_Q = (cl_mem) src0->data;
            } else {
                GGML_ASSERT(false);
            }
            if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
                // copy src1 to device
                events.emplace_back();
                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, events.data() + ev_idx++));

                // compute
                const size_t global = ne01 * CL_DMMV_BLOCK_SIZE;
                const size_t local = CL_DMMV_BLOCK_SIZE;
                const cl_int ncols = ne00;
                events.emplace_back();
                CL_CHECK(clSetKernelArg(*dmmv, 0, sizeof(cl_mem), &d_Q));
                CL_CHECK(clSetKernelArg(*dmmv, 1, sizeof(float) * local, NULL));
                CL_CHECK(clSetKernelArg(*dmmv, 2, sizeof(cl_mem), &d_Y));
                CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
                CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
                CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, NULL, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
            } else { // general dequantization kernel + CLBlast matrix matrix multiplication
                // convert src0 to fp32 on device
                const size_t global = x_ne / global_denom;
                CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
                CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
                CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));

                // copy src1 to device
                CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));

                events.emplace_back();

                // wait for conversion
                CL_CHECK(clFinish(queue));

                // compute
                clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
                                                           clblast::Transpose::kYes, clblast::Transpose::kNo,
                                                           ne01, ne11, ne10,
                                                           alpha,
                                                           d_X, 0, ne00,
                                                           d_Y, 0, ne10,
                                                           beta,
                                                           d_D, 0, ne01,
                                                           &queue, events.data() + ev_idx++);

                if (status != clblast::StatusCode::kSuccess) {
                    GGML_ASSERT(false);
                }
            }

            // copy dst to host
            float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
            CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &events[events.size() - 1], NULL));
            for (auto *event : events) {
                clReleaseEvent(event);
            }

            ev_idx = 0;
            events.clear();
        }
    }

    if (!mul_mat_vec) {
        ggml_cl_pool_free(d_X, x_size);
    }
    ggml_cl_pool_free(d_Y, y_size);
    ggml_cl_pool_free(d_D, d_size);
    if (src0->backend == GGML_BACKEND_CPU) {
        ggml_cl_pool_free(d_Q, q_size);
    }
}
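
// Quantized mat-mul picks one of two strategies. For matrix-vector products
// (ne11 == 1, the common case during token-by-token inference) it runs the
// fused dequantize_mul_mat_vec kernel, so the quantized weights are never
// expanded in device memory. For wider right-hand sides it first dequantizes
// src0 to fp32 on the device with the type-specific to_fp32 kernel and then
// calls CLBlast SGEMM, amortizing the dequantization cost over ne11 columns.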

bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // TODO: find the optimal values for these
    if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
        src1->type == GGML_TYPE_F32 &&
        dst->type == GGML_TYPE_F32 &&
        ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == GGML_BACKEND_GPU)) {
        return true;
    }

    return false;
}
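
// The >= 32 size gate is a heuristic: for tiny matrices the cost of staging
// data over the bus outweighs any GPU speedup, so those cases stay on the
// CPU unless the weights already live on the device.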

bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) {
    // If device doesn't support FP16
    if (!fp16_support) {
        return false;
    }

    size_t src0_sz = ggml_nbytes(src0);
    size_t src1_sz = ggml_nbytes(src1);

    // mul_mat_q: src0 is converted to fp32 on device
    size_t mul_mat_q_transfer = src0_sz + src1_sz;

    // mul_mat_f16: src1 is converted to fp16 on cpu
    size_t mul_mat_f16_transfer = src0_sz + sizeof(ggml_fp16_t) * ggml_nelements(src1);

    // choose the smaller one to transfer to the device
    // TODO: this is not always the best choice due to the overhead of converting to fp16
    return mul_mat_f16_transfer < mul_mat_q_transfer;
}
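
// Worked example (hypothetical shapes, for illustration only): for an f32
// src1 of 4096 x 512 elements, the fp32 copy costs 4096*512*4 = 8 MiB while
// the fp16 copy costs half that, 4 MiB, so the comparison above picks the
// f16 path; for contiguous f32 src1 it always will, since the fp16 transfer
// is exactly half the size.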

void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize) {
    GGML_ASSERT(ggml_cl_can_mul_mat(src0, src1, dst));

    if (src0->type == GGML_TYPE_F32) {
        ggml_cl_mul_mat_f32(src0, src1, dst);
    }
    else if (src0->type == GGML_TYPE_F16) {
        if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
            ggml_cl_mul_mat_f16(src0, src1, dst, wdata, wsize);
        }
        else {
            ggml_cl_mul_mat_q_f32(src0, src1, dst);
        }
    }
    else if (ggml_is_quantized(src0->type)) {
        ggml_cl_mul_mat_q_f32(src0, src1, dst);
    }
    else {
        GGML_ASSERT(false);
    }
}

size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
    if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
        return ggml_nelements(src1) * sizeof(ggml_fp16_t);
    }
    return 0;
}
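
// This is the host scratch ("wdata") contract for the f16 path: the caller
// reserves one fp16 element per element of src1, and ggml_cl_mul_mat_f16
// carves that area into one (ne10 * ne11) slot per (i03, i02) plane.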

void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) {
    const int64_t ne0 = tensor->ne[0];
    const int64_t ne1 = tensor->ne[1];
    const int64_t ne2 = tensor->ne[2];
    const int64_t ne3 = tensor->ne[3];

    const ggml_type type = tensor->type;
    const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);

    size_t q_size;
    cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size);

    tensor->data = data;
    // copy tensor to device
    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            int i = i3*ne2 + i2;
            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, i*ne0*ne1, tensor, i3, i2, NULL));
        }
    }

    CL_CHECK(clFinish(queue));

    tensor->data = dst;
    GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
}
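
// ggml_cl_transform_tensor is the upload hook used when tensors (typically
// model weights) are offloaded to the device: it temporarily points
// tensor->data at the host buffer so the strided copy helper can read it,
// then overwrites tensor->data with the device-side cl_mem handle. From then
// on the tensor is GPU-resident, which is what ggml_cl_free_data relies on
// when releasing it.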