llama_cpp 0.14.6 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,47 +14,6 @@
  #include <stdlib.h> // for qsort
  #include <stdio.h> // for GGML_ASSERT

- #ifdef __ARM_NEON
-
- // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
- //
- // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
- //
- #include <arm_neon.h>
-
- #else
-
- #ifdef __wasm_simd128__
- #include <wasm_simd128.h>
- #else
- #if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
- #include <altivec.h>
- #undef bool
- #define bool _Bool
- #else
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <intrin.h>
- #else
- #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
- #if !defined(__riscv)
- #include <immintrin.h>
- #endif
- #endif
- #endif
- #endif
- #endif
- #endif
-
- #ifdef __riscv_v_intrinsic
- #include <riscv_vector.h>
- #endif
-
- #undef MIN
- #undef MAX
-
- #define MIN(a, b) ((a) < (b) ? (a) : (b))
- #define MAX(a, b) ((a) > (b) ? (a) : (b))
-
  #define UNUSED GGML_UNUSED

  // some compilers don't provide _mm256_set_m128i, e.g. gcc 7
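
Note: the platform-include cascade and the MIN/MAX macros removed above do not disappear from the package; upstream ggml consolidated this per-file SIMD boilerplate into the shared ggml-impl.h header, which ggml-quants.c includes (treat the exact destination as an upstream detail, not something this diff shows). The surviving context comment about _mm256_set_m128i refers to the fallback that follows it in the file; a minimal sketch of that workaround (the macro name MM256_SET_M128I matches upstream ggml, but take the exact spelling here as illustrative):

    // gcc 7 lacks _mm256_set_m128i, so build a 256-bit vector by hand:
    // widen `lo` to 256 bits, then insert `hi` into the upper 128-bit lane.
    #define MM256_SET_M128I(hi, lo) \
        _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1)
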
@@ -276,258 +235,6 @@ static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128
  #endif // __AVX__ || __AVX2__ || __AVX512F__
  #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)

- #if defined(__ARM_NEON)
-
- #ifdef _MSC_VER
-
- #define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
-
- #else
-
- #define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
-
- #endif
-
- #if !defined(__aarch64__)
-
- // 64-bit compatibility
-
- // vaddvq_s16
- // vpaddq_s16
- // vpaddq_s32
- // vaddvq_s32
- // vaddvq_f32
- // vmaxvq_f32
- // vcvtnq_s32_f32
- // vzip1_u8
- // vzip2_u8
-
- inline static int32_t vaddvq_s16(int16x8_t v) {
-     return
-         (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
-         (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
-         (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
-         (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
- }
-
- inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
-     int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
-     int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
-     return vcombine_s16(a0, b0);
- }
-
- inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
-     int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
-     int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
-     return vcombine_s32(a0, b0);
- }
-
- inline static int32_t vaddvq_s32(int32x4_t v) {
-     return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
- }
-
- inline static float vaddvq_f32(float32x4_t v) {
-     return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
- }
-
- inline static float vmaxvq_f32(float32x4_t v) {
-     return
-         MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
-             MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
- }
-
- inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
-     int32x4_t res;
-
-     res[0] = roundf(vgetq_lane_f32(v, 0));
-     res[1] = roundf(vgetq_lane_f32(v, 1));
-     res[2] = roundf(vgetq_lane_f32(v, 2));
-     res[3] = roundf(vgetq_lane_f32(v, 3));
-
-     return res;
- }
-
- inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
-     uint8x8_t res;
-
-     res[0] = a[0]; res[1] = b[0];
-     res[2] = a[1]; res[3] = b[1];
-     res[4] = a[2]; res[5] = b[2];
-     res[6] = a[3]; res[7] = b[3];
-
-     return res;
- }
-
- inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
-     uint8x8_t res;
-
-     res[0] = a[4]; res[1] = b[4];
-     res[2] = a[5]; res[3] = b[5];
-     res[4] = a[6]; res[5] = b[6];
-     res[6] = a[7]; res[7] = b[7];
-
-     return res;
- }
-
- // vld1q_s16_x2
- // vld1q_u8_x2
- // vld1q_u8_x4
- // vld1q_s8_x2
- // vld1q_s8_x4
- // TODO: double-check these work correctly
-
- typedef struct ggml_int16x8x2_t {
-     int16x8_t val[2];
- } ggml_int16x8x2_t;
-
- inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
-     ggml_int16x8x2_t res;
-
-     res.val[0] = vld1q_s16(ptr + 0);
-     res.val[1] = vld1q_s16(ptr + 8);
-
-     return res;
- }
-
- typedef struct ggml_uint8x16x2_t {
-     uint8x16_t val[2];
- } ggml_uint8x16x2_t;
-
- inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
-     ggml_uint8x16x2_t res;
-
-     res.val[0] = vld1q_u8(ptr + 0);
-     res.val[1] = vld1q_u8(ptr + 16);
-
-     return res;
- }
-
- typedef struct ggml_uint8x16x4_t {
-     uint8x16_t val[4];
- } ggml_uint8x16x4_t;
-
- inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
-     ggml_uint8x16x4_t res;
-
-     res.val[0] = vld1q_u8(ptr + 0);
-     res.val[1] = vld1q_u8(ptr + 16);
-     res.val[2] = vld1q_u8(ptr + 32);
-     res.val[3] = vld1q_u8(ptr + 48);
-
-     return res;
- }
-
- typedef struct ggml_int8x16x2_t {
-     int8x16_t val[2];
- } ggml_int8x16x2_t;
-
- inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
-     ggml_int8x16x2_t res;
-
-     res.val[0] = vld1q_s8(ptr + 0);
-     res.val[1] = vld1q_s8(ptr + 16);
-
-     return res;
- }
-
- typedef struct ggml_int8x16x4_t {
-     int8x16_t val[4];
- } ggml_int8x16x4_t;
-
- inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
-     ggml_int8x16x4_t res;
-
-     res.val[0] = vld1q_s8(ptr + 0);
-     res.val[1] = vld1q_s8(ptr + 16);
-     res.val[2] = vld1q_s8(ptr + 32);
-     res.val[3] = vld1q_s8(ptr + 48);
-
-     return res;
- }
-
- // NOTE: not tested
- inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
-     int8x16_t res;
-
-     res[ 0] = a[b[ 0]];
-     res[ 1] = a[b[ 1]];
-     res[ 2] = a[b[ 2]];
-     res[ 3] = a[b[ 3]];
-     res[ 4] = a[b[ 4]];
-     res[ 5] = a[b[ 5]];
-     res[ 6] = a[b[ 6]];
-     res[ 7] = a[b[ 7]];
-     res[ 8] = a[b[ 8]];
-     res[ 9] = a[b[ 9]];
-     res[10] = a[b[10]];
-     res[11] = a[b[11]];
-     res[12] = a[b[12]];
-     res[13] = a[b[13]];
-     res[14] = a[b[14]];
-     res[15] = a[b[15]];
-
-     return res;
- }
-
- // NOTE: not tested
- inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
-     uint8x16_t res;
-
-     res[ 0] = a[b[ 0]];
-     res[ 1] = a[b[ 1]];
-     res[ 2] = a[b[ 2]];
-     res[ 3] = a[b[ 3]];
-     res[ 4] = a[b[ 4]];
-     res[ 5] = a[b[ 5]];
-     res[ 6] = a[b[ 6]];
-     res[ 7] = a[b[ 7]];
-     res[ 8] = a[b[ 8]];
-     res[ 9] = a[b[ 9]];
-     res[10] = a[b[10]];
-     res[11] = a[b[11]];
-     res[12] = a[b[12]];
-     res[13] = a[b[13]];
-     res[14] = a[b[14]];
-     res[15] = a[b[15]];
-
-     return res;
- }
-
- #else
-
- #define ggml_int16x8x2_t  int16x8x2_t
- #define ggml_uint8x16x2_t uint8x16x2_t
- #define ggml_uint8x16x4_t uint8x16x4_t
- #define ggml_int8x16x2_t  int8x16x2_t
- #define ggml_int8x16x4_t  int8x16x4_t
-
- #define ggml_vld1q_s16_x2 vld1q_s16_x2
- #define ggml_vld1q_u8_x2  vld1q_u8_x2
- #define ggml_vld1q_u8_x4  vld1q_u8_x4
- #define ggml_vld1q_s8_x2  vld1q_s8_x2
- #define ggml_vld1q_s8_x4  vld1q_s8_x4
- #define ggml_vqtbl1q_s8   vqtbl1q_s8
- #define ggml_vqtbl1q_u8   vqtbl1q_u8
-
- #endif
-
- #if !defined(__ARM_FEATURE_DOTPROD)
-
- inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
-     const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
-     const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));
-
-     return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
- }
-
- #else
-
- #define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)
-
- #endif
-
- #endif
-
  #if defined(__ARM_NEON) || defined(__wasm_simd128__)
  #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
  #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
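
Note: the block removed above supplied aarch64-style NEON intrinsics for 32-bit ARM, MSVC-safe vector initializers, and a dot-product fallback for CPUs without __ARM_FEATURE_DOTPROD; like the include scaffolding, it moved into the shared ggml-impl.h upstream rather than being deleted. The fallback's trick deserves a remark: it emulates vdotq_s32 with a widening multiply plus pairwise adds, which matches the real instruction in the horizontal total even though individual lanes differ. A standalone sketch (aarch64 only; dot_fallback is a hypothetical name wrapping the ggml_vdotq_s32 body shown above):

    #include <arm_neon.h>
    #include <stdio.h>

    static int32x4_t dot_fallback(int32x4_t acc, int8x16_t a, int8x16_t b) {
        const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); // products 0..7
        const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); // products 8..15
        // pairwise-widen to 32 bits and accumulate; lane contents differ from
        // vdotq_s32, but the sum over all four lanes is identical
        return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
    }

    int main(void) {
        int8_t xa[16], xb[16];
        for (int i = 0; i < 16; ++i) { xa[i] = (int8_t)(i - 8); xb[i] = (int8_t)i; }
        int32x4_t acc = dot_fallback(vdupq_n_s32(0), vld1q_s8(xa), vld1q_s8(xb));
        printf("dot = %d\n", vaddvq_s32(acc)); // prints dot = 280, same total vdotq_s32 gives
        return 0;
    }
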
@@ -12676,3 +12383,287 @@ void quantize_row_iq2_s(const float * restrict x, void * restrict vy, int64_t k)
      block_iq2_s * restrict y = vy;
      quantize_row_iq2_s_reference(x, y, k);
  }
+
+ static bool validate_float(float f, size_t i) {
+     if (isinf(f)) {
+         fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
+         return false;
+     }
+
+     if (isnan(f)) {
+         fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
+         return false;
+     }
+
+     return true;
+ }
+
+ static bool isinf_fp16(ggml_fp16_t f) {
+     return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0;
+ }
+
+ static bool isnan_fp16(ggml_fp16_t f) {
+     return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0;
+ }
+
+ static bool validate_fp16(ggml_fp16_t f, size_t i) {
+     if (isinf_fp16(f)) {
+         fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
+         return false;
+     }
+
+     if (isnan_fp16(f)) {
+         fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
+         return false;
+     }
+
+     return true;
+ }
+
+ #define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \
+     const type * q = (const type *) (data); \
+     for (size_t i = 0; i < (nb); ++i) { \
+         if (!validate_fp16(q[i].d, i)) { \
+             return false; \
+         } \
+     }
+
+ #define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \
+     const type * q = (const type *) (data); \
+     for (size_t i = 0; i < (nb); ++i) { \
+         if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \
+             return false; \
+         } \
+     }
+
+ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
+     if (type < 0 || type >= GGML_TYPE_COUNT) {
+         fprintf(stderr, "%s: invalid type %d\n", __func__, type);
+         return false;
+     }
+
+     if (nbytes % ggml_type_size(type) != 0) {
+         fprintf(stderr, "%s: invalid size %zu for type %d\n", __func__, nbytes, type);
+         return false;
+     }
+
+     const size_t nb = nbytes/ggml_type_size(type);
+
+     switch (type) {
+         case GGML_TYPE_F16:
+             {
+                 const ggml_fp16_t * f = (const ggml_fp16_t *) data;
+                 size_t i = 0;
+ #if defined(__AVX2__)
+                 for (; i + 15 < nb; i += 16) {
+                     __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
+                     __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00));
+                     __m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00));
+                     int mask = _mm256_movemask_epi8(cmp);
+                     if (mask) {
+                         for (size_t j = 0; j < 16; ++j) {
+                             if (!validate_fp16(f[i + j], i + j)) {
+                                 return false;
+                             }
+                         }
+                         GGML_UNREACHABLE();
+                     }
+                 }
+ #elif defined(__ARM_NEON)
+                 for (; i + 7 < nb; i += 8) {
+                     uint16x8_t v = vld1q_u16(f + i);
+                     uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00));
+                     uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00));
+                     uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0);
+                     if (mask) {
+                         for (size_t j = 0; j < 8; ++j) {
+                             if (!validate_fp16(f[i + j], i + j)) {
+                                 return false;
+                             }
+                         }
+                         GGML_UNREACHABLE();
+                     }
+                 }
+ #endif
+                 for (; i < nb; ++i) {
+                     if (!validate_fp16(f[i], i)) {
+                         return false;
+                     }
+                 }
+             } break;
+         case GGML_TYPE_F32:
+             {
+                 const float * f = (const float *) data;
+                 size_t i = 0;
+ #if defined(__AVX2__)
+                 for (; i + 7 < nb; i += 8) {
+                     __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
+                     __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000));
+                     __m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000));
+                     int mask = _mm256_movemask_epi8(cmp);
+                     if (mask) {
+                         for (size_t j = 0; j < 8; ++j) {
+                             if (!validate_float(f[i + j], i + j)) {
+                                 return false;
+                             }
+                         }
+                         GGML_UNREACHABLE();
+                     }
+                 }
+ #elif defined(__ARM_NEON)
+                 for (; i + 3 < nb; i += 4) {
+                     uint32x4_t v = vld1q_u32((const uint32_t *)f + i);
+                     uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000));
+                     uint32x4_t cmp = vceqq_u32(vexp, vdupq_n_u32(0x7f800000));
+                     uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0);
+                     if (mask) {
+                         for (size_t j = 0; j < 4; ++j) {
+                             if (!validate_float(f[i + j], i + j)) {
+                                 return false;
+                             }
+                         }
+                         GGML_UNREACHABLE();
+                     }
+                 }
+ #endif
+                 for (; i < nb; ++i) {
+                     if (!validate_float(f[i], i)) {
+                         return false;
+                     }
+                 }
+             } break;
+         case GGML_TYPE_F64:
+             {
+                 const double * f = (const double *) data;
+                 for (size_t i = 0; i < nb; ++i) {
+                     if (!validate_float(f[i], i)) {
+                         return false;
+                     }
+                 }
+             } break;
+         case GGML_TYPE_Q4_0:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, nb);
+             } break;
+         case GGML_TYPE_Q4_1:
+             {
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m);
+             } break;
+         case GGML_TYPE_Q5_0:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb);
+             } break;
+         case GGML_TYPE_Q5_1:
+             {
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m);
+             } break;
+         case GGML_TYPE_Q8_0:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb);
+             } break;
+         case GGML_TYPE_Q2_K:
+             {
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin);
+             } break;
+         case GGML_TYPE_Q3_K:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb);
+             } break;
+         case GGML_TYPE_Q4_K:
+             {
+ #ifdef GGML_QKK_64
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d[0], d[1]);
+ #else
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin);
+ #endif
+             } break;
+         case GGML_TYPE_Q5_K:
+             {
+ #ifdef GGML_QKK_64
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_K, data, nb);
+ #else
+                 VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin);
+ #endif
+             } break;
+         case GGML_TYPE_Q6_K:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb);
+             } break;
+         case GGML_TYPE_Q8_K:
+             {
+                 const block_q8_K * q = (const block_q8_K *) data;
+                 for (size_t i = 0; i < nb; ++i) {
+                     if (!validate_float(q[i].d, i)) {
+                         return false;
+                     }
+                 }
+             } break;
+         case GGML_TYPE_IQ1_S:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb);
+             } break;
+         case GGML_TYPE_IQ1_M:
+             {
+                 const block_iq1_m * q = (const block_iq1_m *) data;
+                 for (size_t i = 0; i < nb; ++i) {
+ #if QK_K == 64
+                     if (!validate_fp16(q[i].d, i)) {
+                         return false;
+                     }
+ #else
+                     iq1m_scale_t scale;
+                     const uint16_t * sc = (const uint16_t *)q[i].scales;
+                     scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+                     if (!validate_fp16(scale.f16, i)) {
+                         return false;
+                     }
+ #endif
+                 }
+             } break;
+         case GGML_TYPE_IQ2_XXS:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb);
+             } break;
+         case GGML_TYPE_IQ2_XS:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb);
+             } break;
+         case GGML_TYPE_IQ2_S:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb);
+             } break;
+         case GGML_TYPE_IQ3_XXS:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb);
+             } break;
+
+         case GGML_TYPE_IQ3_S:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb);
+             } break;
+         case GGML_TYPE_IQ4_XS:
+ #if QK_K != 64
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb);
+             } break;
+ #endif
+         // with QK_K == 64, iq4_xs is iq4_nl
+         case GGML_TYPE_IQ4_NL:
+             {
+                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
+             } break;
+         case GGML_TYPE_I8:
+         case GGML_TYPE_I16:
+         case GGML_TYPE_I32:
+         case GGML_TYPE_I64:
+             // nothing to validate
+             break;
+         default:
+             {
+                 fprintf(stderr, "%s: invalid type %d\n", __func__, type);
+                 return false;
+             }
+     }
+
+     return true;
+ }
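
Note on the new ggml_validate_row_data: an IEEE-754 value is inf or nan exactly when its exponent field is all ones (0x7c00 for binary16, 0x7f800000 for binary32), so the F16/F32 fast paths AND each lane against that mask, compare for equality, and collapse the comparison into a scalar mask (_mm256_movemask_epi8 on AVX2; the vshrn_n narrowing trick on NEON, which packs each lane's compare result into one 64-bit mask). A nonzero mask means some lane is non-finite; the scalar rescan then reports the exact block index and returns false, which is why the GGML_UNREACHABLE() after it can never execute. A self-contained check of the binary16 classifiers (fp16_bits is a stand-in for ggml_fp16_t, which holds raw bits in an integer type, as the masking in the diff implies):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t fp16_bits;

    // exponent bits 10..14 all ones => non-finite; mantissa selects inf vs nan
    static bool isinf_fp16(fp16_bits f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0; }
    static bool isnan_fp16(fp16_bits f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0; }

    int main(void) {
        printf("+inf : inf=%d nan=%d\n", isinf_fp16(0x7c00), isnan_fp16(0x7c00)); // 1 0
        printf("-inf : inf=%d nan=%d\n", isinf_fp16(0xfc00), isnan_fp16(0xfc00)); // 1 0
        printf("qNaN : inf=%d nan=%d\n", isinf_fp16(0x7e00), isnan_fp16(0x7e00)); // 0 1
        printf("1.0  : inf=%d nan=%d\n", isinf_fp16(0x3c00), isnan_fp16(0x3c00)); // 0 0
        return 0;
    }

For the quantized types, only the per-block fp16 scales (d, plus m or dmin where present) are validated, since the packed integer payloads cannot encode inf or nan.
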
@@ -13416,11 +13416,16 @@ void print_device_detail(int id, sycl::device &device, std::string device_type)
      version += std::to_string(prop.get_minor_version());

      device_type = std::regex_replace(device_type, std::regex("ext_oneapi_"), "");
+     std::string name = std::string(prop.get_name());
+     name = std::regex_replace(name, std::regex("\\(R\\)"), "");
+     name = std::regex_replace(name, std::regex("\\(TM\\)"), "");

-     fprintf(stderr, "|%2d|%18s|%45s|%10s|%11d|%8d|%7d|%15lu|\n", id, device_type.c_str(),
-             prop.get_name(), version.c_str(), prop.get_max_compute_units(),
+     auto global_mem_size = prop.get_global_mem_size()/1000000;
+
+     fprintf(stderr, "|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|\n", id, device_type.c_str(),
+             name.c_str(), version.c_str(), prop.get_max_compute_units(),
              prop.get_max_work_group_size(), prop.get_max_sub_group_size(),
-             prop.get_global_mem_size());
+             global_mem_size, device.get_info<sycl::info::device::driver_version>().c_str());
  }

  void ggml_backend_sycl_print_sycl_devices() {
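
Note: the reworked SYCL device table trims "(R)"/"(TM)" from device names, adds a Driver version column, and reports global memory as prop.get_global_mem_size()/1000000 with an "M" suffix, i.e. decimal megabytes rather than mebibytes. For a sense of the difference (the 16 GiB figure is an assumed example, not taken from the diff):

    #include <stdio.h>

    int main(void) {
        unsigned long long bytes = 17179869184ULL; // a hypothetical 16 GiB device
        printf("%lluM  (decimal MB, as the new table prints)\n", bytes / 1000000);          // 17179M
        printf("%lluMiB (binary, for comparison)\n", bytes / (1024ULL * 1024ULL));          // 16384MiB
        return 0;
    }
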
@@ -13428,9 +13433,10 @@ void ggml_backend_sycl_print_sycl_devices() {
      int device_count = dpct::dev_mgr::instance().device_count();
      std::map<std::string, size_t> DeviceNums;
      fprintf(stderr, "found %d SYCL devices:\n", device_count);
-     fprintf(stderr, "| | | |Compute |Max compute|Max work|Max sub| |\n");
-     fprintf(stderr, "|ID| Device Type| Name|capability|units |group |group |Global mem size|\n");
-     fprintf(stderr, "|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------|\n");
+     fprintf(stderr, "| | | | |Max | |Max |Global | |\n");
+     fprintf(stderr, "| | | | |compute|Max work|sub |mem | |\n");
+     fprintf(stderr, "|ID| Device Type| Name|Version|units |group |group|size | Driver version|\n");
+     fprintf(stderr, "|--|-------------------|---------------------------------------|-------|-------|--------|-----|-------|---------------------|\n");
      for (int id = 0; id < device_count; ++id) {
          sycl::device device = dpct::dev_mgr::instance().get_device(id);
          sycl::backend backend = device.get_backend();
@@ -14738,7 +14744,12 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
      GGML_ASSERT(src0->type == GGML_TYPE_F32);
      GGML_ASSERT( dst->type == GGML_TYPE_F32);

+     const ggml_tensor * src2 = dst->src[2];
+
+ #pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 and src2 support")
+ #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
      GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
+     GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32); // src2 contains positions and it is optional

      const int64_t ne00 = src0->ne[0];
      const int64_t nrows_x = ggml_nrows(src0);
@@ -14754,7 +14765,6 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
      float * src2_dd = nullptr;
      sycl_pool_alloc<float> src2_f;

-     ggml_tensor * src2 = dst->src[2];
      const bool use_src2 = src2 != nullptr;

      if (use_src2) {
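
Note: per the in-diff comments, src1 is the optional attention mask and src2 the optional positions tensor for soft_max; both must currently be F32, with F16 support deferred to the referenced PR. Hoisting `const ggml_tensor * src2 = dst->src[2];` to the top of ggml_sycl_op_soft_max lets the new assertion run early, and the hunk directly above removes the now-redundant declaration further down the function; the Vulkan hunk below adds the same guards. The assertions rely on short-circuiting, so an absent operand passes vacuously. A minimal illustration with hypothetical stand-in types (not the real ggml definitions):

    #include <assert.h>
    #include <stddef.h>

    enum type { TYPE_F32, TYPE_F16 };  // stand-in for ggml_type
    struct tensor { enum type type; }; // stand-in for ggml_tensor

    // mirrors GGML_ASSERT(!src || src->type == GGML_TYPE_F32)
    static void check_optional_f32(const struct tensor * t) {
        assert(!t || t->type == TYPE_F32);
    }

    int main(void) {
        struct tensor mask = { TYPE_F32 };
        check_optional_f32(&mask); // present and F32: passes
        check_optional_f32(NULL);  // absent: passes vacuously
        return 0;
    }
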
@@ -3178,6 +3178,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
          }
          return nullptr;
      case GGML_OP_SOFT_MAX:
+ #pragma message("TODO: add ggml_vk_soft_max() F16 src1 and src2 support")
+ #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
+         GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32);
+         GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32);
+
          if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && (src2 == nullptr || src2->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
              return ctx->device->pipeline_soft_max_f32;
          }