llama_cpp 0.15.4 → 0.16.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -0
- data/ext/llama_cpp/extconf.rb +3 -2
- data/ext/llama_cpp/llama_cpp.cpp +17 -3
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +15 -1
- data/vendor/tmp/llama.cpp/Makefile +166 -82
- data/vendor/tmp/llama.cpp/ggml-alloc.c +82 -26
- data/vendor/tmp/llama.cpp/ggml-backend-impl.h +20 -8
- data/vendor/tmp/llama.cpp/ggml-backend.c +183 -69
- data/vendor/tmp/llama.cpp/ggml-backend.h +4 -4
- data/vendor/tmp/llama.cpp/ggml-blas.cpp +363 -0
- data/vendor/tmp/llama.cpp/ggml-blas.h +23 -0
- data/vendor/tmp/llama.cpp/ggml-common.h +6 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/acc.cu +47 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/arange.cu +34 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/argsort.cu +104 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/binbcast.cu +280 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/clamp.cu +34 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/concat.cu +196 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/convert.cu +686 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/cpy.cu +490 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/diagmask.cu +40 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/dmmv.cu +674 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f16.cu +319 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/fattn-tile-f32.cu +312 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/fattn.cu +345 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/getrows.cu +178 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/im2col.cu +104 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/mmq.cu +88 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/mmvq.cu +419 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/norm.cu +221 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/pad.cu +49 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/pool2d.cu +94 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/quantize.cu +112 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/rope.cu +271 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/scale.cu +31 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/softmax.cu +206 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/sumrows.cu +40 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu +10 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu +9 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu +10 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu +10 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu +8 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/tsembd.cu +47 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/unary.cu +286 -0
- data/vendor/tmp/llama.cpp/ggml-cuda/upscale.cu +51 -0
- data/vendor/tmp/llama.cpp/ggml-cuda.cu +103 -135
- data/vendor/tmp/llama.cpp/ggml-kompute.cpp +29 -13
- data/vendor/tmp/llama.cpp/ggml-metal.h +1 -1
- data/vendor/tmp/llama.cpp/ggml-metal.m +45 -33
- data/vendor/tmp/llama.cpp/ggml-metal.metal +83 -59
- data/vendor/tmp/llama.cpp/ggml-rpc.cpp +15 -14
- data/vendor/tmp/llama.cpp/ggml-sycl.cpp +26 -90
- data/vendor/tmp/llama.cpp/ggml-vulkan-shaders.hpp +74522 -14913
- data/vendor/tmp/llama.cpp/ggml-vulkan.cpp +631 -471
- data/vendor/tmp/llama.cpp/ggml.c +278 -603
- data/vendor/tmp/llama.cpp/ggml.h +9 -28
- data/vendor/tmp/llama.cpp/llama.cpp +345 -473
- data/vendor/tmp/llama.cpp/llama.h +21 -43
- metadata +134 -7
- data/vendor/tmp/llama.cpp/ggml-mpi.c +0 -216
- data/vendor/tmp/llama.cpp/ggml-mpi.h +0 -39
- data/vendor/tmp/llama.cpp/ggml-opencl.cpp +0 -2305
- data/vendor/tmp/llama.cpp/ggml-opencl.h +0 -36
data/vendor/tmp/llama.cpp/ggml-blas.cpp (new file)
@@ -0,0 +1,363 @@
+#include "ggml-blas.h"
+#include "ggml-backend-impl.h"
+
+#include <future>
+#include <vector>
+
+#if defined(GGML_USE_ACCELERATE)
+#   include <Accelerate/Accelerate.h>
+#elif defined(GGML_BLAS_USE_MKL)
+#   include <mkl.h>
+#else
+#   include <cblas.h>
+#   ifdef BLIS_ENABLE_CBLAS
+#       include <blis.h>
+#   endif
+#endif
+
+struct ggml_backend_blas_context {
+    int n_threads = GGML_DEFAULT_N_THREADS;
+    std::unique_ptr<char[]> work_data;
+    size_t work_size = 0;
+#ifndef GGML_USE_OPENMP
+    std::vector<std::future<void>> tasks;
+#endif
+};
+
+// helper function to determine if it is better to use BLAS or not
+// for large matrices, BLAS is faster
+static bool ggml_backend_blas_use_blas(const struct ggml_tensor * dst) {
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    const int64_t ne10 = src1->ne[0];
+
+    const int64_t ne0 = dst->ne[0];
+    const int64_t ne1 = dst->ne[1];
+
+    // TODO: find the optimal values for these
+    if (ggml_is_contiguous(src0) &&
+        ggml_is_contiguous(src1) &&
+        src1->type == GGML_TYPE_F32 &&
+        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
+
+        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
+        return true;
+    }
+
+    return false;
+}
+
+static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+
+    GGML_ASSERT(ne0 == ne01);
+    GGML_ASSERT(ne1 == ne11);
+    GGML_ASSERT(ne2 == ne12);
+    GGML_ASSERT(ne3 == ne13);
+
+    // we don't support permuted src0 or src1
+    GGML_ASSERT(nb00 == ggml_type_size(type));
+    GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+    // dst cannot be transposed or permuted
+    GGML_ASSERT(nb0 == sizeof(float));
+    GGML_ASSERT(nb0 <= nb1);
+    GGML_ASSERT(nb1 <= nb2);
+    GGML_ASSERT(nb2 <= nb3);
+
+    // broadcast factors
+    const int64_t r2 = ne12/ne02;
+    const int64_t r3 = ne13/ne03;
+
+    const int64_t ne_plane      = ne01*ne00;
+    const size_t  desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float);
+
+    if (ctx->work_size < desired_wsize) {
+        ctx->work_data.reset(new char[desired_wsize]);
+        ctx->work_size = desired_wsize;
+    }
+    void * wdata = ctx->work_data.get();
+
+    // convert src0 to float
+    if (type != GGML_TYPE_F32) {
+        ggml_type_traits_t type_traits = ggml_internal_get_type_traits(type);
+        ggml_to_float_t const to_float = type_traits.to_float;
+
+        for (int64_t i03 = 0; i03 < ne03; i03++) {
+            for (int64_t i02 = 0; i02 < ne02; i02++) {
+                const void  *      x = (char *)  src0->data + i02*nb02      + i03*nb03;
+                float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;
+
+                const int min_cols_per_thread = 4096;
+                const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1);
+                const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1);
+
+#ifdef GGML_USE_OPENMP
+                #pragma omp parallel for num_threads(n_threads)
+                for (int64_t i01 = 0; i01 < ne01; i01++) {
+                    to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+                }
+#else
+                for (int i = 1; i < n_threads; i++) {
+                    const int64_t start =       i*ne01/n_threads;
+                    const int64_t end   = (i + 1)*ne01/n_threads;
+                    if (start < end) {
+                        ctx->tasks.push_back(std::async(std::launch::async, [=]() {
+                            for (int64_t i01 = start; i01 < end; i01++) {
+                                to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+                            }
+                        }));
+                    }
+                }
+                {
+                    // reuse the current thread for the first task
+                    const int64_t start = 0;
+                    const int64_t end   = ne01/n_threads;
+                    for (int64_t i01 = start; i01 < end; i01++) {
+                        to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+                    }
+                }
+#endif
+            }
+        }
+
+#ifndef GGML_USE_OPENMP
+        // wait for all tasks to finish
+        for (auto & task : ctx->tasks) {
+            task.get();
+        }
+        ctx->tasks.clear();
+#endif
+    }
+
+#if defined(OPENBLAS_VERSION)
+    openblas_set_num_threads(ctx->n_threads);
+#endif
+
+#if defined(BLIS_ENABLE_CBLAS)
+    bli_thread_set_num_threads(ctx->n_threads);
+#endif
+
+    for (int64_t i13 = 0; i13 < ne13; i13++) {
+        for (int64_t i12 = 0; i12 < ne12; i12++) {
+            const int64_t i03 = i13/r3;
+            const int64_t i02 = i12/r2;
+
+            const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
+            const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
+            float       * d = (float *) ((char *)  dst->data + i12*nb2  + i13*nb3);
+
+            if (type != GGML_TYPE_F32) {
+                x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;
+            }
+
+            cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+                        ne1, ne01, ne10,
+                        1.0f,   y, ne10,
+                                x, ne00,
+                        0.0f,   d, ne01);
+        }
+    }
+}
+
+static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    GGML_ASSERT(ne0  == ne00);
+    GGML_ASSERT(ne1  == ne10);
+    GGML_ASSERT(ne2  == ne02);
+    GGML_ASSERT(ne02 == ne12);
+    GGML_ASSERT(ne3  == ne13);
+    GGML_ASSERT(ne03 == ne13);
+
+    // we don't support permuted src0 or src1
+    GGML_ASSERT(nb00 == sizeof(float));
+
+    // dst cannot be transposed or permuted
+    GGML_ASSERT(nb0 == sizeof(float));
+    // GGML_ASSERT(nb0 <= nb1);
+    // GGML_ASSERT(nb1 <= nb2);
+    // GGML_ASSERT(nb2 <= nb3);
+
+    // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
+    // src0: (k,n)
+    // src1: (k,m)
+    // dst:  (m,n)
+    //
+    // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
+    // Also expressed as (major,minor)
+    // a: (m,k): so src1 transposed
+    // b: (k,n): so src0
+    // c: (m,n)
+    //
+    // However, if ggml_is_transposed(src1) is true, then
+    // src1->data already contains a transposed version, so sgemm mustn't
+    // transpose it further.
+
+    int n = src0->ne[0];
+    int k = src0->ne[1];
+    int m = src1->ne[0];
+
+    CBLAS_TRANSPOSE transposeA;
+    int lda;
+
+    if (!ggml_is_transposed(src1)) {
+        transposeA = CblasTrans;
+        lda = m;
+    } else {
+        transposeA = CblasNoTrans;
+        lda = k;
+    }
+
+    float * a = (float *) ((char *) src1->data);
+    float * b = (float *) ((char *) src0->data);
+    float * c = (float *) ((char *) dst->data);
+
+    cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
+
+    GGML_UNUSED(ctx);
+}
+
+// backend interface
+
+GGML_CALL static const char * ggml_backend_blas_name(ggml_backend_t backend) {
+    return "BLAS";
+
+    GGML_UNUSED(backend);
+}
+
+GGML_CALL static void ggml_backend_blas_free(ggml_backend_t backend) {
+    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
+    delete ctx;
+    delete backend;
+}
+
+GGML_CALL static ggml_backend_buffer_type_t ggml_backend_blas_get_default_buffer_type(ggml_backend_t backend) {
+    return ggml_backend_cpu_buffer_type();
+
+    GGML_UNUSED(backend);
+}
+
+GGML_CALL static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
+
+    for (int i = 0; i < cgraph->n_nodes; i++) {
+        struct ggml_tensor * node = cgraph->nodes[i];
+
+        switch (node->op) {
+            case GGML_OP_MUL_MAT:
+                ggml_backend_blas_mul_mat(ctx, node);
+                break;
+
+            case GGML_OP_OUT_PROD:
+                ggml_backend_blas_out_prod(ctx, node);
+                break;
+
+            case GGML_OP_NONE:
+            case GGML_OP_RESHAPE:
+            case GGML_OP_VIEW:
+            case GGML_OP_PERMUTE:
+            case GGML_OP_TRANSPOSE:
+                break;
+
+            default:
+                fprintf(stderr, "%s: unsupported op %s\n", __func__, ggml_op_desc(node));
+                GGML_ASSERT(false);
+        }
+    }
+
+    return GGML_STATUS_SUCCESS;
+
+    GGML_UNUSED(backend);
+}
+
+GGML_CALL static bool ggml_backend_blas_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+    const struct ggml_tensor * src0 = op->src[0];
+    const struct ggml_tensor * src1 = op->src[1];
+
+    return (op->op == GGML_OP_MUL_MAT  && ggml_backend_blas_use_blas(op)) ||
+           (op->op == GGML_OP_OUT_PROD && op->src[0]->type == GGML_TYPE_F32 &&
+                                          op->src[1]->type == GGML_TYPE_F32 &&
+                                          ggml_is_matrix(src0) &&
+                                          ggml_is_matrix(src1) &&
+                                          ggml_is_contiguous(src0) &&
+                                          (ggml_is_contiguous(src1) || ggml_is_transposed(src1)));
+
+    GGML_UNUSED(backend);
+}
+
+GGML_CALL static bool ggml_backend_blas_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
+    return ggml_backend_buft_is_host(buft);
+
+    GGML_UNUSED(backend);
+}
+
+static struct ggml_backend_i blas_backend_i = {
+    /* .get_name                = */ ggml_backend_blas_name,
+    /* .free                    = */ ggml_backend_blas_free,
+    /* .get_default_buffer_type = */ ggml_backend_blas_get_default_buffer_type,
+    /* .set_tensor_async        = */ NULL,
+    /* .get_tensor_async        = */ NULL,
+    /* .cpy_tensor_async        = */ NULL,
+    /* .synchronize             = */ NULL,
+    /* .graph_plan_create       = */ NULL,
+    /* .graph_plan_free         = */ NULL,
+    /* .graph_plan_update       = */ NULL,
+    /* .graph_plan_compute      = */ NULL,
+    /* .graph_compute           = */ ggml_backend_blas_graph_compute,
+    /* .supports_op             = */ ggml_backend_blas_supports_op,
+    /* .supports_buft           = */ ggml_backend_blas_supports_buft,
+    /* .offload_op              = */ NULL,
+    /* .event_new               = */ NULL,
+    /* .event_free              = */ NULL,
+    /* .event_record            = */ NULL,
+    /* .event_wait              = */ NULL,
+    /* .event_synchronize       = */ NULL,
+};
+
+static ggml_guid_t ggml_backend_blas_guid(void) {
+    static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d };
+    return &guid;
+}
+
+ggml_backend_t ggml_backend_blas_init(void) {
+    ggml_backend_blas_context * ctx = new ggml_backend_blas_context;
+
+    ggml_backend_t backend = new ggml_backend {
+        /* .guid      = */ ggml_backend_blas_guid(),
+        /* .interface = */ blas_backend_i,
+        /* .context   = */ ctx,
+    };
+
+#if !defined(NDEBUG) && defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP)
+    if (openblas_get_parallel() != OPENBLAS_OPENMP) {
+        fprintf(stderr, "%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__);
+    }
+#endif
+
+#if !defined(NDEBUG) && defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP)
+    fprintf(stderr, "%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__);
+#endif
+
+    return backend;
+}
+
+GGML_CALL bool ggml_backend_is_blas(ggml_backend_t backend) {
+    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid());
+}
+
+void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) {
+    GGML_ASSERT(ggml_backend_is_blas(backend_blas));
+
+    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context;
+    ctx->n_threads = n_threads;
+}
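The cblas_sgemm call in ggml_backend_blas_mul_mat computes, per 2D plane, a row-major C = A * B^T with A = y (src1, ne1 x ne10), B = x (the dequantized src0, ne01 x ne00, where ne00 == ne10), and C = d (ne1 x ne01). The following plain loop is a reference restatement of that call, not part of the diff; the name sgemm_reference is hypothetical and for illustration only.

static void sgemm_reference(int ne1, int ne01, int ne10,
                            const float * y,  // src1 plane: ne1  rows x ne10 cols
                            const float * x,  // src0 plane: ne01 rows x ne10 cols
                            float       * d)  // dst  plane: ne1  rows x ne01 cols
{
    for (int i = 0; i < ne1; i++) {
        for (int j = 0; j < ne01; j++) {
            float sum = 0.0f;
            for (int k = 0; k < ne10; k++) {
                sum += y[i*ne10 + k] * x[j*ne10 + k]; // CblasTrans: rows of x act as columns of B
            }
            d[i*ne01 + j] = sum;
        }
    }
}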
data/vendor/tmp/llama.cpp/ggml-blas.h (new file)
@@ -0,0 +1,23 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+// backend API
+GGML_API GGML_CALL ggml_backend_t ggml_backend_blas_init(void);
+
+GGML_API GGML_CALL bool ggml_backend_is_blas(ggml_backend_t backend);
+
+// number of threads used for conversion to float
+// for openblas and blis, this will also set the number of threads used for blas operations
+GGML_API GGML_CALL void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);
+
+
+#ifdef  __cplusplus
+}
+#endif
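A minimal sketch of driving this new public API, assuming ggml is built with the BLAS backend enabled (this example is not part of the diff):

#include "ggml-backend.h"
#include "ggml-blas.h"
#include <cstdio>

int main() {
    ggml_backend_t backend = ggml_backend_blas_init();
    if (backend == NULL || !ggml_backend_is_blas(backend)) {
        fprintf(stderr, "BLAS backend unavailable\n");
        return 1;
    }
    // threads used to dequantize src0 to float; with OpenBLAS/BLIS this also
    // sets the thread count of the BLAS kernels themselves
    ggml_backend_blas_set_n_threads(backend, 4);

    // ... build a ggml_cgraph and run it via ggml_backend_graph_compute(backend, graph) ...

    ggml_backend_free(backend);
    return 0;
}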
data/vendor/tmp/llama.cpp/ggml-common.h
@@ -123,12 +123,18 @@ typedef sycl::half2 ggml_half2;
 #define QI1_S (QK_K / (4*QR1_S))
 #define QR1_S 8
 
+#define QI1_M (QK_K / (4*QR1_M))
+#define QR1_M 8
+
 #define QI4_NL (QK4_NL / (4*QR4_NL))
 #define QR4_NL 2
 
 #define QI4_XS (QK_K / (4*QR4_XS))
 #define QR4_XS 8
 
+#define QI3_S (QK_K / (4*QR3_S))
+#define QR3_S 8
+
 #endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP
 
 #define QK4_0 32
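A quick arithmetic check of the new constants (a sketch, assuming ggml's usual super-block size QK_K = 256): QR is roughly how many quantized weights are packed per byte, and QI is then the number of 32-bit integers of quantized data per block that the CUDA/HIP kernels index over.

// QI1_M = QK_K / (4 * QR1_M) = 256 / (4 * 8) = 8
// QI3_S = QK_K / (4 * QR3_S) = 256 / (4 * 8) = 8
static_assert(256 / (4 * 8) == 8, "QI1_M and QI3_S resolve to 8 ints of quant data per block");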
data/vendor/tmp/llama.cpp/ggml-cuda/acc.cu (new file)
@@ -0,0 +1,47 @@
+#include "acc.cuh"
+
+static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne,
+    const int ne10, const int ne11, const int ne12,
+    const int nb1, const int nb2, int offset) {
+    const int i = blockDim.x * blockIdx.x + threadIdx.x;
+    if (i >= ne) {
+        return;
+    }
+    int src1_idx = i - offset;
+    int oz = src1_idx / nb2;
+    int oy = (src1_idx - (oz * nb2)) / nb1;
+    int ox = src1_idx % nb1;
+    if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
+        dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
+    } else {
+        dst[i] = x[i];
+    }
+}
+
+static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements,
+    const int ne10, const int ne11, const int ne12,
+    const int nb1, const int nb2, const int offset, cudaStream_t stream) {
+    int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE;
+    acc_f32<<<num_blocks, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset);
+}
+
+void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+    const float * src0_d = (const float *)src0->data;
+    const float * src1_d = (const float *)src1->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported
+
+    int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
+    int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
+    // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
+    int offset = dst->op_params[3] / 4; // offset in bytes
+
+    acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, stream);
+}
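For reference, here is a host-side mirror of the flat-index math in acc_f32 (a sketch, not in the diff; acc_f32_ref is a hypothetical name). dst is a copy of x with y added into a strided window; nb1, nb2, and offset arrive in bytes and are pre-divided by 4 because every tensor involved is float32.

static void acc_f32_ref(const float * x, const float * y, float * dst, int ne,
                        int ne10, int ne11, int ne12, int nb1, int nb2, int offset) {
    for (int i = 0; i < ne; i++) {
        const int src1_idx = i - offset;           // position inside the window of x
        const int oz = src1_idx / nb2;             // decode (ox, oy, oz) from the
        const int oy = (src1_idx - oz*nb2) / nb1;  // flat index using the strides
        const int ox = src1_idx % nb1;
        if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
            dst[i] = x[i] + y[ox + oy*ne10 + oz*ne10*ne11];  // inside the window: accumulate
        } else {
            dst[i] = x[i];                                   // outside: plain copy
        }
    }
}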
data/vendor/tmp/llama.cpp/ggml-cuda/arange.cu (new file)
@@ -0,0 +1,34 @@
+#include "arange.cuh"
+
+static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) {
+    // blockIDx.x: idx of ne0 / BLOCK_SIZE
+    int nidx = threadIdx.x + blockIdx.x * blockDim.x;
+    if (nidx >= ne0) {
+        return;
+    }
+    dst[nidx] = start + step * nidx;
+}
+
+static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) {
+    int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE;
+    arange_f32<<<num_blocks, CUDA_ARANGE_BLOCK_SIZE, 0, stream>>>(dst, ne0, start, step);
+}
+
+void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+    float start;
+    float stop;
+    float step;
+    memcpy(&start, (float *)dst->op_params + 0, sizeof(float));
+    memcpy(&stop,  (float *)dst->op_params + 1, sizeof(float));
+    memcpy(&step,  (float *)dst->op_params + 2, sizeof(float));
+
+    int64_t steps = (int64_t)ceil((stop - start) / step);
+    GGML_ASSERT(ggml_nelements(dst) == steps);
+
+    arange_f32_cuda(dst_d, dst->ne[0], start, step, stream);
+}
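A worked host-side example of the arange math (illustrative, not in the diff): with start = 0, stop = 10, step = 3, the op must have been built with ceil((10 - 0) / 3) = 4 elements, and each thread writes one value.

#include <cassert>
#include <cmath>
#include <vector>

int main() {
    const float start = 0.0f, stop = 10.0f, step = 3.0f;
    const long steps = (long) std::ceil((stop - start) / step); // 4, must equal dst->ne[0]
    std::vector<float> dst(steps);
    for (long i = 0; i < steps; i++) {
        dst[i] = start + step * i; // {0, 3, 6, 9}, one "thread" per element
    }
    assert(dst.size() == 4 && dst[3] == 9.0f);
    return 0;
}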
data/vendor/tmp/llama.cpp/ggml-cuda/argsort.cu (new file)
@@ -0,0 +1,104 @@
+#include "argsort.cuh"
+
+template<typename T>
+static inline __device__ void ggml_cuda_swap(T & a, T & b) {
+    T tmp = a;
+    a = b;
+    b = tmp;
+}
+
+template<ggml_sort_order order>
+static __global__ void k_argsort_f32_i32(const float * x, int * dst, const int ncols, int ncols_pad) {
+    // bitonic sort
+    int col = threadIdx.x;
+    int row = blockIdx.y;
+
+    if (col >= ncols_pad) {
+        return;
+    }
+
+    const float * x_row = x + row * ncols;
+    extern __shared__ int dst_row[];
+
+    // initialize indices
+    dst_row[col] = col;
+
+    __syncthreads();
+
+    for (int k = 2; k <= ncols_pad; k *= 2) {
+        for (int j = k / 2; j > 0; j /= 2) {
+            int ixj = col ^ j;
+            if (ixj > col) {
+                if ((col & k) == 0) {
+                    if (dst_row[col] >= ncols ||
+                        (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ?
+                            x_row[dst_row[col]] > x_row[dst_row[ixj]] :
+                            x_row[dst_row[col]] < x_row[dst_row[ixj]]))
+                    ) {
+                        ggml_cuda_swap(dst_row[col], dst_row[ixj]);
+                    }
+                } else {
+                    if (dst_row[ixj] >= ncols ||
+                        (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ?
+                            x_row[dst_row[col]] < x_row[dst_row[ixj]] :
+                            x_row[dst_row[col]] > x_row[dst_row[ixj]]))
+                    ) {
+                        ggml_cuda_swap(dst_row[col], dst_row[ixj]);
+                    }
+                }
+            }
+            __syncthreads();
+        }
+    }
+
+    // copy the result to dst without the padding
+    if (col < ncols) {
+        dst[row * ncols + col] = dst_row[col];
+    }
+}
+
+static int next_power_of_2(int x) {
+    int n = 1;
+    while (n < x) {
+        n *= 2;
+    }
+    return n;
+}
+
+static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) {
+    // bitonic sort requires ncols to be power of 2
+    const int ncols_pad = next_power_of_2(ncols);
+
+    const dim3 block_dims(ncols_pad, 1, 1);
+    const dim3 block_nums(1, nrows, 1);
+    const size_t shared_mem = ncols_pad * sizeof(int);
+
+    // FIXME: this limit could be raised by ~2-4x on Ampere or newer
+    GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb);
+
+    if (order == GGML_SORT_ORDER_ASC) {
+        k_argsort_f32_i32<GGML_SORT_ORDER_ASC><<<block_nums, block_dims, shared_mem, stream>>>(x, dst, ncols, ncols_pad);
+    } else if (order == GGML_SORT_ORDER_DESC) {
+        k_argsort_f32_i32<GGML_SORT_ORDER_DESC><<<block_nums, block_dims, shared_mem, stream>>>(x, dst, ncols, ncols_pad);
+    } else {
+        GGML_ASSERT(false);
+    }
+}
+
+void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_I32);
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
+    const int64_t ncols = src0->ne[0];
+    const int64_t nrows = ggml_nrows(src0);
+
+    enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];
+
+    argsort_f32_i32_cuda(src0_d, (int *)dst_d, ncols, nrows, order, stream);
+}
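What k_argsort_f32_i32 produces per row is the permutation of column indices that sorts that row. The kernel pads ncols up to next_power_of_2(ncols) because a bitonic sorting network only operates on power-of-two sequence lengths; the padded slots hold out-of-range indices (>= ncols) that the comparator always pushes to the end, and they are dropped when the result is copied out. A CPU reference of the same result (a sketch, not in the diff; argsort_ref is a hypothetical name):

#include <algorithm>
#include <numeric>
#include <vector>

static std::vector<int> argsort_ref(const float * row, int ncols, bool ascending) {
    std::vector<int> idx(ncols);
    std::iota(idx.begin(), idx.end(), 0); // 0, 1, ..., ncols-1, like dst_row[col] = col
    std::stable_sort(idx.begin(), idx.end(), [&](int a, int b) {
        return ascending ? row[a] < row[b] : row[a] > row[b];
    });
    return idx;
}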