whispercpp 1.3.2 → 1.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +6 -3
- data/README.md +71 -14
- data/Rakefile +20 -7
- data/ext/.gitignore +4 -6
- data/ext/dependencies.rb +36 -24
- data/ext/extconf.rb +1 -1
- data/ext/options.rb +48 -184
- data/ext/ruby_whisper.c +18 -0
- data/ext/ruby_whisper_context.c +43 -12
- data/ext/ruby_whisper_model.c +1 -1
- data/ext/ruby_whisper_params.c +4 -2
- data/ext/ruby_whisper_segment.c +81 -4
- data/ext/ruby_whisper_transcribe.cpp +13 -7
- data/ext/ruby_whisper_vad_params.c +1 -1
- data/ext/sources/CMakeLists.txt +5 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/examples/addon.node/__test__/whisper.spec.js +120 -24
- data/ext/sources/examples/addon.node/addon.cpp +150 -31
- data/ext/sources/examples/addon.node/index.js +3 -0
- data/ext/sources/examples/addon.node/vad-example.js +132 -0
- data/ext/sources/examples/bench/bench.cpp +3 -2
- data/ext/sources/examples/cli/cli.cpp +3 -2
- data/ext/sources/examples/command/command.cpp +32 -8
- data/ext/sources/examples/common-whisper.cpp +14 -7
- data/ext/sources/examples/lsp/lsp.cpp +2 -0
- data/ext/sources/examples/quantize/quantize.cpp +3 -0
- data/ext/sources/examples/server/CMakeLists.txt +3 -0
- data/ext/sources/examples/server/server.cpp +169 -22
- data/ext/sources/examples/stream/stream.cpp +6 -0
- data/ext/sources/examples/talk-llama/CMakeLists.txt +4 -1
- data/ext/sources/examples/talk-llama/llama-arch.cpp +171 -3
- data/ext/sources/examples/talk-llama/llama-arch.h +28 -1
- data/ext/sources/examples/talk-llama/llama-batch.cpp +741 -272
- data/ext/sources/examples/talk-llama/llama-batch.h +112 -54
- data/ext/sources/examples/talk-llama/llama-chat.cpp +30 -8
- data/ext/sources/examples/talk-llama/llama-chat.h +1 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +520 -351
- data/ext/sources/examples/talk-llama/llama-context.h +38 -17
- data/ext/sources/examples/talk-llama/llama-cparams.cpp +1 -1
- data/ext/sources/examples/talk-llama/llama-cparams.h +1 -1
- data/ext/sources/examples/talk-llama/llama-graph.cpp +447 -372
- data/ext/sources/examples/talk-llama/llama-graph.h +128 -58
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +10 -2
- data/ext/sources/examples/talk-llama/llama-hparams.h +19 -2
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.cpp +279 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.h +128 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.cpp +1841 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.h +303 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +14 -472
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +86 -26
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +246 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +138 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +1125 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +183 -0
- data/ext/sources/examples/talk-llama/llama-memory.cpp +58 -0
- data/ext/sources/examples/talk-llama/llama-memory.h +88 -4
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +1 -1
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +42 -17
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +1 -0
- data/ext/sources/examples/talk-llama/llama-model.cpp +1863 -563
- data/ext/sources/examples/talk-llama/llama-model.h +27 -0
- data/ext/sources/examples/talk-llama/llama-quant.cpp +89 -6
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +65 -28
- data/ext/sources/examples/talk-llama/llama-vocab.h +1 -0
- data/ext/sources/examples/talk-llama/llama.cpp +11 -7
- data/ext/sources/examples/talk-llama/llama.h +147 -40
- data/ext/sources/examples/talk-llama/talk-llama.cpp +2 -0
- data/ext/sources/examples/talk-llama/unicode.cpp +5 -0
- data/ext/sources/examples/vad-speech-segments/speech.cpp +6 -0
- data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +2 -0
- data/ext/sources/ggml/CMakeLists.txt +48 -3
- data/ext/sources/ggml/cmake/common.cmake +24 -0
- data/ext/sources/ggml/include/ggml-backend.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +2 -0
- data/ext/sources/ggml/include/ggml.h +144 -5
- data/ext/sources/ggml/src/CMakeLists.txt +82 -24
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +5 -0
- data/ext/sources/ggml/src/ggml-backend.cpp +46 -23
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +3 -3
- data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +1 -0
- data/ext/sources/ggml/src/ggml-cann/common.h +6 -1
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +33 -9
- data/ext/sources/ggml/src/ggml-common.h +4 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +133 -40
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +11 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +4114 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2163 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +2639 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp +82 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +2732 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +2069 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +397 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +1300 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +1481 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +4311 -0
- data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-aarch64.cpp → arch/x86/repack.cpp} +79 -3225
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +184 -0
- data/ext/sources/ggml/src/ggml-cpu/common.h +4 -3
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +16 -7
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +146 -105
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +12 -8
- data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-hbm.cpp → hbm.cpp} +1 -1
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +58 -8
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +5 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +1057 -174
- data/ext/sources/ggml/src/ggml-cpu/ops.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +1158 -0
- data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-quants.h → quants.h} +26 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +1571 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.h +98 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +330 -38
- data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +111 -18
- data/ext/sources/ggml/src/ggml-cpu/vec.h +303 -94
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +60 -37
- data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cu +161 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +22 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +2 -2
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +5 -2
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +265 -123
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +19 -0
- data/ext/sources/ggml/src/ggml-cuda/mean.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/mmv.cu +257 -87
- data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +2 -3
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +6 -4
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +5 -18
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +0 -1
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +89 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +7 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +4 -0
- data/ext/sources/ggml/src/ggml-impl.h +127 -183
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +11 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +27 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.m +331 -49
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +564 -282
- data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +2 -2
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +14 -0
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +1859 -489
- data/ext/sources/ggml/src/ggml-opencl/kernels/argsort.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +109 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +201 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +30 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sigmoid.cl +29 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +63 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +48 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +121 -0
- data/ext/sources/ggml/src/ggml-quants.c +6 -8
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +18 -15
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +3 -3
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +5 -6
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +20 -48
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +28 -41
- data/ext/sources/ggml/src/ggml-sycl/conv.cpp +4 -10
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +117 -165
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +192 -53
- data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +32 -0
- data/ext/sources/ggml/src/ggml-sycl/dmmv.cpp +49 -67
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +31 -1
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +648 -1039
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +18 -9
- data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +8 -105
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +238 -100
- data/ext/sources/ggml/src/ggml-sycl/gla.cpp +2 -2
- data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/mmq.cpp +60 -80
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +158 -203
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +55 -74
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +38 -10
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +138 -27
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +3 -1
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/tsembd.cpp +3 -8
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +108 -16
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +12 -16
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +36 -32
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +726 -282
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +4 -12
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +98 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +13 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +15 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +12 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +10 -1
- data/ext/sources/ggml/src/ggml.c +328 -48
- data/ext/sources/ggml/src/ggml.cpp +26 -0
- data/ext/sources/ggml/src/gguf.cpp +24 -3
- data/ext/sources/include/whisper.h +2 -0
- data/ext/sources/src/CMakeLists.txt +2 -0
- data/ext/sources/src/coreml/whisper-compat.h +10 -0
- data/ext/sources/src/coreml/whisper-compat.m +35 -0
- data/ext/sources/src/coreml/whisper-decoder-impl.m +1 -0
- data/ext/sources/src/coreml/whisper-encoder-impl.m +1 -0
- data/ext/sources/src/whisper.cpp +218 -169
- data/extsources.rb +15 -9
- data/lib/whisper/context.rb +15 -0
- data/lib/whisper/model/uri.rb +56 -1
- data/lib/whisper/segment.rb +58 -0
- data/sig/whisper.rbs +68 -38
- data/{tests → test}/helper.rb +1 -12
- data/{tests → test}/test_model.rb +9 -0
- data/test/test_package.rb +51 -0
- data/test/test_segment.rb +146 -0
- data/{tests → test}/test_whisper.rb +70 -0
- data/whispercpp.gemspec +2 -3
- metadata +91 -43
- data/ext/sources/.dockerignore +0 -3
- data/ext/sources/.github/workflows/bindings-ruby.yml +0 -21
- data/ext/sources/ci/run.sh +0 -336
- data/ext/sources/close-issue.yml +0 -28
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +0 -2739
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -13747
- data/tests/test_package.rb +0 -46
- data/tests/test_segment.rb +0 -74
- /data/ext/sources/ggml/src/ggml-cpu/{cpu-feats-x86.cpp → arch/x86/cpu-feats.cpp} +0 -0
- /data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-hbm.h → hbm.h} +0 -0
- /data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-traits.h → traits.h} +0 -0
- /data/{tests → test}/jfk_reader/.gitignore +0 -0
- /data/{tests → test}/jfk_reader/extconf.rb +0 -0
- /data/{tests → test}/jfk_reader/jfk_reader.c +0 -0
- /data/{tests → test}/test_callback.rb +0 -0
- /data/{tests → test}/test_error.rb +0 -0
- /data/{tests → test}/test_params.rb +0 -0
- /data/{tests → test}/test_vad.rb +0 -0
- /data/{tests → test}/test_vad_params.rb +0 -0
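The single hunk reproduced below is the only file body included in this view; judging by its `@@ -0,0 +1,1300 @@` header, it corresponds to the new file data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c listed above (+1300 lines), which adds s390x VXE/VXE2 vector implementations of the ggml quantization and dot-product kernels. The listing is truncated partway through.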
@@ -0,0 +1,1300 @@
|
|
1
|
+
#define GGML_COMMON_IMPL_C
|
2
|
+
#include "ggml-common.h"
|
3
|
+
#include "ggml-quants.h"
|
4
|
+
#include "ggml-impl.h"
|
5
|
+
#include "ggml-cpu.h"
|
6
|
+
#include "simd-mappings.h"
|
7
|
+
|
8
|
+
#include "../../quants.h"
|
9
|
+
#include "../../ggml-cpu-impl.h"
|
10
|
+
|
11
|
+
#include <math.h>
|
12
|
+
#include <string.h>
|
13
|
+
#include <assert.h>
|
14
|
+
#include <float.h>
|
15
|
+
#include <stdlib.h> // for qsort
|
16
|
+
#include <stdio.h> // for GGML_ASSERT
|
17
|
+
|
18
|
+
#define GROUP_MAX_EPS 1e-15f
|
19
|
+
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
|
20
|
+
#define GROUP_MAX_EPS_IQ2_S 1e-8f
|
21
|
+
#define GROUP_MAX_EPS_IQ1_M 1e-7f
|
22
|
+
#define GROUP_MAX_EPS_IQ1_S 1e-12f
|
23
|
+
|
24
|
+
#define UNUSED GGML_UNUSED
|
25
|
+
|
26
|
+
void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
|
27
|
+
assert(QK8_0 == 32);
|
28
|
+
assert(k % QK8_0 == 0);
|
29
|
+
const int nb = k / QK8_0;
|
30
|
+
|
31
|
+
block_q8_0 * GGML_RESTRICT y = vy;
|
32
|
+
|
33
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
34
|
+
for (int i = 0; i < nb; i++) {
|
35
|
+
__vector float srcv [8];
|
36
|
+
__vector float asrcv[8];
|
37
|
+
__vector float amaxv[8];
|
38
|
+
|
39
|
+
for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j);
|
40
|
+
for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
|
41
|
+
for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
|
42
|
+
for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
|
43
|
+
for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
|
44
|
+
|
45
|
+
const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
|
46
|
+
vec_extract(amaxv[0], 1)),
|
47
|
+
MAX(vec_extract(amaxv[0], 2),
|
48
|
+
vec_extract(amaxv[0], 3)));
|
49
|
+
|
50
|
+
const float d = amax / ((1 << 7) - 1);
|
51
|
+
const float id = d ? 1.0f / d : 0.0f;
|
52
|
+
|
53
|
+
y[i].d = GGML_CPU_FP32_TO_FP16(d);
|
54
|
+
|
55
|
+
for (int j = 0; j < 8; j++) {
|
56
|
+
const __vector float v = vec_mul(srcv[j], vec_splats(id));
|
57
|
+
const __vector int32_t vi = vec_signed(v);
|
58
|
+
|
59
|
+
y[i].qs[4*j + 0] = vec_extract(vi, 0);
|
60
|
+
y[i].qs[4*j + 1] = vec_extract(vi, 1);
|
61
|
+
y[i].qs[4*j + 2] = vec_extract(vi, 2);
|
62
|
+
y[i].qs[4*j + 3] = vec_extract(vi, 3);
|
63
|
+
}
|
64
|
+
}
|
65
|
+
#else
|
66
|
+
GGML_UNUSED(nb);
|
67
|
+
// scalar
|
68
|
+
quantize_row_q8_0_ref(x, y, k);
|
69
|
+
#endif
|
70
|
+
}
|
71
|
+
|
72
|
+
void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
|
73
|
+
assert(k % QK8_1 == 0);
|
74
|
+
const int nb = k / QK8_1;
|
75
|
+
|
76
|
+
block_q8_1 * GGML_RESTRICT y = vy;
|
77
|
+
|
78
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
79
|
+
for (int i = 0; i < nb; i++) {
|
80
|
+
__vector float srcv [8];
|
81
|
+
__vector float asrcv[8];
|
82
|
+
__vector float amaxv[8];
|
83
|
+
|
84
|
+
for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j);
|
85
|
+
for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
|
86
|
+
for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
|
87
|
+
for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
|
88
|
+
for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
|
89
|
+
|
90
|
+
const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
|
91
|
+
vec_extract(amaxv[0], 1)),
|
92
|
+
MAX(vec_extract(amaxv[0], 2),
|
93
|
+
vec_extract(amaxv[0], 3)));
|
94
|
+
|
95
|
+
const float d = amax / ((1 << 7) - 1);
|
96
|
+
const float id = d ? 1.0f / d : 0.0f;
|
97
|
+
|
98
|
+
y[i].d = GGML_CPU_FP32_TO_FP16(d);
|
99
|
+
|
100
|
+
__vector int32_t acc = vec_splats(0);
|
101
|
+
|
102
|
+
for (int j = 0; j < 8; j++) {
|
103
|
+
const __vector float v = vec_mul(srcv[j], vec_splats(id));
|
104
|
+
const __vector int32_t vi = vec_signed(v);
|
105
|
+
|
106
|
+
y[i].qs[4*j + 0] = vec_extract(vi, 0);
|
107
|
+
y[i].qs[4*j + 1] = vec_extract(vi, 1);
|
108
|
+
y[i].qs[4*j + 2] = vec_extract(vi, 2);
|
109
|
+
y[i].qs[4*j + 3] = vec_extract(vi, 3);
|
110
|
+
|
111
|
+
acc = vec_add(acc, vi);
|
112
|
+
}
|
113
|
+
|
114
|
+
y[i].s = GGML_CPU_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3]));
|
115
|
+
}
|
116
|
+
#else
|
117
|
+
GGML_UNUSED(nb);
|
118
|
+
// scalar
|
119
|
+
quantize_row_q8_1_ref(x, y, k);
|
120
|
+
#endif
|
121
|
+
}
|
122
|
+
|
123
|
+
|
124
|
+
//===================================== Dot products =================================
|
125
|
+
|
126
|
+
void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
127
|
+
const int qk = QK8_0;
|
128
|
+
const int nb = n / qk;
|
129
|
+
|
130
|
+
assert(n % qk == 0);
|
131
|
+
assert(nrc == 1);
|
132
|
+
UNUSED(nrc);
|
133
|
+
UNUSED(bx);
|
134
|
+
UNUSED(by);
|
135
|
+
UNUSED(bs);
|
136
|
+
|
137
|
+
const block_q4_0 * GGML_RESTRICT x = vx;
|
138
|
+
const block_q8_0 * GGML_RESTRICT y = vy;
|
139
|
+
|
140
|
+
int ib = 0;
|
141
|
+
float sumf = 0;
|
142
|
+
|
143
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
144
|
+
__vector float acc = vec_splats(0.0f);
|
145
|
+
|
146
|
+
const __vector uint8_t v_m = vec_splats((const uint8_t)0x0F);
|
147
|
+
const __vector int8_t v_s = vec_splats( (const int8_t)0x08);
|
148
|
+
|
149
|
+
for (; ib < nb; ++ib) {
|
150
|
+
const __vector uint8_t v_x = vec_xl(0, x[ib].qs);
|
151
|
+
const __vector int8_t v_xl = (const __vector int8_t)(v_x & v_m);
|
152
|
+
const __vector int8_t v_xh = (const __vector int8_t)(v_x >> 4);
|
153
|
+
|
154
|
+
const __vector int8_t v_xls = vec_sub(v_xl, v_s);
|
155
|
+
const __vector int8_t v_xhs = vec_sub(v_xh, v_s);
|
156
|
+
|
157
|
+
const __vector int8_t v_yl = vec_xl(0 , y[ib].qs);
|
158
|
+
const __vector int8_t v_yh = vec_xl(QK8_0/2, y[ib].qs);
|
159
|
+
|
160
|
+
const __vector int16_t v_xylso = vec_mulo(v_xls, v_yl);
|
161
|
+
const __vector int16_t v_xylse = vec_mule(v_xls, v_yl);
|
162
|
+
const __vector int16_t v_xyhso = vec_mulo(v_xhs, v_yh);
|
163
|
+
const __vector int16_t v_xyhse = vec_mule(v_xhs, v_yh);
|
164
|
+
|
165
|
+
__vector int16_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_);
|
166
|
+
|
167
|
+
const __vector float v_xy = vec_float(vec_unpackh(v_xy_));
|
168
|
+
const __vector float v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d));
|
169
|
+
|
170
|
+
acc = vec_madd(v_xy, v_d, acc);
|
171
|
+
}
|
172
|
+
|
173
|
+
sumf = acc[0] + acc[1] + acc[2] + acc[3];
|
174
|
+
|
175
|
+
#endif
|
176
|
+
for (; ib < nb; ++ib) {
|
177
|
+
int sumi0 = 0;
|
178
|
+
int sumi1 = 0;
|
179
|
+
|
180
|
+
for (int j = 0; j < qk/2; ++j) {
|
181
|
+
const int v0 = (x[ib].qs[j] & 0x0F) - 8;
|
182
|
+
const int v1 = (x[ib].qs[j] >> 4) - 8;
|
183
|
+
|
184
|
+
sumi0 += (v0 * y[ib].qs[j]);
|
185
|
+
sumi1 += (v1 * y[ib].qs[j + qk/2]);
|
186
|
+
}
|
187
|
+
|
188
|
+
int sumi = sumi0 + sumi1;
|
189
|
+
sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
|
190
|
+
}
|
191
|
+
|
192
|
+
*s = sumf;
|
193
|
+
}
|
194
|
+
|
195
|
+
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
196
|
+
const int qk = QK8_1;
|
197
|
+
const int nb = n / qk;
|
198
|
+
|
199
|
+
assert(n % qk == 0);
|
200
|
+
assert(nrc == 1);
|
201
|
+
UNUSED(nrc);
|
202
|
+
UNUSED(bx);
|
203
|
+
UNUSED(by);
|
204
|
+
UNUSED(bs);
|
205
|
+
|
206
|
+
const block_q4_1 * GGML_RESTRICT x = vx;
|
207
|
+
const block_q8_1 * GGML_RESTRICT y = vy;
|
208
|
+
|
209
|
+
int ib = 0;
|
210
|
+
float sumf = 0;
|
211
|
+
|
212
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
213
|
+
float summs = 0;
|
214
|
+
float32x4_t acc = vec_splats(0.0f);
|
215
|
+
|
216
|
+
const uint8x16_t v_m = vec_splat_u8(0x0F);
|
217
|
+
|
218
|
+
#pragma GCC unroll 4
|
219
|
+
for (; ib < nb; ++ib) {
|
220
|
+
__builtin_prefetch(x[ib].qs, 0, 1);
|
221
|
+
__builtin_prefetch(y[ib].qs, 0, 1);
|
222
|
+
|
223
|
+
summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s);
|
224
|
+
|
225
|
+
const uint8x16_t v_x = vec_xl(0, x[ib].qs);
|
226
|
+
const int8x16_t v_xl = (const int8x16_t)(v_x & v_m);
|
227
|
+
const int8x16_t v_xh = (const int8x16_t)(v_x >> 4);
|
228
|
+
|
229
|
+
const int8x16_t v_yl = vec_xl(0 , y[ib].qs);
|
230
|
+
const int8x16_t v_yh = vec_xl(QK8_1/2, y[ib].qs);
|
231
|
+
|
232
|
+
const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh);
|
233
|
+
const float32x4_t v_xy = vec_float(v_xy_);
|
234
|
+
|
235
|
+
const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d));
|
236
|
+
|
237
|
+
acc = vec_madd(v_xy, v_d, acc);
|
238
|
+
}
|
239
|
+
|
240
|
+
sumf = acc[0] + acc[1] + acc[2] + acc[3] + summs;
|
241
|
+
|
242
|
+
#endif
|
243
|
+
for (; ib < nb; ++ib) {
|
244
|
+
int sumi0 = 0;
|
245
|
+
int sumi1 = 0;
|
246
|
+
|
247
|
+
for (int j = 0; j < qk/2; ++j) {
|
248
|
+
const int v0 = (x[ib].qs[j] & 0x0F);
|
249
|
+
const int v1 = (x[ib].qs[j] >> 4);
|
250
|
+
|
251
|
+
sumi0 += (v0 * y[ib].qs[j]);
|
252
|
+
sumi1 += (v1 * y[ib].qs[j + qk/2]);
|
253
|
+
}
|
254
|
+
|
255
|
+
int sumi = sumi0 + sumi1;
|
256
|
+
sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
|
257
|
+
}
|
258
|
+
|
259
|
+
*s = sumf;
|
260
|
+
}
|
261
|
+
|
262
|
+
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
263
|
+
const int qk = QK8_0;
|
264
|
+
const int nb = n / qk;
|
265
|
+
|
266
|
+
assert(n % qk == 0);
|
267
|
+
assert(nrc == 1);
|
268
|
+
UNUSED(nrc);
|
269
|
+
UNUSED(bx);
|
270
|
+
UNUSED(by);
|
271
|
+
UNUSED(bs);
|
272
|
+
|
273
|
+
const block_q8_0 * GGML_RESTRICT x = vx;
|
274
|
+
const block_q8_0 * GGML_RESTRICT y = vy;
|
275
|
+
|
276
|
+
int ib = 0;
|
277
|
+
float sumf = 0;
|
278
|
+
|
279
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
280
|
+
__vector float acc = vec_splats(0.0f);
|
281
|
+
|
282
|
+
#pragma GCC unroll 8
|
283
|
+
for (; ib < nb; ++ib) {
|
284
|
+
__builtin_prefetch(x[ib].qs, 0, 1);
|
285
|
+
__builtin_prefetch(y[ib].qs, 0, 1);
|
286
|
+
|
287
|
+
const int8x16_t v_xl = vec_xl(0 , x[ib].qs);
|
288
|
+
const int8x16_t v_xh = vec_xl(QK8_0/2, x[ib].qs);
|
289
|
+
const int8x16_t v_yl = vec_xl(0 , y[ib].qs);
|
290
|
+
const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs);
|
291
|
+
|
292
|
+
const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh);
|
293
|
+
const float32x4_t v_xy = vec_float(v_xy_);
|
294
|
+
const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d));
|
295
|
+
|
296
|
+
acc = vec_madd(v_xy, v_d, acc);
|
297
|
+
}
|
298
|
+
|
299
|
+
sumf = acc[0] + acc[1] + acc[2] + acc[3];
|
300
|
+
|
301
|
+
#endif
|
302
|
+
for (; ib < nb; ++ib) {
|
303
|
+
int sumi = 0;
|
304
|
+
|
305
|
+
for (int j = 0; j < qk; j++) {
|
306
|
+
sumi += x[ib].qs[j]*y[ib].qs[j];
|
307
|
+
}
|
308
|
+
|
309
|
+
sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
|
310
|
+
}
|
311
|
+
|
312
|
+
*s = sumf;
|
313
|
+
}
|
314
|
+
|
315
|
+
void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
316
|
+
assert(n % QK_K == 0);
|
317
|
+
assert(nrc == 1);
|
318
|
+
UNUSED(nrc);
|
319
|
+
UNUSED(bx);
|
320
|
+
UNUSED(by);
|
321
|
+
UNUSED(bs);
|
322
|
+
|
323
|
+
const uint32_t kmask1 = 0x03030303;
|
324
|
+
const uint32_t kmask2 = 0x0f0f0f0f;
|
325
|
+
|
326
|
+
const block_q3_K * GGML_RESTRICT x = vx;
|
327
|
+
const block_q8_K * GGML_RESTRICT y = vy;
|
328
|
+
|
329
|
+
const int nb = n / QK_K;
|
330
|
+
|
331
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
332
|
+
uint32_t aux[3];
|
333
|
+
uint32_t utmp[4];
|
334
|
+
|
335
|
+
const int32x4_t v_z = vec_splat_s32(0);
|
336
|
+
const uint8x16_t v_3m = vec_splat_u8(0x03);
|
337
|
+
|
338
|
+
const uint8x16_t v_0c = vec_splat_u8(1);
|
339
|
+
const uint8x16_t v_1c = vec_sl(v_0c, 1);
|
340
|
+
const uint8x16_t v_2c = vec_sl(v_0c, 2);
|
341
|
+
const uint8x16_t v_3c = vec_sl(v_0c, 3);
|
342
|
+
|
343
|
+
uint8x16_t q3h[4];
|
344
|
+
uint8x16_t q3b[2];
|
345
|
+
int8x16_t q3bytes[4];
|
346
|
+
int8x16_t q8bytes[4];
|
347
|
+
uint8x16_t qhbits[2];
|
348
|
+
|
349
|
+
float sum = 0;
|
350
|
+
|
351
|
+
for (int i = 0; i < nb; ++i) {
|
352
|
+
const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
|
353
|
+
|
354
|
+
const uint8_t * restrict x0l = x[i].qs;
|
355
|
+
const uint8_t * restrict x0h = x[i].hmask;
|
356
|
+
const int8_t * restrict y0 = y[i].qs;
|
357
|
+
|
358
|
+
qhbits[0] = vec_xl(0 , x0h);
|
359
|
+
qhbits[1] = vec_xl(16, x0h);
|
360
|
+
|
361
|
+
int32_t isum = 0;
|
362
|
+
|
363
|
+
memcpy(aux, x[i].scales, 12);
|
364
|
+
utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
|
365
|
+
utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
|
366
|
+
utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
|
367
|
+
utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
|
368
|
+
|
369
|
+
int8_t * scale = (int8_t *)utmp;
|
370
|
+
for (int j = 0; j < 16; ++j) scale[j] -= 32;
|
371
|
+
|
372
|
+
for (int j = 0; j < QK_K/128; ++j) {
|
373
|
+
int32x4_t isum0, isum1, isum2, isum3;
|
374
|
+
|
375
|
+
q3b[0] = vec_xl(0 , x0l);
|
376
|
+
q3b[1] = vec_xl(16, x0l);
|
377
|
+
x0l += 32;
|
378
|
+
|
379
|
+
q8bytes[0] = vec_xl(0 , y0);
|
380
|
+
q8bytes[1] = vec_xl(16 , y0);
|
381
|
+
q8bytes[2] = vec_xl(32 , y0);
|
382
|
+
q8bytes[3] = vec_xl(48 , y0);
|
383
|
+
q8bytes[4] = vec_xl(64 , y0);
|
384
|
+
q8bytes[5] = vec_xl(80 , y0);
|
385
|
+
q8bytes[6] = vec_xl(96 , y0);
|
386
|
+
q8bytes[7] = vec_xl(112, y0);
|
387
|
+
y0 += 128;
|
388
|
+
|
389
|
+
q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2);
|
390
|
+
q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2);
|
391
|
+
q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1);
|
392
|
+
q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1);
|
393
|
+
|
394
|
+
q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]);
|
395
|
+
q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]);
|
396
|
+
q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]);
|
397
|
+
q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]);
|
398
|
+
|
399
|
+
isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[0]);
|
400
|
+
isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[1]);
|
401
|
+
isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[2]);
|
402
|
+
isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[3]);
|
403
|
+
|
404
|
+
isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0];
|
405
|
+
isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1];
|
406
|
+
isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2];
|
407
|
+
isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3];
|
408
|
+
|
409
|
+
scale += 4;
|
410
|
+
|
411
|
+
q3h[0] = vec_andc(v_2c, qhbits[0]);
|
412
|
+
q3h[1] = vec_andc(v_2c, qhbits[1]);
|
413
|
+
q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1);
|
414
|
+
q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1);
|
415
|
+
|
416
|
+
q3bytes[0] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]);
|
417
|
+
q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]);
|
418
|
+
q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]);
|
419
|
+
q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]);
|
420
|
+
|
421
|
+
isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[4]);
|
422
|
+
isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[5]);
|
423
|
+
isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[6]);
|
424
|
+
isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[7]);
|
425
|
+
|
426
|
+
isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0];
|
427
|
+
isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1];
|
428
|
+
isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2];
|
429
|
+
isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3];
|
430
|
+
|
431
|
+
scale += 4;
|
432
|
+
|
433
|
+
if (j == 0) {
|
434
|
+
qhbits[0] = vec_sr(qhbits[0], 4);
|
435
|
+
qhbits[1] = vec_sr(qhbits[1], 4);
|
436
|
+
}
|
437
|
+
}
|
438
|
+
|
439
|
+
sum += d * isum;
|
440
|
+
}
|
441
|
+
|
442
|
+
*s = sum;
|
443
|
+
|
444
|
+
#else
|
445
|
+
// scalar version
|
446
|
+
// This function is written like this so the compiler can manage to vectorize most of it
|
447
|
+
// Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
|
448
|
+
// manually vectorized version above. Every other version I tried would run at least 4 times slower.
|
449
|
+
// The ideal situation would be if we could just write the code once, and the compiler would
|
450
|
+
// automatically produce the best possible set of machine instructions, instead of us having to manually
|
451
|
+
// write vectorized versions for AVX, ARM_NEON, etc.
|
452
|
+
|
453
|
+
int8_t aux8[QK_K];
|
454
|
+
int16_t aux16[8];
|
455
|
+
float sums [8];
|
456
|
+
int32_t aux32[8];
|
457
|
+
memset(sums, 0, 8*sizeof(float));
|
458
|
+
|
459
|
+
uint32_t auxs[4];
|
460
|
+
const int8_t * scales = (const int8_t*)auxs;
|
461
|
+
|
462
|
+
float sumf = 0;
|
463
|
+
for (int i = 0; i < nb; ++i) {
|
464
|
+
const uint8_t * GGML_RESTRICT q3 = x[i].qs;
|
465
|
+
const uint8_t * GGML_RESTRICT hm = x[i].hmask;
|
466
|
+
const int8_t * GGML_RESTRICT q8 = y[i].qs;
|
467
|
+
memset(aux32, 0, 8*sizeof(int32_t));
|
468
|
+
int8_t * GGML_RESTRICT a = aux8;
|
469
|
+
uint8_t m = 1;
|
470
|
+
for (int j = 0; j < QK_K; j += 128) {
|
471
|
+
for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
|
472
|
+
for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
|
473
|
+
a += 32; m <<= 1;
|
474
|
+
for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
|
475
|
+
for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
|
476
|
+
a += 32; m <<= 1;
|
477
|
+
for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
|
478
|
+
for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
|
479
|
+
a += 32; m <<= 1;
|
480
|
+
for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
|
481
|
+
for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
|
482
|
+
a += 32; m <<= 1;
|
483
|
+
q3 += 32;
|
484
|
+
}
|
485
|
+
a = aux8;
|
486
|
+
|
487
|
+
memcpy(auxs, x[i].scales, 12);
|
488
|
+
uint32_t tmp = auxs[2];
|
489
|
+
auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
|
490
|
+
auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
|
491
|
+
auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
|
492
|
+
auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
|
493
|
+
for (int j = 0; j < QK_K/16; ++j) {
|
494
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
495
|
+
for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
|
496
|
+
q8 += 8; a += 8;
|
497
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
498
|
+
for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
|
499
|
+
q8 += 8; a += 8;
|
500
|
+
}
|
501
|
+
const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
|
502
|
+
for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
|
503
|
+
}
|
504
|
+
for (int l = 0; l < 8; ++l) sumf += sums[l];
|
505
|
+
*s = sumf;
|
506
|
+
|
507
|
+
#endif
|
508
|
+
|
509
|
+
}
|
510
|
+
|
511
|
+
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
512
|
+
assert(n % QK_K == 0);
|
513
|
+
assert(nrc == 1);
|
514
|
+
UNUSED(nrc);
|
515
|
+
UNUSED(bx);
|
516
|
+
UNUSED(by);
|
517
|
+
UNUSED(bs);
|
518
|
+
|
519
|
+
const block_q4_K * GGML_RESTRICT x = vx;
|
520
|
+
const block_q8_K * GGML_RESTRICT y = vy;
|
521
|
+
|
522
|
+
const int nb = n / QK_K;
|
523
|
+
|
524
|
+
static const uint32_t kmask1 = 0x3f3f3f3f;
|
525
|
+
static const uint32_t kmask2 = 0x0f0f0f0f;
|
526
|
+
static const uint32_t kmask3 = 0x03030303;
|
527
|
+
|
528
|
+
uint32_t utmp[4];
|
529
|
+
|
530
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
531
|
+
const uint8x16_t v_lm = vec_splat_u8(0x0F);
|
532
|
+
const int32x4_t v_z = vec_splat_s32(0);
|
533
|
+
|
534
|
+
uint8x16_t v_x[2];
|
535
|
+
int8x16_t v_xl[2];
|
536
|
+
int8x16_t v_y[2];
|
537
|
+
|
538
|
+
float sumf = 0;
|
539
|
+
|
540
|
+
for (int i = 0; i < nb; ++i) {
|
541
|
+
const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
|
542
|
+
const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
|
543
|
+
|
544
|
+
const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
|
545
|
+
const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
|
546
|
+
const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh);
|
547
|
+
|
548
|
+
memcpy(utmp, x[i].scales, 12);
|
549
|
+
|
550
|
+
uint32x4_t v_mins8 = { 0 };
|
551
|
+
v_mins8 = vec_insert(utmp[1] & kmask1, v_mins8, 0);
|
552
|
+
v_mins8 = vec_insert(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), v_mins8, 1);
|
553
|
+
|
554
|
+
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
555
|
+
utmp[0] &= kmask1;
|
556
|
+
|
557
|
+
const int16x8_t v_minsh = (int16x8_t)vec_unpackh((uint8x16_t)v_mins8);
|
558
|
+
|
559
|
+
const int32x4_t v_minso = vec_mulo(v_ysums, v_minsh);
|
560
|
+
const int32x4_t v_minse = vec_mule(v_ysums, v_minsh);
|
561
|
+
const int32x4_t v_mins = v_minso + v_minse;
|
562
|
+
sumf -= dmin * (v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]);
|
563
|
+
|
564
|
+
const uint8_t * scales = (const uint8_t *)utmp;
|
565
|
+
const uint8_t * GGML_RESTRICT x0 = x[i].qs;
|
566
|
+
const int8_t * GGML_RESTRICT y0 = y[i].qs;
|
567
|
+
|
568
|
+
int32_t sumi1 = 0;
|
569
|
+
int32_t sumi2 = 0;
|
570
|
+
|
571
|
+
for (int j = 0; j < QK_K/64; ++j) {
|
572
|
+
v_x[0] = vec_xl(0 , x0);
|
573
|
+
v_x[1] = vec_xl(16, x0);
|
574
|
+
x0 += 32;
|
575
|
+
|
576
|
+
v_y[0] = vec_xl(0 , y0);
|
577
|
+
v_y[1] = vec_xl(16, y0);
|
578
|
+
y0 += 32;
|
579
|
+
|
580
|
+
v_xl[0] = (int8x16_t)vec_and(v_x[0], v_lm);
|
581
|
+
v_xl[1] = (int8x16_t)vec_and(v_x[1], v_lm);
|
582
|
+
|
583
|
+
const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]);
|
584
|
+
sumi1 += (p1[0] + p1[1] + p1[2] + p1[3]) * scales[2*j+0];
|
585
|
+
|
586
|
+
v_y[0] = vec_xl(0 , y0);
|
587
|
+
v_y[1] = vec_xl(16, y0);
|
588
|
+
y0 += 32;
|
589
|
+
|
590
|
+
v_xl[0] = (int8x16_t)vec_sr(v_x[0], 4);
|
591
|
+
v_xl[1] = (int8x16_t)vec_sr(v_x[1], 4);
|
592
|
+
|
593
|
+
const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]);
|
594
|
+
sumi2 += (p2[0] + p2[1] + p2[2] + p2[3]) * scales[2*j+1];
|
595
|
+
}
|
596
|
+
|
597
|
+
sumf += d * (sumi1 + sumi2);
|
598
|
+
}
|
599
|
+
|
600
|
+
*s = sumf;
|
601
|
+
|
602
|
+
#else
|
603
|
+
|
604
|
+
const uint8_t * scales = (const uint8_t*)&utmp[0];
|
605
|
+
const uint8_t * mins = (const uint8_t*)&utmp[2];
|
606
|
+
|
607
|
+
int8_t aux8[QK_K];
|
608
|
+
int16_t aux16[8];
|
609
|
+
float sums [8];
|
610
|
+
int32_t aux32[8];
|
611
|
+
memset(sums, 0, 8*sizeof(float));
|
612
|
+
|
613
|
+
float sumf = 0;
|
614
|
+
for (int i = 0; i < nb; ++i) {
|
615
|
+
const uint8_t * GGML_RESTRICT q4 = x[i].qs;
|
616
|
+
const int8_t * GGML_RESTRICT q8 = y[i].qs;
|
617
|
+
memset(aux32, 0, 8*sizeof(int32_t));
|
618
|
+
int8_t * GGML_RESTRICT a = aux8;
|
619
|
+
for (int j = 0; j < QK_K/64; ++j) {
|
620
|
+
for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
|
621
|
+
a += 32;
|
622
|
+
for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
|
623
|
+
a += 32; q4 += 32;
|
624
|
+
}
|
625
|
+
memcpy(utmp, x[i].scales, 12);
|
626
|
+
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
|
627
|
+
const uint32_t uaux = utmp[1] & kmask1;
|
628
|
+
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
629
|
+
utmp[2] = uaux;
|
630
|
+
utmp[0] &= kmask1;
|
631
|
+
|
632
|
+
int sumi = 0;
|
633
|
+
for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
|
634
|
+
a = aux8;
|
635
|
+
int is = 0;
|
636
|
+
for (int j = 0; j < QK_K/32; ++j) {
|
637
|
+
int32_t scale = scales[is++];
|
638
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
639
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
640
|
+
q8 += 8; a += 8;
|
641
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
642
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
643
|
+
q8 += 8; a += 8;
|
644
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
645
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
646
|
+
q8 += 8; a += 8;
|
647
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
648
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
649
|
+
q8 += 8; a += 8;
|
650
|
+
}
|
651
|
+
const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
|
652
|
+
for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
|
653
|
+
const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
|
654
|
+
sumf -= dmin * sumi;
|
655
|
+
}
|
656
|
+
for (int l = 0; l < 8; ++l) sumf += sums[l];
|
657
|
+
*s = sumf;
|
658
|
+
#endif
|
659
|
+
}
|
660
|
+
|
661
|
+
void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
662
|
+
assert(n % QK_K == 0);
|
663
|
+
assert(nrc == 1);
|
664
|
+
UNUSED(nrc);
|
665
|
+
UNUSED(bx);
|
666
|
+
UNUSED(by);
|
667
|
+
UNUSED(bs);
|
668
|
+
|
669
|
+
const block_q5_K * GGML_RESTRICT x = vx;
|
670
|
+
const block_q8_K * GGML_RESTRICT y = vy;
|
671
|
+
|
672
|
+
const int nb = n / QK_K;
|
673
|
+
|
674
|
+
static const uint32_t kmask1 = 0x3f3f3f3f;
|
675
|
+
static const uint32_t kmask2 = 0x0f0f0f0f;
|
676
|
+
static const uint32_t kmask3 = 0x03030303;
|
677
|
+
|
678
|
+
uint32_t utmp[4];
|
679
|
+
|
680
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
681
|
+
const uint8x16_t v_lm = vec_splat_u8(0x0F);
|
682
|
+
const uint8x16_t v_1m = vec_splat_u8(0x01);
|
683
|
+
const uint8x16_t v_2m = vec_splat_u8(0x02);
|
684
|
+
|
685
|
+
const int32x4_t v_z = vec_splat_s32(0);
|
686
|
+
|
687
|
+
const uchar8x16_t v_minsm = {
|
688
|
+
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
|
689
|
+
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
|
690
|
+
};
|
691
|
+
|
692
|
+
int8x16_t q5b[4];
|
693
|
+
uint8x16_t q5h[4];
|
694
|
+
|
695
|
+
uint8x16_t v_xl[2];
|
696
|
+
uint8x16_t v_xh[2];
|
697
|
+
int8x16_t v_y[4];
|
698
|
+
|
699
|
+
float sumf = 0;
|
700
|
+
|
701
|
+
for (int i = 0; i < nb; ++i) {
|
702
|
+
const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
|
703
|
+
const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
|
704
|
+
|
705
|
+
const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
|
706
|
+
const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
|
707
|
+
const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh);
|
708
|
+
|
709
|
+
memcpy(utmp, x[i].scales, 12);
|
710
|
+
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
|
711
|
+
const uint32_t uaux = utmp[1] & kmask1;
|
712
|
+
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
713
|
+
utmp[2] = uaux;
|
714
|
+
utmp[0] &= kmask1;
|
715
|
+
|
716
|
+
const uint8x16_t v_mins16 = vec_xl(0, (const uint8_t *)utmp);
|
717
|
+
const uint8x16_t v_mins8 = vec_perm(v_mins16, v_mins16, v_minsm);
|
718
|
+
const int16x8_t v_minsh = (int16x8_t)vec_unpackh(v_mins8);
|
719
|
+
|
720
|
+
const int32x4_t v_minsho = vec_mulo(v_ysums, v_minsh);
|
721
|
+
const int32x4_t v_minshe = vec_mule(v_ysums, v_minsh);
|
722
|
+
const int32x4_t v_mins = vec_add(v_minsho, v_minshe);
|
723
|
+
const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3];
|
724
|
+
|
725
|
+
const uint8_t * scales = (const uint8_t *)utmp;
|
726
|
+
const uint8_t * GGML_RESTRICT x0l = x[i].qs;
|
727
|
+
const uint8_t * GGML_RESTRICT x0h = x[i].qh;
|
728
|
+
const int8_t * GGML_RESTRICT y0 = y[i].qs;
|
729
|
+
|
730
|
+
v_xh[0] = vec_xl(0 , x0h);
|
731
|
+
v_xh[1] = vec_xl(16, x0h);
|
732
|
+
|
733
|
+
int32_t sumi = 0;
|
734
|
+
for (int j = 0; j < QK_K/64; ++j) {
|
735
|
+
v_xl[0] = vec_xl(0 , x0l);
|
736
|
+
v_xl[1] = vec_xl(16, x0l);
|
737
|
+
x0l += 32;
|
738
|
+
|
739
|
+
v_y[0] = vec_xl(0 , y0);
|
740
|
+
v_y[1] = vec_xl(16, y0);
|
741
|
+
v_y[2] = vec_xl(32, y0);
|
742
|
+
v_y[3] = vec_xl(48, y0);
|
743
|
+
y0 += 64;
|
744
|
+
|
745
|
+
q5h[0] = vec_sl(vec_and(v_1m, v_xh[0]), 4);
|
746
|
+
q5h[1] = vec_sl(vec_and(v_1m, v_xh[1]), 4);
|
747
|
+
q5h[2] = vec_sl(vec_and(v_2m, v_xh[0]), 3);
|
748
|
+
q5h[3] = vec_sl(vec_and(v_2m, v_xh[1]), 3);
|
749
|
+
v_xh[0] = vec_sr(v_xh[0], 2);
|
750
|
+
v_xh[1] = vec_sr(v_xh[1], 2);
|
751
|
+
|
752
|
+
q5b[0] = (int8x16_t)vec_or(vec_and(v_xl[0], v_lm), q5h[0]);
|
753
|
+
q5b[1] = (int8x16_t)vec_or(vec_and(v_xl[1], v_lm), q5h[1]);
|
754
|
+
q5b[2] = (int8x16_t)vec_or(vec_sr(v_xl[0], 4), q5h[2]);
|
755
|
+
q5b[3] = (int8x16_t)vec_or(vec_sr(v_xl[1], 4), q5h[3]);
|
756
|
+
|
757
|
+
int32x4_t sumi0 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[0], v_y[0]), q5b[1], v_y[1]);
|
758
|
+
int32x4_t sumi1 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[2], v_y[2]), q5b[3], v_y[3]);
|
759
|
+
|
760
|
+
sumi += (sumi0[0] + sumi0[1] + sumi0[2] + sumi0[3]) * *scales++;
|
761
|
+
sumi += (sumi1[0] + sumi1[1] + sumi1[2] + sumi1[3]) * *scales++;
|
762
|
+
}
|
763
|
+
|
764
|
+
sumf += d * sumi - dmin * mins;
|
765
|
+
}
|
766
|
+
|
767
|
+
*s = sumf;
|
768
|
+
|
769
|
+
#else
|
770
|
+
|
771
|
+
const uint8_t * scales = (const uint8_t*)&utmp[0];
|
772
|
+
const uint8_t * mins = (const uint8_t*)&utmp[2];
|
773
|
+
|
774
|
+
int8_t aux8[QK_K];
|
775
|
+
int16_t aux16[8];
|
776
|
+
float sums [8];
|
777
|
+
int32_t aux32[8];
|
778
|
+
memset(sums, 0, 8*sizeof(float));
|
779
|
+
|
780
|
+
float sumf = 0;
|
781
|
+
for (int i = 0; i < nb; ++i) {
|
782
|
+
const uint8_t * GGML_RESTRICT q4 = x[i].qs;
|
783
|
+
const uint8_t * GGML_RESTRICT hm = x[i].qh;
|
784
|
+
const int8_t * GGML_RESTRICT q8 = y[i].qs;
|
785
|
+
memset(aux32, 0, 8*sizeof(int32_t));
|
786
|
+
int8_t * GGML_RESTRICT a = aux8;
|
787
|
+
uint8_t m = 1;
|
788
|
+
for (int j = 0; j < QK_K/64; ++j) {
|
789
|
+
for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
|
790
|
+
for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
|
791
|
+
a += 32; m <<= 1;
|
792
|
+
for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
|
793
|
+
for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
|
794
|
+
a += 32; m <<= 1;
|
795
|
+
q4 += 32;
|
796
|
+
}
|
797
|
+
memcpy(utmp, x[i].scales, 12);
|
798
|
+
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
|
799
|
+
const uint32_t uaux = utmp[1] & kmask1;
|
800
|
+
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
|
801
|
+
utmp[2] = uaux;
|
802
|
+
utmp[0] &= kmask1;
|
803
|
+
|
804
|
+
int sumi = 0;
|
805
|
+
for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
|
806
|
+
a = aux8;
|
807
|
+
int is = 0;
|
808
|
+
for (int j = 0; j < QK_K/32; ++j) {
|
809
|
+
int32_t scale = scales[is++];
|
810
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
811
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
812
|
+
q8 += 8; a += 8;
|
813
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
814
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
815
|
+
q8 += 8; a += 8;
|
816
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
817
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
818
|
+
q8 += 8; a += 8;
|
819
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
820
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
821
|
+
q8 += 8; a += 8;
|
822
|
+
}
|
823
|
+
const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
|
824
|
+
for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
|
825
|
+
const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
|
826
|
+
sumf -= dmin * sumi;
|
827
|
+
}
|
828
|
+
for (int l = 0; l < 8; ++l) sumf += sums[l];
|
829
|
+
*s = sumf;
|
830
|
+
#endif
|
831
|
+
}
|
832
|
+
|
833
|
+
void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
|
834
|
+
assert(n % QK_K == 0);
|
835
|
+
assert(nrc == 1);
|
836
|
+
UNUSED(nrc);
|
837
|
+
UNUSED(bx);
|
838
|
+
UNUSED(by);
|
839
|
+
UNUSED(bs);
|
840
|
+
|
841
|
+
const block_q6_K * GGML_RESTRICT x = vx;
|
842
|
+
const block_q8_K * GGML_RESTRICT y = vy;
|
843
|
+
|
844
|
+
const int nb = n / QK_K;
|
845
|
+
|
846
|
+
#if defined(__VXE__) || defined(__VXE2__)
|
847
|
+
float sum = 0;
|
848
|
+
|
849
|
+
// Lower 4-bit and upper 2-bit masks
|
850
|
+
const uint8x16_t v_lm = vec_splat_u8(0x0F);
|
851
|
+
const uint8x16_t v_um = vec_splat_u8(0x03);
|
852
|
+
|
853
|
+
const int32x4_t v_z = vec_splat_s32(0);
|
854
|
+
|
855
|
+
int8x16_t q6b[4];
|
856
|
+
uint8x16_t q6h[4];
|
857
|
+
|
858
|
+
uint8x16_t v_xl[4];
|
859
|
+
uint8x16_t v_xh[2];
|
860
|
+
int8x16_t v_y[4];
|
861
|
+
|
862
|
+
for (int i = 0; i < nb; ++i) {
|
863
|
+
const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d);
|
864
|
+
|
865
|
+
const uint8_t * GGML_RESTRICT x0l = x[i].ql;
|
866
|
+
const uint8_t * GGML_RESTRICT x0h = x[i].qh;
|
867
|
+
const int8_t * GGML_RESTRICT y0 = y[i].qs;
|
868
|
+
|
869
|
+
const int8_t * GGML_RESTRICT scale = x[i].scales;
|
870
|
+
|
871
|
+
const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
|
872
|
+
const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
|
873
|
+
|
874
|
+
const int8x16_t v_scale = vec_xl(0, scale);
|
875
|
+
const int16x8_t v_scalel = vec_unpackh(v_scale);
|
876
|
+
const int16x8_t v_scaleh = vec_unpackl(v_scale);
|
877
|
+
|
878
|
+
const int32x4_t v_minslo = vec_mulo(v_ysumsl, v_scalel);
|
879
|
+
const int32x4_t v_minsle = vec_mule(v_ysumsl, v_scalel);
|
880
|
+
const int32x4_t v_minsho = vec_mulo(v_ysumsh, v_scaleh);
|
881
|
+
const int32x4_t v_minshe = vec_mule(v_ysumsh, v_scaleh);
|
882
|
+
const int32x4_t v_mins = v_minslo + v_minsle + v_minsho + v_minshe;
|
883
|
+
|
884
|
+
const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3];
|
885
|
+
|
886
|
+
int32_t isum = 0;
|
887
|
+
for (int j = 0; j < QK_K/128; ++j) {
|
888
|
+
// Load model upper 2 bits
|
889
|
+
v_xh[0] = vec_xl(0 , x0h);
|
890
|
+
v_xh[1] = vec_xl(16, x0h);
|
891
|
+
x0h += 32;
|
892
|
+
|
893
|
+
// Load model lower 4 bits
|
894
|
+
v_xl[0] = vec_xl(0 , x0l);
|
895
|
+
v_xl[1] = vec_xl(16, x0l);
|
896
|
+
v_xl[2] = vec_xl(32, x0l);
|
897
|
+
v_xl[3] = vec_xl(48, x0l);
|
898
|
+
x0l += 64;
|
899
|
+
|
900
|
+
// Load activation quants
|
901
|
+
v_y[0] = vec_xl(0 , y0);
|
902
|
+
v_y[1] = vec_xl(16, y0);
|
903
|
+
v_y[2] = vec_xl(32, y0);
|
904
|
+
v_y[3] = vec_xl(48, y0);
|
905
|
+
y0 += 64;
|
906
|
+
|
907
|
+
q6h[0] = vec_sl(vec_and(v_um, v_xh[0]), 4);
|
908
|
+
q6h[1] = vec_sl(vec_and(v_um, v_xh[1]), 4);
|
909
|
+
uint8x16_t shifted = vec_sr(v_xh[0], 2);
|
910
|
+
q6h[2] = vec_sl(vec_and(v_um, shifted), 4);
|
911
|
+
shifted = vec_sr(v_xh[1], 2);
|
912
|
+
q6h[3] = vec_sl(vec_and(v_um, shifted), 4);
|
913
|
+
|
914
|
+
q6b[0] = (int8x16_t)(vec_or(vec_and(v_xl[0], v_lm), q6h[0]));
|
915
|
+
q6b[1] = (int8x16_t)(vec_or(vec_and(v_xl[1], v_lm), q6h[1]));
|
916
|
+
q6b[2] = (int8x16_t)(vec_or(vec_and(v_xl[2], v_lm), q6h[2]));
|
917
|
+
q6b[3] = (int8x16_t)(vec_or(vec_and(v_xl[3], v_lm), q6h[3]));
|
918
|
+
|
919
|
+
int32x4_t summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]);
|
920
|
+
int32x4_t summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]);
|
921
|
+
int32x4_t summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]);
|
922
|
+
int32x4_t summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]);
|
923
|
+
|
924
|
+
isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] +
|
925
|
+
(summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] +
|
926
|
+
(summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] +
|
927
|
+
(summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3];
|
928
|
+
|
929
|
+
scale += 4;
|
930
|
+
|
931
|
+
|
932
|
+
// Load activation quants
|
933
|
+
v_y[0] = vec_xl(0 , y0);
|
934
|
+
v_y[1] = vec_xl(16, y0);
|
935
|
+
v_y[2] = vec_xl(32, y0);
|
936
|
+
v_y[3] = vec_xl(48, y0);
|
937
|
+
y0 += 64;
|
938
|
+
|
939
|
+
shifted = vec_sr(v_xh[0], 4);
|
940
|
+
q6h[0] = vec_sl(vec_and(v_um, shifted), 4);
|
941
|
+
shifted = vec_sr(v_xh[1], 4);
|
942
|
+
q6h[1] = vec_sl(vec_and(v_um, shifted), 4);
|
943
|
+
shifted = vec_sr(v_xh[0], 6);
|
944
|
+
q6h[2] = vec_sl(vec_and(v_um, shifted), 4);
|
945
|
+
shifted = vec_sr(v_xh[1], 6);
|
946
|
+
q6h[3] = vec_sl(vec_and(v_um, shifted), 4);
|
947
|
+
|
948
|
+
q6b[0] = (int8x16_t)(vec_or(vec_sr(v_xl[0], 4), q6h[0]));
|
949
|
+
q6b[1] = (int8x16_t)(vec_or(vec_sr(v_xl[1], 4), q6h[1]));
|
950
|
+
q6b[2] = (int8x16_t)(vec_or(vec_sr(v_xl[2], 4), q6h[2]));
|
951
|
+
q6b[3] = (int8x16_t)(vec_or(vec_sr(v_xl[3], 4), q6h[3]));
|
952
|
+
|
953
|
+
summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]);
|
954
|
+
summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]);
|
955
|
+
summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]);
|
956
|
+
summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]);
|
957
|
+
|
958
|
+
isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] +
|
959
|
+
(summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] +
|
960
|
+
(summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] +
|
961
|
+
(summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3];
|
962
|
+
|
963
|
+
scale += 4;
|
964
|
+
}
|
965
|
+
|
966
|
+
sum += d_all * y[i].d * (isum - 32 * mins);
|
967
|
+
}
|
968
|
+
|
969
|
+
*s = sum;
|
970
|
+
|
971
|
+
#else
|
972
|
+
|
973
|
+
int8_t aux8[QK_K];
|
974
|
+
int16_t aux16[8];
|
975
|
+
float sums [8];
|
976
|
+
int32_t aux32[8];
|
977
|
+
memset(sums, 0, 8*sizeof(float));
|
978
|
+
|
979
|
+
float sumf = 0;
|
980
|
+
for (int i = 0; i < nb; ++i) {
|
981
|
+
const uint8_t * GGML_RESTRICT q4 = x[i].ql;
|
982
|
+
const uint8_t * GGML_RESTRICT qh = x[i].qh;
|
983
|
+
const int8_t * GGML_RESTRICT q8 = y[i].qs;
|
984
|
+
memset(aux32, 0, 8*sizeof(int32_t));
|
985
|
+
int8_t * GGML_RESTRICT a = aux8;
|
986
|
+
for (int j = 0; j < QK_K; j += 128) {
|
987
|
+
for (int l = 0; l < 32; ++l) {
|
988
|
+
a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
|
989
|
+
a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
|
990
|
+
a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
|
991
|
+
a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
|
992
|
+
}
|
993
|
+
a += 128;
|
994
|
+
q4 += 64;
|
995
|
+
qh += 32;
|
996
|
+
}
|
997
|
+
a = aux8;
|
998
|
+
int is = 0;
|
999
|
+
for (int j = 0; j < QK_K/16; ++j) {
|
1000
|
+
int scale = x[i].scales[is++];
|
1001
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
1002
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
1003
|
+
q8 += 8; a += 8;
|
1004
|
+
for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
|
1005
|
+
for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
|
1006
|
+
q8 += 8; a += 8;
|
1007
|
+
}
|
1008
|
+
const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
|
1009
|
+
for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
|
1010
|
+
}
|
1011
|
+
for (int l = 0; l < 8; ++l) sumf += sums[l];
|
1012
|
+
*s = sumf;
|
1013
|
+
#endif
|
1014
|
+
}
|
1015
|
+
|
+ // #if defined(__VXE__) || defined(__VXE2__)
+ // static const int8_t keven_signs_q2xs[1024] = {
+ //     1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
+ //     1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
+ //     1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
+ //     1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
+ //     1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
+ //     1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
+ //     1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
+ //     1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
+ //     1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
+ //     1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
+ //     1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
+ //     1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
+ //     1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
+ //     1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
+ //     1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
+ //     1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
+ //     1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
+ //     1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
+ //     1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
+ //     1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
+ //     1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
+ //     1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
+ //     1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
+ //     1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
+ //     1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
+ //     1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
+ //     1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
+ //     1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
+ //     1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
+ //     1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
+ //     1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
+ //     1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
+ // };
+ // #endif
+
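The commented-out table above holds the 128 even-parity sign patterns used by the IQ2_XXS kernels: each 7-bit index expands to 8 signs whose product is +1 (the 8th sign is the parity of the first 7). A hedged, self-contained sketch that regenerates it (not part of the package):

```c
// regenerate keven_signs_q2xs: for each 7-bit index k, emit 8 signs,
// where a set bit means -1 and bit 7 is the parity of bits 0..6
#include <stdio.h>

int main(void) {
    for (int k = 0; k < 128; ++k) {
        int parity = 0;
        for (int j = 0; j < 7; ++j) parity ^= (k >> j) & 1;
        const int bits = k | (parity << 7);   // keeps the sign count even
        for (int j = 0; j < 8; ++j) printf("%2d, ", (bits >> j) & 1 ? -1 : 1);
        if (k % 4 == 3) printf("\n");
    }
    return 0;
}
```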
+ // void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ //     assert(n % QK_K == 0);
+ //     assert(nrc == 1);
+ //     UNUSED(nrc);
+ //     UNUSED(bx);
+ //     UNUSED(by);
+ //     UNUSED(bs);
+
+ //     const block_iq2_xxs * GGML_RESTRICT x = vx;
+ //     const block_q8_K * GGML_RESTRICT y = vy;
+
+ //     const int nb = n / QK_K;
+
+ // #if defined(__VXE__) || defined(__VXE2__)
+ //     const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ //     uint32_t aux32[4];
+ //     const uint8_t * aux8 = (const uint8_t *)aux32;
+
+ //     float sumf = 0;
+
+ //     for (int i = 0; i < nb; ++i) {
+ //         const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ //         const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ //         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ //         float sumf1 = 0, sumf2 = 0;
+
+ //         for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ //             int8x16_t q8b0 = vec_xl( 0, q8);
+ //             int8x16_t q8b1 = vec_xl(16, q8);
+ //             int8x16_t q8b2 = vec_xl(32, q8);
+ //             int8x16_t q8b3 = vec_xl(48, q8);
+ //             q8 += 64;
+
+ //             memcpy(aux32, q2, 4 * sizeof(uint32_t));
+ //             q2 += 8;
+
+ //             int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) };
+ //             int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) };
+ //             int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) };
+ //             int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) };
+
+ //             int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >>  0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >>  7) & 127)) };
+ //             int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) };
+ //             int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >>  0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >>  7) & 127)) };
+ //             int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) };
+
+ //             q2u0 = vec_mul(q2u0, q2s0);
+ //             q2u1 = vec_mul(q2u1, q2s1);
+ //             q2u2 = vec_mul(q2u2, q2s2);
+ //             q2u3 = vec_mul(q2u3, q2s3);
+
+ //             const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1);
+ //             const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3);
+
+ //             sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28));
+ //             sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28));
+ //         }
+
+ //         sumf += d * (sumf1 + sumf2);
+ //     }
+
+ //     *s = 0.25f * sumf;
+
+ // #else
+
+ //     uint32_t aux32[2];
+ //     const uint8_t * aux8 = (const uint8_t *)aux32;
+
+ //     float sumf = 0.f;
+ //     for (int i = 0; i < nb; ++i) {
+ //         const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ //         const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ //         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ //         int32_t bsum = 0;
+ //         for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ //             memcpy(aux32, q2, 2*sizeof(uint32_t));
+ //             q2 += 4;
+ //             const uint32_t ls = 2*(aux32[1] >> 28) + 1;
+ //             int32_t sumi = 0;
+ //             for (int l = 0; l < 4; ++l) {
+ //                 const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
+ //                 const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
+ //                 for (int j = 0; j < 8; ++j) {
+ //                     sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
+ //                 }
+ //                 q8 += 8;
+ //             }
+ //             bsum += sumi * ls;
+ //         }
+ //         sumf += d * bsum;
+ //     }
+ //     *s = 0.125f * sumf;
+ // #endif
+ // }
+
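For readers of the commented-out kernel above, a hedged standalone sketch of how one 32-weight IQ2_XXS sub-block decodes, assuming the iq2xxs_grid / ksigns_iq2xs / kmask_iq2xs tables from ggml-common.h; it mirrors the scalar fallback path, with the 0.25f super-block factor folded in:

```c
// decode one IQ2_XXS sub-block: aux32[0] holds four grid indices in its
// bytes, aux32[1] holds four 7-bit sign indices plus a 4-bit scale on top
#include <stdint.h>

extern const uint64_t iq2xxs_grid[256];   // 8 packed magnitudes per entry
extern const uint8_t  ksigns_iq2xs[128];  // 7-bit index -> 8 sign bits
extern const uint8_t  kmask_iq2xs[8];     // single-bit masks for 8 positions

static void iq2_xxs_decode_block(float dall, const uint32_t aux32[2], float out[32]) {
    const uint8_t * aux8 = (const uint8_t *)aux32;
    const float db = dall * (0.5f + (aux32[1] >> 28)) * 0.25f; // sub-block scale
    for (int l = 0; l < 4; ++l) {
        const uint8_t * grid  = (const uint8_t *)(iq2xxs_grid + aux8[l]);
        const uint8_t   signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
        for (int j = 0; j < 8; ++j) {
            out[8*l + j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
        }
    }
}
```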
+ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+     assert(n % QK4_NL == 0);
+     static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
+
+     const block_iq4_nl * GGML_RESTRICT x = vx;
+     const block_q8_0 * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK4_NL;
+
+     int ib = 0;
+     float sumf = 0;
+
+ #if defined(__VXE__) || defined(__VXE2__)
+     const int8x16_t v_k = vec_xl(0, kvalues_iq4nl);
+     const uint8x16_t v_m = vec_splat_u8(0x0F);
+
+     for (; ib < nb; ++ib) {
+         const block_iq4_nl * GGML_RESTRICT x0 = &x[ib];
+         const block_q8_0 * GGML_RESTRICT y0 = &y[ib];
+
+         const uint8x16_t v_x = vec_xl(0, x0->qs);
+         int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m);
+         int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4);
+
+         v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl);
+         v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh);
+
+         const int8x16_t v_yl = vec_xl(0      , y0->qs);
+         const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs);
+         const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh);
+
+         sumf += GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]);
+     }
+
+ #endif
+     for (; ib < nb; ++ib) {
+         const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
+         int sumi1 = 0, sumi2 = 0;
+         for (int j = 0; j < QK4_NL/2; ++j) {
+             sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
+             sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
+         }
+         sumf += d * (sumi1 + sumi2);
+     }
+     *s = sumf;
+ }
+
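In the VXE path above, vec_perm acts as a 16-entry table lookup: each 4-bit nibble of the packed quants selects one of the 16 signed codebook values. A hedged scalar sketch of that unpacking step (iq4_nl_unpack is a hypothetical name; the table literal is the kvalues_iq4nl codebook from ggml-common.h):

```c
// scalar equivalent of the vec_perm nibble lookup: low nibbles feed the
// first half of the 32-value block, high nibbles the second half
#include <stdint.h>

static const int8_t kvalues_iq4nl[16] = {
    -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
};

static void iq4_nl_unpack(const uint8_t qs[16], int8_t lo[16], int8_t hi[16]) {
    for (int j = 0; j < 16; ++j) {
        lo[j] = kvalues_iq4nl[qs[j] & 0x0F]; // values 0..15 of the block
        hi[j] = kvalues_iq4nl[qs[j] >> 4];   // values 16..31 of the block
    }
}
```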
+ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+     assert(n % QK_K == 0);
+
+     const block_iq4_xs * GGML_RESTRICT x = vx;
+     const block_q8_K * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK_K;
+
+ #if defined(__VXE__) || defined(__VXE2__)
+     const int8x16_t v_k = vec_xl(0, kvalues_iq4nl);
+     const uint8x16_t v_m = vec_splat_u8(0x0F);
+
+     float sumf = 0;
+
+     for (int ibl = 0; ibl < nb; ++ibl) {
+         const uint8_t * GGML_RESTRICT q4 = x[ibl].qs;
+         const int8_t * GGML_RESTRICT q8 = y[ibl].qs;
+
+         uint16_t h = x[ibl].scales_h;
+
+         int sumi1 = 0, sumi2 = 0;
+         for (int ib = 0; ib < QK_K/64; ++ib) {
+             const uint8x16_t v_x0 = vec_xl(0       , q4);
+             const uint8x16_t v_x1 = vec_xl(QK4_NL/2, q4);
+             q4 += 32;
+
+             int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m);
+             int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4);
+             int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m);
+             int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4);
+
+             v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l);
+             v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h);
+             v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l);
+             v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h);
+
+             const int8x16_t v_y0 = vec_xl( 0, q8);
+             const int8x16_t v_y1 = vec_xl(16, q8);
+             const int8x16_t v_y2 = vec_xl(32, q8);
+             const int8x16_t v_y3 = vec_xl(48, q8);
+             q8 += 64;
+
+             int32x4_t vsumi0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0), v_x0h, v_y1);
+             int32x4_t vsumi1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y2), v_x1h, v_y3);
+
+             int ls1 = ((x[ibl].scales_l[ib] & 0xF) | ((h << 4) & 0x30)) - 32;
+             int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
+
+             h >>= 4;
+
+             sumi1 += (vsumi0[0] + vsumi0[1] + vsumi0[2] + vsumi0[3]) * ls1;
+             sumi2 += (vsumi1[0] + vsumi1[1] + vsumi1[2] + vsumi1[3]) * ls2;
+         }
+
+         sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
+     }
+
+     *s = sumf;
+
+ #else
+     float sumf = 0;
+     for (int ibl = 0; ibl < nb; ++ibl) {
+         const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
+         uint16_t h = x[ibl].scales_h;
+         const uint8_t * qs = x[ibl].qs;
+         const int8_t * q8 = y[ibl].qs;
+         for (int ib = 0; ib < QK_K/32; ib += 2) {
+             const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
+             const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
+             h >>= 4;
+             const float d1 = d4d8*(ls1 - 32);
+             const float d2 = d4d8*(ls2 - 32);
+             int sumi1 = 0, sumi2 = 0;
+             for (int j = 0; j < 16; ++j) {
+                 sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
+                 sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
+             }
+             sumf += d1 * (sumi1 + sumi2);
+             qs += 16;
+             q8 += 32;
+             sumi1 = sumi2 = 0;
+             for (int j = 0; j < 16; ++j) {
+                 sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
+                 sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
+             }
+             sumf += d2 * (sumi1 + sumi2);
+             qs += 16;
+             q8 += 32;
+         }
+     }
+     *s = sumf;
+ #endif
+ }
+
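Both paths of ggml_vec_dot_iq4_xs_q8_K reconstruct the same signed 6-bit sub-block scales from two packed fields: the low 4 bits come from a scales_l nibble, the high 2 bits from the rolling scales_h word. A hedged sketch of that arithmetic as a standalone helper (iq4_xs_scale is a hypothetical name, not in the diff):

```c
// signed 6-bit scale of the ib-th 32-weight sub-block (ib in 0..7),
// equivalent to the ls1/ls2 computation in both paths above
#include <stdint.h>

static inline int iq4_xs_scale(const uint8_t scales_l[4], uint16_t scales_h, int ib) {
    const int lo = (scales_l[ib/2] >> 4*(ib & 1)) & 0x0F; // nibble per sub-block
    const int hi = (scales_h >> 2*ib) & 0x03;             // 2 bits per sub-block
    return ((hi << 4) | lo) - 32;                         // range [-32, 31]
}
```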