@fugood/llama.node 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CMakeLists.txt +1 -10
- package/bin/darwin/arm64/llama-node.node +0 -0
- package/bin/darwin/x64/llama-node.node +0 -0
- package/bin/linux/arm64/llama-node.node +0 -0
- package/bin/linux/x64/llama-node.node +0 -0
- package/bin/linux-vulkan/arm64/llama-node.node +0 -0
- package/bin/linux-vulkan/x64/llama-node.node +0 -0
- package/bin/win32/arm64/llama-node.node +0 -0
- package/bin/win32/arm64/node.lib +0 -0
- package/bin/win32/x64/llama-node.node +0 -0
- package/bin/win32/x64/node.lib +0 -0
- package/bin/win32-vulkan/arm64/llama-node.node +0 -0
- package/bin/win32-vulkan/arm64/node.lib +0 -0
- package/bin/win32-vulkan/x64/llama-node.node +0 -0
- package/bin/win32-vulkan/x64/node.lib +0 -0
- package/package.json +6 -4
- package/src/LlamaCompletionWorker.cpp +6 -6
- package/src/LlamaContext.cpp +7 -9
- package/src/common.hpp +2 -1
- package/src/llama.cpp/.github/workflows/build.yml +98 -24
- package/src/llama.cpp/.github/workflows/close-issue.yml +5 -0
- package/src/llama.cpp/.github/workflows/docker.yml +43 -34
- package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +7 -0
- package/src/llama.cpp/.github/workflows/nix-ci.yml +7 -0
- package/src/llama.cpp/.github/workflows/python-check-requirements.yml +2 -4
- package/src/llama.cpp/.github/workflows/python-type-check.yml +3 -1
- package/src/llama.cpp/.github/workflows/server.yml +7 -0
- package/src/llama.cpp/CMakeLists.txt +20 -8
- package/src/llama.cpp/common/CMakeLists.txt +12 -10
- package/src/llama.cpp/common/arg.cpp +2006 -0
- package/src/llama.cpp/common/arg.h +77 -0
- package/src/llama.cpp/common/common.cpp +496 -1632
- package/src/llama.cpp/common/common.h +161 -63
- package/src/llama.cpp/common/console.cpp +3 -0
- package/src/llama.cpp/common/log.cpp +401 -0
- package/src/llama.cpp/common/log.h +66 -698
- package/src/llama.cpp/common/ngram-cache.cpp +3 -0
- package/src/llama.cpp/common/sampling.cpp +348 -350
- package/src/llama.cpp/common/sampling.h +62 -139
- package/src/llama.cpp/common/stb_image.h +5990 -6398
- package/src/llama.cpp/common/train.cpp +2 -0
- package/src/llama.cpp/docs/build.md +36 -1
- package/src/llama.cpp/examples/CMakeLists.txt +0 -1
- package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +1 -2
- package/src/llama.cpp/examples/batched/batched.cpp +39 -55
- package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +34 -44
- package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +55 -52
- package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +15 -15
- package/src/llama.cpp/examples/cvector-generator/pca.hpp +3 -13
- package/src/llama.cpp/examples/embedding/embedding.cpp +143 -87
- package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +33 -33
- package/src/llama.cpp/examples/export-lora/export-lora.cpp +36 -35
- package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +14 -39
- package/src/llama.cpp/examples/gen-docs/CMakeLists.txt +5 -0
- package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +83 -0
- package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +58 -39
- package/src/llama.cpp/examples/gritlm/gritlm.cpp +34 -27
- package/src/llama.cpp/examples/imatrix/imatrix.cpp +59 -62
- package/src/llama.cpp/examples/infill/infill.cpp +117 -132
- package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +265 -58
- package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +29 -22
- package/src/llama.cpp/examples/llava/CMakeLists.txt +7 -0
- package/src/llama.cpp/examples/llava/clip.cpp +685 -150
- package/src/llama.cpp/examples/llava/clip.h +11 -2
- package/src/llama.cpp/examples/llava/llava-cli.cpp +47 -58
- package/src/llama.cpp/examples/llava/llava.cpp +110 -24
- package/src/llama.cpp/examples/llava/llava.h +2 -3
- package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +323 -0
- package/src/llama.cpp/examples/llava/requirements.txt +1 -0
- package/src/llama.cpp/examples/lookahead/lookahead.cpp +42 -43
- package/src/llama.cpp/examples/lookup/lookup-create.cpp +10 -8
- package/src/llama.cpp/examples/lookup/lookup-stats.cpp +23 -22
- package/src/llama.cpp/examples/lookup/lookup.cpp +40 -43
- package/src/llama.cpp/examples/main/main.cpp +210 -262
- package/src/llama.cpp/examples/parallel/parallel.cpp +49 -49
- package/src/llama.cpp/examples/passkey/passkey.cpp +42 -50
- package/src/llama.cpp/examples/perplexity/perplexity.cpp +187 -200
- package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
- package/src/llama.cpp/examples/quantize/quantize.cpp +27 -9
- package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +2 -3
- package/src/llama.cpp/examples/retrieval/retrieval.cpp +49 -44
- package/src/llama.cpp/examples/rpc/rpc-server.cpp +24 -1
- package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +32 -35
- package/src/llama.cpp/examples/server/CMakeLists.txt +3 -5
- package/src/llama.cpp/examples/server/server.cpp +1027 -1073
- package/src/llama.cpp/examples/server/tests/requirements.txt +2 -1
- package/src/llama.cpp/examples/server/utils.hpp +107 -105
- package/src/llama.cpp/examples/simple/simple.cpp +35 -41
- package/src/llama.cpp/examples/speculative/speculative.cpp +129 -103
- package/src/llama.cpp/examples/sycl/run-llama2.sh +10 -19
- package/src/llama.cpp/examples/sycl/win-run-llama2.bat +1 -1
- package/src/llama.cpp/examples/tokenize/tokenize.cpp +25 -27
- package/src/llama.cpp/ggml/CMakeLists.txt +14 -3
- package/src/llama.cpp/ggml/include/ggml-alloc.h +3 -3
- package/src/llama.cpp/ggml/include/ggml-backend.h +145 -60
- package/src/llama.cpp/ggml/include/ggml-blas.h +3 -3
- package/src/llama.cpp/ggml/include/ggml-cann.h +15 -19
- package/src/llama.cpp/ggml/include/ggml-cuda.h +16 -16
- package/src/llama.cpp/ggml/include/ggml-metal.h +5 -8
- package/src/llama.cpp/ggml/include/ggml-rpc.h +5 -5
- package/src/llama.cpp/ggml/include/ggml-sycl.h +8 -8
- package/src/llama.cpp/ggml/include/ggml-vulkan.h +7 -7
- package/src/llama.cpp/ggml/include/ggml.h +293 -186
- package/src/llama.cpp/ggml/src/CMakeLists.txt +86 -44
- package/src/llama.cpp/ggml/src/ggml-aarch64.c +2135 -1119
- package/src/llama.cpp/ggml/src/ggml-alloc.c +6 -0
- package/src/llama.cpp/ggml/src/ggml-backend-impl.h +152 -70
- package/src/llama.cpp/ggml/src/{ggml-backend.c → ggml-backend.cpp} +606 -286
- package/src/llama.cpp/ggml/src/ggml-blas.cpp +9 -10
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +4 -27
- package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +32 -4
- package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +179 -41
- package/src/llama.cpp/ggml/src/ggml-cann/common.h +1 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -1
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +2 -0
- package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +278 -0
- package/src/llama.cpp/ggml/src/ggml-cann.cpp +215 -216
- package/src/llama.cpp/ggml/src/ggml-common.h +20 -0
- package/src/llama.cpp/ggml/src/ggml-cpu-impl.h +614 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +178 -0
- package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +134 -0
- package/src/llama.cpp/ggml/src/ggml-impl.h +49 -603
- package/src/llama.cpp/ggml/src/ggml-kompute.cpp +4 -24
- package/src/llama.cpp/ggml/src/ggml-quants.c +972 -92
- package/src/llama.cpp/ggml/src/ggml-quants.h +15 -0
- package/src/llama.cpp/ggml/src/ggml-rpc.cpp +116 -66
- package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +3 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +11 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +52 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +99 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +21 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +57 -57
- package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +106 -106
- package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +4 -4
- package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +16 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +101 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +125 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +23 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +6 -3
- package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +2 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +1 -1
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +71 -0
- package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +21 -0
- package/src/llama.cpp/ggml/src/ggml-sycl.cpp +97 -169
- package/src/llama.cpp/ggml/src/ggml-vulkan.cpp +1508 -1124
- package/src/llama.cpp/ggml/src/ggml.c +3001 -1647
- package/src/llama.cpp/ggml/src/llamafile/sgemm.cpp +192 -0
- package/src/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt +2 -0
- package/src/llama.cpp/ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +88 -40
- package/src/llama.cpp/include/llama.h +241 -264
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +112 -0
- package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +46 -0
- package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +1 -1
- package/src/llama.cpp/src/llama-grammar.cpp +721 -122
- package/src/llama.cpp/src/llama-grammar.h +120 -15
- package/src/llama.cpp/src/llama-impl.h +156 -1
- package/src/llama.cpp/src/llama-sampling.cpp +1375 -303
- package/src/llama.cpp/src/llama-sampling.h +20 -47
- package/src/llama.cpp/src/llama-vocab.cpp +343 -120
- package/src/llama.cpp/src/llama-vocab.h +33 -17
- package/src/llama.cpp/src/llama.cpp +4247 -1525
- package/src/llama.cpp/src/unicode-data.cpp +6 -4
- package/src/llama.cpp/src/unicode-data.h +4 -4
- package/src/llama.cpp/src/unicode.cpp +15 -7
- package/src/llama.cpp/tests/CMakeLists.txt +3 -0
- package/src/llama.cpp/tests/test-arg-parser.cpp +131 -0
- package/src/llama.cpp/tests/test-backend-ops.cpp +1592 -289
- package/src/llama.cpp/tests/test-barrier.cpp +93 -0
- package/src/llama.cpp/tests/test-grad0.cpp +187 -70
- package/src/llama.cpp/tests/test-grammar-integration.cpp +23 -38
- package/src/llama.cpp/tests/test-grammar-parser.cpp +6 -4
- package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +6 -4
- package/src/llama.cpp/tests/test-llama-grammar.cpp +9 -8
- package/src/llama.cpp/tests/test-log.cpp +39 -0
- package/src/llama.cpp/tests/test-quantize-fns.cpp +6 -0
- package/src/llama.cpp/tests/test-rope.cpp +1 -1
- package/src/llama.cpp/tests/test-sampling.cpp +157 -98
- package/src/llama.cpp/tests/test-tokenizer-0.cpp +55 -35
- package/patches/llama.patch +0 -22
- package/src/llama.cpp/.github/workflows/bench.yml +0 -310
- package/src/llama.cpp/common/grammar-parser.cpp +0 -536
- package/src/llama.cpp/common/grammar-parser.h +0 -29
- package/src/llama.cpp/examples/benchmark/CMakeLists.txt +0 -6
- package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +0 -275
package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp
@@ -0,0 +1,278 @@
+#include "kernel_operator.h"
+
+using namespace AscendC;
+
+#define BUFFER_NUM 2
+#define Group_Size 32
+
+template <typename SRC_T>
+class QUANTIZE_FLOAT_TO_Q4_0 {
+   public:
+    __aicore__ inline QUANTIZE_FLOAT_TO_Q4_0() {}
+    __aicore__ inline void init(GM_ADDR input, GM_ADDR output,
+                                int64_t *input_ne_ub, size_t *input_nb_ub,
+                                int64_t *output_ne_ub) {
+        // TODO: fix test_case CPY(type_src=f16,type_dst=q4_0,ne=[256,4,4,4],
+        //       permute=[0,0,0,0]):
+        //       [CPY] NMSE = 0.000008343 > 0.000001000 FAIL
+        int64_t op_block_num = GetBlockNum();
+        int64_t op_block_idx = GetBlockIdx();
+
+        // input stride of data elements
+        for (int i = 0; i < 4; i++) {
+            input_ne[i] = input_ne_ub[i];
+            input_stride[i] = input_nb_ub[i] / input_nb_ub[0];
+            output_ne[i] = output_ne_ub[i];
+        }
+
+        // output stride of data elements
+        output_stride[0] = 1;
+        for (int i = 1; i < 4; i++) {
+            output_stride[i] = output_stride[i - 1] * output_ne[i - 1];
+        }
+
+        // scale saved one by one after data:. [group1_scale, group2_scale, ...]
+        scale_ne = input_ne;
+        scale_stride[0] = 1;
+        scale_stride[1] = input_ne[0] / Group_Size;
+        for (int i = 2; i < 4; i++) {
+            scale_stride[i] = scale_stride[i - 1] * scale_ne[i - 1];
+        }
+
+        // split input tensor by rows.
+        uint64_t nr = input_ne[1] * input_ne[2] * input_ne[3];
+        dr = nr / op_block_num;
+
+        uint64_t tails = nr % op_block_num;
+        if (op_block_idx < tails) {
+            dr += 1;
+            ir = dr * op_block_idx;
+        } else {
+            ir = dr * op_block_idx + tails;
+        }
+
+        group_size_in_row = scale_stride[1];
+        int64_t scale_offset = output_ne[0] * output_ne[1] * output_ne[2] *
+                               output_ne[3] * sizeof(uint8_t) / 2;
+
+        input_gm.SetGlobalBuffer((__gm__ SRC_T *)input);
+        output_gm.SetGlobalBuffer((__gm__ int8_t *)output);
+        scale_gm.SetGlobalBuffer((__gm__ half *)(output + scale_offset + ir *
+                                                 group_size_in_row *
+                                                 sizeof(half)));
+
+        pipe.InitBuffer(input_queue, BUFFER_NUM, Group_Size * sizeof(SRC_T));
+        pipe.InitBuffer(output_queue, BUFFER_NUM,
+                        Group_Size * sizeof(int8_t) / 2);
+        pipe.InitBuffer(cast_queue , 1, Group_Size * sizeof(float));
+        pipe.InitBuffer(work_queue, 1, Group_Size * sizeof(float));
+        pipe.InitBuffer(max_queue, 1, Group_Size * sizeof(float));
+        pipe.InitBuffer(min_queue, 1, Group_Size * sizeof(float));
+        pipe.InitBuffer(scale_queue, 1, Group_Size / 2 * sizeof(half));
+        pipe.InitBuffer(int8_queue, 1, Group_Size * sizeof(int8_t));
+        pipe.InitBuffer(half_queue, 1, Group_Size * sizeof(half));
+    }
+
+    __aicore__ inline void copy_in(uint32_t offset) {
+        LocalTensor<SRC_T> input_local = input_queue.AllocTensor<SRC_T>();
+        DataCopy(input_local, input_gm[offset], Group_Size);
+        input_queue.EnQue(input_local);
+    }
+
+    __aicore__ inline void copy_out(uint32_t offset) {
+        // reinterpretcast Group_Size(32) * int4b_t to Group_Size / 2 * int8_t,
+        // and using DataCopyPad to avoid 32 bits align.
+        LocalTensor<int4b_t> output_local = output_queue.DeQue<int4b_t>();
+        LocalTensor<int8_t> output_int8_local =
+            output_local.ReinterpretCast<int8_t>();
+
+        DataCopyExtParams dataCopyParams;
+        dataCopyParams.blockCount = 1;
+        dataCopyParams.blockLen = Group_Size / 2 * sizeof(int8_t);
+        DataCopyPad(output_gm[offset], output_int8_local, dataCopyParams);
+
+        output_queue.FreeTensor(output_local);
+    }
+
+    __aicore__ inline void input_to_cast(LocalTensor<float> cast_local,
+                                         LocalTensor<float> input_local) {
+        DataCopy(cast_local, input_local, Group_Size);
+    }
+
+    __aicore__ inline void input_to_cast(LocalTensor<float> cast_local,
+                                         LocalTensor<half> input_local) {
+        Cast(cast_local, input_local, RoundMode::CAST_NONE, Group_Size);
+    }
+
+    __aicore__ inline half calculate_group(int64_t row, int64_t group) {
+        const int64_t i3 = row / (input_ne[1] * input_ne[2]);
+        const int64_t i2 = (row - i3 * input_ne[1] * input_ne[2]) / input_ne[1];
+        const int64_t i1 =
+            row - i3 * input_ne[1] * input_ne[2] - i2 * input_ne[1];
+
+        const int64_t input_offset = i1 * input_stride[1] +
+                                     i2 * input_stride[2] +
+                                     i3 * input_stride[3] + Group_Size * group;
+
+        // output_offset is stride for output_gm which datatype is int8_t and
+        // divided by 2 is needed for int4b_t.
+        const int64_t output_offset = (i1 * output_stride[1] +
+                                       i2 * output_stride[2] +
+                                       i3 * output_stride[3] +
+                                       Group_Size * group) / 2;
+        copy_in(input_offset);
+
+        LocalTensor<SRC_T> input_local = input_queue.DeQue<SRC_T>();
+        LocalTensor<int4b_t> output_local = output_queue.AllocTensor<int4b_t>();
+        LocalTensor<float> cast_local = cast_queue.AllocTensor<float>();
+        LocalTensor<float> work_local = work_queue.AllocTensor<float>();
+        LocalTensor<float> max_local = max_queue.AllocTensor<float>();
+        LocalTensor<float> min_local = min_queue.AllocTensor<float>();
+        LocalTensor<int8_t> int8_local = int8_queue.AllocTensor<int8_t>();
+        LocalTensor<half> half_local = half_queue.AllocTensor<half>();
+
+        input_to_cast(cast_local, input_local);
+
+        ReduceMax(max_local, cast_local, work_local, Group_Size);
+        ReduceMin(min_local, cast_local, work_local, Group_Size);
+        const float max_value = max_local.GetValue(0);
+        const float min_value = min_local.GetValue(0);
+        float d = max_value;
+        if (min_value < 0 && (-1 * min_value) > max_value) {
+            d = min_value;
+        }
+
+        d = d / (-8);
+        if (d != 0) {
+            Muls(cast_local, cast_local, 1.0f / d, Group_Size);
+        }
+
+        // range: [-8,8] -> [0.5,16.5] -> [0,16] -> [0,15] -> [-8,7]
+        float scalar = 8.5f;
+        Adds(cast_local, cast_local, scalar, Group_Size);
+        Cast(cast_local, cast_local, RoundMode::CAST_FLOOR, Group_Size);
+        scalar = 15.0f;
+        Mins(cast_local, cast_local, scalar, Group_Size);
+        scalar = -8.0f;
+        Adds(cast_local, cast_local, scalar, Group_Size);
+
+        // float->half->int4b
+        Cast(half_local, cast_local, RoundMode::CAST_NONE, Group_Size);
+        Cast(output_local, half_local, RoundMode::CAST_NONE, Group_Size);
+
+        output_queue.EnQue(output_local);
+        copy_out(output_offset);
+
+        input_queue.FreeTensor(input_local);
+        work_queue.FreeTensor(work_local);
+        max_queue.FreeTensor(max_local);
+        min_queue.FreeTensor(min_local);
+        int8_queue.FreeTensor(int8_local);
+        half_queue.FreeTensor(half_local);
+        cast_queue.FreeTensor(cast_local);
+        return (half)d;
+    }
+
+    __aicore__ inline void calculate() {
+        LocalTensor<half> scale_local = scale_queue.AllocTensor<half>();
+        uint32_t scale_local_offset = 0;
+        uint32_t scale_global_offset = 0;
+        for (int64_t i = ir; i < ir + dr; i++) {
+            for (int64_t j = 0; j < group_size_in_row; j++) {
+                half scale = calculate_group(i, j);
+                scale_local.SetValue(scale_local_offset++, scale);
+                // Copy Group_Size/2 length data each time.
+                if (scale_local_offset == Group_Size / 2) {
+                    scale_local_offset = 0;
+                    // TODO: OPTIMIZE ME
+                    pipe_barrier(PIPE_ALL);
+                    DataCopy(scale_gm[scale_global_offset], scale_local,
+                             Group_Size / 2);
+                    pipe_barrier(PIPE_ALL);
+                    scale_global_offset += Group_Size / 2;
+                }
+            }
+        }
+
+        if (scale_local_offset != 0) {
+            pipe_barrier(PIPE_ALL);
+            DataCopyExtParams dataCopyParams;
+            dataCopyParams.blockCount = 1;
+            dataCopyParams.blockLen = scale_local_offset * sizeof(half);
+            DataCopyPad(scale_gm[scale_global_offset], scale_local,
+                        dataCopyParams);
+            pipe_barrier(PIPE_ALL);
+        }
+        scale_queue.FreeTensor(scale_local);
+    }
+
+   private:
+    int64_t input_ne[4];
+    size_t input_stride[4];
+
+    int64_t *scale_ne;
+    size_t scale_stride[4];
+
+    int64_t output_ne[4];
+    size_t output_stride[4];
+
+    int64_t group_size_in_row;
+
+    int64_t ir;
+    int64_t dr;
+
+    TPipe pipe;
+    GlobalTensor<SRC_T> input_gm;
+    GlobalTensor<half> scale_gm;
+    GlobalTensor<int8_t> output_gm;
+    TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
+    TQue<QuePosition::VECIN, BUFFER_NUM> work_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> max_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> min_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> scale_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> cast_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> int8_queue;
+    TQue<QuePosition::VECOUT, BUFFER_NUM> half_queue;
+};
+
+template <typename T>
+__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
+    auto gm_ptr = (__gm__ uint8_t *)gm;
+    auto ub_ptr = (uint8_t *)(ub);
+    for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
+        *ub_ptr = *gm_ptr;
+    }
+}
+
+extern "C" __global__ __aicore__ void ascendc_quantize_f16_to_q4_0(
+    GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm,
+    GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) {
+    int64_t input_ne_ub[4];
+    size_t input_nb_ub[4];
+    int64_t output_ne_ub[4];
+
+    copy_to_ub(input_ne_gm, input_ne_ub, 32);
+    copy_to_ub(input_nb_gm, input_nb_ub, 32);
+    copy_to_ub(output_ne_gm, output_ne_ub, 32);
+
+    QUANTIZE_FLOAT_TO_Q4_0<half> op;
+    op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub);
+    op.calculate();
+}
+
+extern "C" __global__ __aicore__ void ascendc_quantize_f32_to_q4_0(
+    GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm,
+    GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) {
+    int64_t input_ne_ub[4];
+    size_t input_nb_ub[4];
+    int64_t output_ne_ub[4];
+
+    copy_to_ub(input_ne_gm, input_ne_ub, 32);
+    copy_to_ub(input_nb_gm, input_nb_ub, 32);
+    copy_to_ub(output_ne_gm, output_ne_ub, 32);
+
+    QUANTIZE_FLOAT_TO_Q4_0<float> op;
+    op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub);
+    op.calculate();
+}
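
For reference, the kernel above quantizes each group of 32 floats to 4-bit values plus one fp16 scale: it finds the signed element of largest magnitude, sets d = max / -8, divides the group by d, then maps the resulting [-8,8] range to [0,15] by adding 8.5, flooring, and clamping (the "+8.5 then floor" is round-to-nearest). Below is a minimal scalar sketch of the same per-group math, illustrative only and not part of the package; the nibble packing shown follows ggml's CPU q4_0 reference, whereas the CANN kernel writes signed int4 values and appends the scales after the quantized data.

#include <cmath>
#include <cstdint>

// Quantize one group of 32 floats to 16 packed nibbles plus one scale.
void quantize_group_q4_0(const float src[32], uint8_t dst[16], float &d_out) {
    // Signed max-magnitude element, matching the kernel's ReduceMax/ReduceMin
    // selection (keep the sign of whichever extreme has larger magnitude).
    float max = src[0];
    for (int i = 1; i < 32; i++) {
        if (std::fabs(src[i]) > std::fabs(max)) {
            max = src[i];
        }
    }
    const float d  = max / -8.0f;
    const float id = (d != 0.0f) ? 1.0f / d : 0.0f;

    for (int i = 0; i < 32; i += 2) {
        // x*id lies in [-8,8]; +8.5 and floor round to nearest, then clamp to 15.
        const uint8_t q0 = (uint8_t)std::fmin(std::floor(src[i]     * id + 8.5f), 15.0f);
        const uint8_t q1 = (uint8_t)std::fmin(std::floor(src[i + 1] * id + 8.5f), 15.0f);
        dst[i / 2] = q0 | (q1 << 4);  // two 4-bit values per output byte
    }
    d_out = d;  // the kernel stores this as half in its scale region
}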