cui-llama.rn 1.3.3 → 1.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +5 -7
- package/android/src/main/java/com/rnllama/LlamaContext.java +4 -4
- package/android/src/main/jni.cpp +9 -9
- package/cpp/common.cpp +21 -40
- package/cpp/common.h +21 -12
- package/cpp/ggml-backend-impl.h +38 -20
- package/cpp/ggml-backend-reg.cpp +216 -87
- package/cpp/ggml-backend.h +1 -0
- package/cpp/ggml-common.h +42 -48
- package/cpp/{ggml-cpu-aarch64.c → ggml-cpu-aarch64.cpp} +591 -152
- package/cpp/ggml-cpu-aarch64.h +2 -26
- package/cpp/ggml-cpu-traits.cpp +36 -0
- package/cpp/ggml-cpu-traits.h +38 -0
- package/cpp/ggml-cpu.c +14122 -13971
- package/cpp/ggml-cpu.cpp +618 -715
- package/cpp/ggml-cpu.h +0 -17
- package/cpp/ggml-impl.h +6 -6
- package/cpp/ggml-metal.m +482 -24
- package/cpp/ggml-quants.c +0 -9
- package/cpp/ggml-threading.h +4 -2
- package/cpp/ggml.c +132 -43
- package/cpp/ggml.h +44 -13
- package/cpp/llama-sampling.cpp +35 -90
- package/cpp/llama-vocab.cpp +2 -1
- package/cpp/llama.cpp +737 -233
- package/cpp/llama.h +20 -16
- package/cpp/sampling.cpp +11 -16
- package/cpp/speculative.cpp +4 -0
- package/cpp/unicode.cpp +51 -51
- package/cpp/unicode.h +9 -10
- package/lib/commonjs/index.js +38 -1
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/index.js +36 -0
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +2 -3
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +36 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +3 -3
- package/src/index.ts +46 -2
- package/cpp/amx/amx.cpp +0 -196
- package/cpp/amx/amx.h +0 -20
- package/cpp/amx/common.h +0 -101
- package/cpp/amx/mmq.cpp +0 -2524
- package/cpp/amx/mmq.h +0 -16
- package/cpp/ggml-aarch64.c +0 -129
- package/cpp/ggml-aarch64.h +0 -19
package/lib/typescript/NativeRNLlama.d.ts
CHANGED
@@ -17,11 +17,11 @@ export type NativeContextParams = {
     /**
      * KV cache data type for the K (Experimental in llama.cpp)
      */
-    cache_type_k?:
+    cache_type_k?: number;
     /**
      * KV cache data type for the V (Experimental in llama.cpp)
      */
-    cache_type_v?:
+    cache_type_v?: number;
     use_mlock?: boolean;
     use_mmap?: boolean;
     vocab_only?: boolean;
@@ -104,7 +104,6 @@ export type NativeCompletionParams = {
     /**
      * Penalize newline tokens when applying the repeat penalty. Default: `false`
      */
-    penalize_nl?: boolean;
     /**
      * Enable Mirostat sampling, controlling perplexity during text generation. Default: `0`, where `0` is disabled, `1` is Mirostat, and `2` is Mirostat 2.0.
      */
package/lib/typescript/NativeRNLlama.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"NativeRNLlama.d.ts","sourceRoot":"","sources":["../../src/NativeRNLlama.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,cAAc,CAAA;AAG/C,MAAM,MAAM,qBAAqB,GAAG;IAClC,cAAc,CAAC,EAAE,MAAM,CAAA;CACxB,CAAA;AAED,MAAM,MAAM,mBAAmB,GAAG;IAChC,KAAK,EAAE,MAAM,CAAA;IACb,cAAc,CAAC,EAAE,OAAO,CAAA;IACxB,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAE/B,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,OAAO,CAAC,EAAE,MAAM,CAAA;IAEhB,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,YAAY,CAAC,EAAE,MAAM,CAAA;IAErB;;OAEG;IACH,UAAU,CAAC,EAAE,OAAO,CAAA;IAEpB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IAErB,SAAS,CAAC,EAAE,OAAO,CAAA;IACnB,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,UAAU,CAAC,EAAE,OAAO,CAAA;IAEpB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,WAAW,CAAC,EAAE,MAAM,CAAA;IAEpB,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB,eAAe,CAAC,EAAE,MAAM,CAAA;IAExB,YAAY,CAAC,EAAE,MAAM,CAAA;IAGrB,SAAS,CAAC,EAAE,OAAO,CAAA;IACnB,cAAc,CAAC,EAAE,MAAM,CAAA;CACxB,CAAA;AAED,MAAM,MAAM,sBAAsB,GAAG;IACnC,MAAM,EAAE,MAAM,CAAA;IACd,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB;;;OAGG;IACH,IAAI,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAA;IACpB;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;;;OAIG;IACH,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAA;IACxB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAA;IACtB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAA;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAA;IACxB;;OAEG;
+{"version":3,"file":"NativeRNLlama.d.ts","sourceRoot":"","sources":["../../src/NativeRNLlama.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,cAAc,CAAA;AAG/C,MAAM,MAAM,qBAAqB,GAAG;IAClC,cAAc,CAAC,EAAE,MAAM,CAAA;CACxB,CAAA;AAED,MAAM,MAAM,mBAAmB,GAAG;IAChC,KAAK,EAAE,MAAM,CAAA;IACb,cAAc,CAAC,EAAE,OAAO,CAAA;IACxB,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAE/B,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,OAAO,CAAC,EAAE,MAAM,CAAA;IAEhB,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,YAAY,CAAC,EAAE,MAAM,CAAA;IAErB;;OAEG;IACH,UAAU,CAAC,EAAE,OAAO,CAAA;IAEpB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IAErB,SAAS,CAAC,EAAE,OAAO,CAAA;IACnB,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,UAAU,CAAC,EAAE,OAAO,CAAA;IAEpB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,WAAW,CAAC,EAAE,MAAM,CAAA;IAEpB,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB,eAAe,CAAC,EAAE,MAAM,CAAA;IAExB,YAAY,CAAC,EAAE,MAAM,CAAA;IAGrB,SAAS,CAAC,EAAE,OAAO,CAAA;IACnB,cAAc,CAAC,EAAE,MAAM,CAAA;CACxB,CAAA;AAED,MAAM,MAAM,sBAAsB,GAAG;IACnC,MAAM,EAAE,MAAM,CAAA;IACd,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB;;;OAGG;IACH,IAAI,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAA;IACpB;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;;;OAIG;IACH,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAA;IACd;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAA;IACxB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAA;IACtB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAA;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAA;IACxB;;OAEG;IAEH;;OAEG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB;;OAEG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,CAAA;IAC3B;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,CAAA;IAC3B;;OAEG;IACH,qBAAqB,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAA;IACrC;;OAEG;IACH,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB;;;;;;OAMG;IACH,UAAU,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAA;IACjC;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAA;IAEb,uBAAuB,EAAE,OAAO,CAAA;CACjC,CAAA;AAED,MAAM,MAAM,6BAA6B,GAAG;IAC1C,OAAO,EAAE,MAAM,CAAA;IACf,IAAI,EAAE,MAAM,CAAA;CACb,CAAA;AAED,MAAM,MAAM,yBAAyB,GAAG;IACtC,OAAO,EAAE,MAAM,CAAA;IACf,KAAK,EAAE,KAAK,CAAC,6BAA6B,CAAC,CAAA;CAC5C,CAAA;AAED,MAAM,MAAM,6BAA6B,GAAG;IAC1C,QAAQ,EAAE,MAAM,CAAA;IAChB,SAAS,EAAE,MAAM,CAAA;IACjB,mBAAmB,EAAE,MAAM,CAAA;IAC3B,iBAAiB,EAAE,MAAM,CAAA;IACzB,WAAW,EAAE,MAAM,CAAA;IACnB,YAAY,EAAE,MAAM,CAAA;IACpB,sBAAsB,EAAE,MAAM,CAAA;IAC9B,oBAAoB,EAAE,MAAM,CAAA;CAC7B,CAAA;AAED,MAAM,MAAM,sBAAsB,GAAG;IACnC,IAAI,EAAE,MAAM,CAAA;IAEZ,gBAAgB,EAAE,MAAM,CAAA;IACxB,gBAAgB,EAAE,MAAM,CAAA;IACxB,SAAS,EAAE,OAAO,CAAA;IAClB,WAAW,EAAE,OAAO,CAAA;IACpB,YAAY,EAAE,MAAM,CAAA;IACpB,aAAa,EAAE,MAAM,CAAA;IACrB,aAAa,EAAE,MAAM,CAAA;IACrB,aAAa,EAAE,MAAM,CAAA;IACrB,OAAO,EAAE,6BAA6B,CAAA;IAEtC,wBAAwB,CAAC,EAAE,KAAK,CAAC,yBAAyB,CAAC,CAAA;CAC5D,CAAA;AAED,MAAM,MAAM,oBAAoB,GAAG;IACjC,MAAM,EAAE,KAAK,CAAC,MAAM,CAAC,CAAA;CACtB,CAAA;AAED,MAAM,MAAM,qBAAqB,GAAG;IAClC,SAAS,EAAE,KAAK,CAAC,MAAM,CAAC,CAAA;CACzB,CAAA;AAED,MAAM,MAAM,kBAAkB,GAAG;IAC/B,SAAS,EAAE,MAAM,CAAA;IACjB,GAAG,EAAE,OAAO,CAAA;IACZ,WAAW,EAAE,MAAM,CAAA;IACnB,KAAK,EAAE,MAAM,CAAA;CACd,CAAA;AAED,MAAM,MAAM,uBAAuB,GAAG;IACpC,aAAa,EAAE,MAAM,CAAA;IACrB,MAAM,EAAE,MAAM,CAAA;CACf,CAAA;AAED,MAAM,MAAM,sBAAsB,GAAG;IACnC,IAAI,EAAE,MAAM,CAAA;IACZ,OAAO,EAAE,MAAM,CAAA;CAChB,CAAA;AAED,MAAM,MAAM,iBAAiB,GAAG;IAC9B,KAAK,EAAE,OAAO,CAAA;IACd,IAAI,EAAE,OAAO,CAAA;IACb,OAAO,EAAE,OAAO,CAAA;CACjB,CAAA;AAED,MAAM,WAAW,IAAK,SAAQ,WAAW;IACvC,eAAe,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAE7C,SAAS,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IACzD,WAAW,CAAC,SAAS,EAAE,MAAM,EAAE,MAAM,EAAE,mBAAmB,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAAA;IAExF,WAAW,CACT,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC,uBAAuB,CAAC,CAAA;IACnC,WAAW,CACT,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,MAAM,EAChB,IAAI,EAAE,MAAM,GACX,OAAO,CAAC,MAAM,CAAC,CAAA;IAClB,UAAU,CACR,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,sBAAsB,GAC7B,OAAO,CAAC,sBAAsB,CAAC,CAAA;IAClC,cAAc,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAChD,aAAa,CAAC,SAAS,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,oBAAoB,CAAC,CAAA;IAC7E,YAAY,CAAC,SAAS,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,GAAG,oBAAoB,CAAA;IACnE,cAAc,IAAK,OAAO,CAAC,iBAAiB,CAAC,CAAA;IAC7C,gBAAgB,CACd,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,sBAAsB,EAAE,EAClC,YAAY,CAAC,EAAE,MAAM,GACpB,OAAO,CAAC,MAAM,CAAC,CAAA;IAClB,UAAU,CAAC,SAAS,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAChE,SAAS,CACP,SAAS,EAAE,MAAM,EACjB,IAAI,EAAE,MAAM,EACZ,MAAM,EAAE,qBAAqB,GAC5B,OAAO,CAAC,qBAAqB,CAAC,CAAA;IACjC,KAAK,CACH,SAAS,EAAE,MAAM,EACjB,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,GACT,OAAO,CAAC,MAAM,CAAC,CAAA;IAElB,cAAc,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAEhD,kBAAkB,IAAI,OAAO,CAAC,IAAI,CAAC,CAAA;CACpC;;AAED,wBAA+D"}
package/lib/typescript/index.d.ts
CHANGED
@@ -8,9 +8,43 @@ export type TokenData = {
     token: string;
     completion_probabilities?: Array<NativeCompletionTokenProb>;
 };
+export declare enum GGML_TYPE {
+    LM_GGML_TYPE_F32 = 0,
+    LM_GGML_TYPE_F16 = 1,
+    LM_GGML_TYPE_Q4_0 = 2,
+    LM_GGML_TYPE_Q4_1 = 3,
+    LM_GGML_TYPE_Q5_0 = 6,
+    LM_GGML_TYPE_Q5_1 = 7,
+    LM_GGML_TYPE_Q8_0 = 8,
+    LM_GGML_TYPE_Q8_1 = 9,
+    LM_GGML_TYPE_Q2_K = 10,
+    LM_GGML_TYPE_Q3_K = 11,
+    LM_GGML_TYPE_Q4_K = 12,
+    LM_GGML_TYPE_Q5_K = 13,
+    LM_GGML_TYPE_Q6_K = 14,
+    LM_GGML_TYPE_Q8_K = 15,
+    LM_GGML_TYPE_IQ2_XXS = 16,
+    LM_GGML_TYPE_IQ2_XS = 17,
+    LM_GGML_TYPE_IQ3_XXS = 18,
+    LM_GGML_TYPE_IQ1_S = 19,
+    LM_GGML_TYPE_IQ4_NL = 20,
+    LM_GGML_TYPE_IQ3_S = 21,
+    LM_GGML_TYPE_IQ2_S = 22,
+    LM_GGML_TYPE_IQ4_XS = 23,
+    LM_GGML_TYPE_I8 = 24,
+    LM_GGML_TYPE_I16 = 25,
+    LM_GGML_TYPE_I32 = 26,
+    LM_GGML_TYPE_I64 = 27,
+    LM_GGML_TYPE_F64 = 28,
+    LM_GGML_TYPE_IQ1_M = 29,
+    LM_GGML_TYPE_BF16 = 30,
+    LM_GGML_TYPE_TQ1_0 = 34,
+    LM_GGML_TYPE_TQ2_0 = 35,
+    LM_GGML_TYPE_COUNT = 39
+}
 export type ContextParams = Omit<NativeContextParams, 'cache_type_k' | 'cache_type_v' | 'pooling_type'> & {
-    cache_type_k?:
-    cache_type_v?:
+    cache_type_k?: GGML_TYPE;
+    cache_type_v?: GGML_TYPE;
     pooling_type?: 'none' | 'mean' | 'cls' | 'last' | 'rank';
 };
 export type EmbeddingParams = NativeEmbeddingParams;
package/lib/typescript/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EACV,mBAAmB,EACnB,kBAAkB,EAClB,sBAAsB,EACtB,yBAAyB,EACzB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACrB,uBAAuB,EACvB,iBAAiB,EACjB,qBAAqB,EACrB,6BAA6B,EAC7B,6BAA6B,EAC9B,MAAM,iBAAiB,CAAA;AACxB,OAAO,KAAK,EAAE,+BAA+B,EAAE,iCAAiC,EAAE,MAAM,WAAW,CAAA;AACnG,OAAO,EAAE,sBAAsB,EAAE,0BAA0B,EAAE,MAAM,WAAW,CAAA;AAC9E,OAAO,KAAK,EAAE,kBAAkB,EAAE,2BAA2B,EAAE,MAAM,QAAQ,CAAA;AAG7E,YAAY,EACV,mBAAmB,EACnB,kBAAkB,EAClB,sBAAsB,EACtB,yBAAyB,EACzB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACrB,uBAAuB,EACvB,qBAAqB,EACrB,6BAA6B,EAC7B,6BAA6B,EAC7B,kBAAkB,EAClB,2BAA2B,EAC3B,+BAA+B,EAC/B,iCAAiC,GAClC,CAAA;AAED,OAAO,EAAE,sBAAsB,EAAE,0BAA0B,EAAE,CAAA;AAc7D,MAAM,MAAM,SAAS,GAAG;IACtB,KAAK,EAAE,MAAM,CAAA;IACb,wBAAwB,CAAC,EAAE,KAAK,CAAC,yBAAyB,CAAC,CAAA;CAC5D,CAAA;AAOD,
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EACV,mBAAmB,EACnB,kBAAkB,EAClB,sBAAsB,EACtB,yBAAyB,EACzB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACrB,uBAAuB,EACvB,iBAAiB,EACjB,qBAAqB,EACrB,6BAA6B,EAC7B,6BAA6B,EAC9B,MAAM,iBAAiB,CAAA;AACxB,OAAO,KAAK,EAAE,+BAA+B,EAAE,iCAAiC,EAAE,MAAM,WAAW,CAAA;AACnG,OAAO,EAAE,sBAAsB,EAAE,0BAA0B,EAAE,MAAM,WAAW,CAAA;AAC9E,OAAO,KAAK,EAAE,kBAAkB,EAAE,2BAA2B,EAAE,MAAM,QAAQ,CAAA;AAG7E,YAAY,EACV,mBAAmB,EACnB,kBAAkB,EAClB,sBAAsB,EACtB,yBAAyB,EACzB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACrB,uBAAuB,EACvB,qBAAqB,EACrB,6BAA6B,EAC7B,6BAA6B,EAC7B,kBAAkB,EAClB,2BAA2B,EAC3B,+BAA+B,EAC/B,iCAAiC,GAClC,CAAA;AAED,OAAO,EAAE,sBAAsB,EAAE,0BAA0B,EAAE,CAAA;AAc7D,MAAM,MAAM,SAAS,GAAG;IACtB,KAAK,EAAE,MAAM,CAAA;IACb,wBAAwB,CAAC,EAAE,KAAK,CAAC,yBAAyB,CAAC,CAAA;CAC5D,CAAA;AAOD,oBAAY,SAAS;IACnB,gBAAgB,IAAQ;IACxB,gBAAgB,IAAQ;IACxB,iBAAiB,IAAO;IACxB,iBAAiB,IAAO;IAGxB,iBAAiB,IAAO;IACxB,iBAAiB,IAAO;IACxB,iBAAiB,IAAO;IACxB,iBAAiB,IAAO;IACxB,iBAAiB,KAAQ;IACzB,iBAAiB,KAAQ;IACzB,iBAAiB,KAAQ;IACzB,iBAAiB,KAAQ;IACzB,iBAAiB,KAAQ;IACzB,iBAAiB,KAAQ;IACzB,oBAAoB,KAAK;IACzB,mBAAmB,KAAM;IACzB,oBAAoB,KAAK;IACzB,kBAAkB,KAAO;IACzB,mBAAmB,KAAM;IACzB,kBAAkB,KAAO;IACzB,kBAAkB,KAAO;IACzB,mBAAmB,KAAM;IACzB,eAAe,KAAU;IACzB,gBAAgB,KAAS;IACzB,gBAAgB,KAAS;IACzB,gBAAgB,KAAS;IACzB,gBAAgB,KAAS;IACzB,kBAAkB,KAAO;IACzB,iBAAiB,KAAQ;IAIzB,kBAAkB,KAAO;IACzB,kBAAkB,KAAO;IAIzB,kBAAkB,KAAO;CAC1B;AAGD,MAAM,MAAM,aAAa,GAAG,IAAI,CAC9B,mBAAmB,EACnB,cAAc,GAAG,cAAc,GAAI,cAAc,CAClD,GAAG;IACF,YAAY,CAAC,EAAE,SAAS,CAAA;IACxB,YAAY,CAAC,EAAE,SAAS,CAAA;IACxB,YAAY,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,KAAK,GAAG,MAAM,GAAG,MAAM,CAAA;CACzD,CAAA;AAED,MAAM,MAAM,eAAe,GAAG,qBAAqB,CAAA;AAEnD,MAAM,MAAM,gBAAgB,GAAG,IAAI,CACjC,sBAAsB,EACtB,yBAAyB,GAAG,QAAQ,CACrC,GAAG;IACF,MAAM,CAAC,EAAE,MAAM,CAAA;IACf,QAAQ,CAAC,EAAE,2BAA2B,EAAE,CAAA;IACxC,YAAY,CAAC,EAAE,MAAM,CAAA;CACtB,CAAA;AAED,MAAM,MAAM,WAAW,GAAG;IACxB,SAAS,EAAE,MAAM,CAAA;IACjB,SAAS,EAAE,MAAM,CAAA;IACjB,YAAY,EAAE,MAAM,CAAA;IACpB,KAAK,EAAE,MAAM,CAAA;IACb,KAAK,EAAE,MAAM,CAAA;IACb,KAAK,EAAE,MAAM,CAAA;IACb,KAAK,EAAE,MAAM,CAAA;CACd,CAAA;AAED,qBAAa,YAAY;IACvB,EAAE,EAAE,MAAM,CAAA;IAEV,GAAG,EAAE,OAAO,CAAQ;IAEpB,WAAW,EAAE,MAAM,CAAK;IAExB,KAAK,EAAE;QACL,uBAAuB,CAAC,EAAE,OAAO,CAAA;KAClC,CAAK;gBAEM,EAAE,SAAS,EAAE,GAAG,EAAE,WAAW,EAAE,KAAK,EAAE,EAAE,kBAAkB;IAOtE;;OAEG;IACG,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,uBAAuB,CAAC;IAMrE;;OAEG;IACG,WAAW,CACf,QAAQ,EAAE,MAAM,EAChB,OAAO,CAAC,EAAE;QAAE,SAAS,EAAE,MAAM,CAAA;KAAE,GAC9B,OAAO,CAAC,MAAM,CAAC;IAIZ,gBAAgB,CACpB,QAAQ,EAAE,2BAA2B,EAAE,EACvC,QAAQ,CAAC,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,CAAC;IAOZ,UAAU,CACd,MAAM,EAAE,gBAAgB,EACxB,QAAQ,CAAC,EAAE,CAAC,IAAI,EAAE,SAAS,KAAK,IAAI,GACnC,OAAO,CAAC,sBAAsB,CAAC;IAkClC,cAAc,IAAI,OAAO,CAAC,IAAI,CAAC;IAI/B,aAAa,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,oBAAoB,CAAC;IAI1D,YAAY,CAAC,IAAI,EAAE,MAAM,GAAG,oBAAoB;IAIhD,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,MAAM,CAAC;IAI7C,SAAS,CACP,IAAI,EAAE,MAAM,EACZ,MAAM,CAAC,EAAE,eAAe,GACvB,OAAO,CAAC,qBAAqB,CAAC;IAI3B,KAAK,CACT,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,EACV,EAAE,EAAE,MAAM,GACT,OAAO,CAAC,WAAW,CAAC;IAejB,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;CAG/B;AAED,wBAAsB,cAAc,IAAK,OAAO,CAAC,iBAAiB,CAAC,CAElE;AAED,wBAAsB,eAAe,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAElE;AAYD,wBAAsB,kBAAkB,CAAC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAIvE;AAWD,wBAAsB,SAAS,CAC7B,EACE,KAAK,EACL,cAAc,EAAE,YAAY,EAC5B,YAAY,EAAE,WAAW,EACzB,IAAI,EACJ,GAAG,IAAI,EACR,EAAE,aAAa,EAChB,UAAU,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,GACtC,OAAO,CAAC,YAAY,CAAC,CAuCvB;AAED,wBAAsB,eAAe,IAAI,OAAO,CAAC,IAAI,CAAC,CAErD"}
package/package.json
CHANGED
package/src/NativeRNLlama.ts
CHANGED
@@ -24,11 +24,11 @@ export type NativeContextParams = {
   /**
    * KV cache data type for the K (Experimental in llama.cpp)
    */
-  cache_type_k?:
+  cache_type_k?: number
   /**
    * KV cache data type for the V (Experimental in llama.cpp)
    */
-  cache_type_v?:
+  cache_type_v?: number

   use_mlock?: boolean
   use_mmap?: boolean
@@ -118,7 +118,7 @@ export type NativeCompletionParams = {
   /**
    * Penalize newline tokens when applying the repeat penalty. Default: `false`
    */
-  penalize_nl?: boolean
+  // penalize_nl?: boolean
   /**
    * Enable Mirostat sampling, controlling perplexity during text generation. Default: `0`, where `0` is disabled, `1` is Mirostat, and `2` is Mirostat 2.0.
    */
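With `penalize_nl` commented out of `NativeCompletionParams` (the option was removed upstream in llama.cpp's sampler rework, which the `llama-sampling.cpp` changes in this release track), callers migrating from 1.3.3 should simply stop passing the field. A hedged migration sketch, assuming a `completion()` method shaped like upstream llama.rn's:

    // 1.3.3 and earlier:
    await context.completion({ prompt: 'Hello', penalize_nl: false })

    // 1.3.4: the field is gone from NativeCompletionParams, so an object
    // literal that still includes it fails TypeScript's excess-property check.
    await context.completion({ prompt: 'Hello' })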
package/src/index.ts
CHANGED
@@ -62,12 +62,56 @@ type TokenNativeEvent = {
   tokenResult: TokenData
 }

+export enum GGML_TYPE {
+  LM_GGML_TYPE_F32 = 0,
+  LM_GGML_TYPE_F16 = 1,
+  LM_GGML_TYPE_Q4_0 = 2,
+  LM_GGML_TYPE_Q4_1 = 3,
+  // LM_GGML_TYPE_Q4_2 = 4, support has been removed
+  // LM_GGML_TYPE_Q4_3 = 5, support has been removed
+  LM_GGML_TYPE_Q5_0 = 6,
+  LM_GGML_TYPE_Q5_1 = 7,
+  LM_GGML_TYPE_Q8_0 = 8,
+  LM_GGML_TYPE_Q8_1 = 9,
+  LM_GGML_TYPE_Q2_K = 10,
+  LM_GGML_TYPE_Q3_K = 11,
+  LM_GGML_TYPE_Q4_K = 12,
+  LM_GGML_TYPE_Q5_K = 13,
+  LM_GGML_TYPE_Q6_K = 14,
+  LM_GGML_TYPE_Q8_K = 15,
+  LM_GGML_TYPE_IQ2_XXS = 16,
+  LM_GGML_TYPE_IQ2_XS = 17,
+  LM_GGML_TYPE_IQ3_XXS = 18,
+  LM_GGML_TYPE_IQ1_S = 19,
+  LM_GGML_TYPE_IQ4_NL = 20,
+  LM_GGML_TYPE_IQ3_S = 21,
+  LM_GGML_TYPE_IQ2_S = 22,
+  LM_GGML_TYPE_IQ4_XS = 23,
+  LM_GGML_TYPE_I8 = 24,
+  LM_GGML_TYPE_I16 = 25,
+  LM_GGML_TYPE_I32 = 26,
+  LM_GGML_TYPE_I64 = 27,
+  LM_GGML_TYPE_F64 = 28,
+  LM_GGML_TYPE_IQ1_M = 29,
+  LM_GGML_TYPE_BF16 = 30,
+  // LM_GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files
+  // LM_GGML_TYPE_Q4_0_4_8 = 32,
+  // LM_GGML_TYPE_Q4_0_8_8 = 33,
+  LM_GGML_TYPE_TQ1_0 = 34,
+  LM_GGML_TYPE_TQ2_0 = 35,
+  // LM_GGML_TYPE_IQ4_NL_4_4 = 36,
+  // LM_GGML_TYPE_IQ4_NL_4_8 = 37,
+  // LM_GGML_TYPE_IQ4_NL_8_8 = 38,
+  LM_GGML_TYPE_COUNT = 39,
+};
+
+
 export type ContextParams = Omit<
   NativeContextParams,
   'cache_type_k' | 'cache_type_v' | 'pooling_type'
 > & {
-  cache_type_k?:
-  cache_type_v?:
+  cache_type_k?: GGML_TYPE
+  cache_type_v?: GGML_TYPE
   pooling_type?: 'none' | 'mean' | 'cls' | 'last' | 'rank'
 }

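The practical upshot of the `index.ts` change above: `cache_type_k` / `cache_type_v` on `ContextParams` now take `GGML_TYPE` enum values (which carry llama.cpp's numeric `lm_ggml_type` ids, matching the `number` fields on `NativeContextParams`) rather than the old string type. A minimal usage sketch, assuming the package's `initLlama()` entry point and a `model` parameter as in upstream llama.rn; the path is hypothetical:

    import { initLlama, GGML_TYPE } from 'cui-llama.rn'

    // Quantize both halves of the KV cache to Q8_0. The enum members resolve
    // to llama.cpp's numeric type ids, which is what the native side expects
    // for cache_type_k / cache_type_v as of 1.3.4.
    const context = await initLlama({
      model: '/path/to/model.gguf', // hypothetical path
      cache_type_k: GGML_TYPE.LM_GGML_TYPE_Q8_0,
      cache_type_v: GGML_TYPE.LM_GGML_TYPE_Q8_0,
    })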
package/cpp/amx/amx.cpp
DELETED
@@ -1,196 +0,0 @@
-#include "amx.h"
-#include "common.h"
-#include "mmq.h"
-#include "ggml-backend-impl.h"
-#include "ggml-backend.h"
-#include "ggml-impl.h"
-#include "ggml-cpu.h"
-
-#if defined(__gnu_linux__)
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#include <cstdlib>
-#include <cstring>
-#include <memory>
-
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-
-// AMX buffer interface
-static void lm_ggml_backend_amx_buffer_free_buffer(lm_ggml_backend_buffer_t buffer) {
-    free(buffer->context);
-}
-
-static void * lm_ggml_backend_amx_buffer_get_base(lm_ggml_backend_buffer_t buffer) {
-    return (void *)(buffer->context);
-}
-
-static void lm_ggml_backend_amx_buffer_memset_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
-    memset((char *)tensor->data + offset, value, size);
-
-    LM_GGML_UNUSED(buffer);
-}
-
-static void lm_ggml_backend_amx_buffer_set_tensor(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    if (qtype_has_amx_kernels(tensor->type)) {
-        lm_ggml_backend_amx_convert_weight(tensor, data, offset, size);
-    } else {
-        memcpy((char *)tensor->data + offset, data, size);
-    }
-
-    LM_GGML_UNUSED(buffer);
-}
-
-static void lm_ggml_backend_amx_buffer_get_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
-    LM_GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
-    memcpy(data, (const char *)tensor->data + offset, size);
-
-    LM_GGML_UNUSED(buffer);
-}
-
-static bool lm_ggml_backend_amx_buffer_cpy_tensor(lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst) {
-    if (lm_ggml_backend_buffer_is_host(src->buffer)) {
-        if (qtype_has_amx_kernels(src->type)) {
-            lm_ggml_backend_amx_convert_weight(dst, src->data, 0, lm_ggml_nbytes(dst));
-        } else {
-            memcpy(dst->data, src->data, lm_ggml_nbytes(src));
-        }
-        return true;
-    }
-    return false;
-
-    LM_GGML_UNUSED(buffer);
-}
-
-static void lm_ggml_backend_amx_buffer_clear(lm_ggml_backend_buffer_t buffer, uint8_t value) {
-    memset(buffer->context, value, buffer->size);
-}
-
-static lm_ggml_backend_buffer_i lm_ggml_backend_amx_buffer_interface = {
-    /* .free_buffer   = */ lm_ggml_backend_amx_buffer_free_buffer,
-    /* .get_base      = */ lm_ggml_backend_amx_buffer_get_base,
-    /* .init_tensor   = */ NULL, // no initialization required
-    /* .memset_tensor = */ lm_ggml_backend_amx_buffer_memset_tensor,
-    /* .set_tensor    = */ lm_ggml_backend_amx_buffer_set_tensor,
-    /* .get_tensor    = */ lm_ggml_backend_amx_buffer_get_tensor,
-    /* .cpy_tensor    = */ lm_ggml_backend_amx_buffer_cpy_tensor,
-    /* .clear         = */ lm_ggml_backend_amx_buffer_clear,
-    /* .reset         = */ NULL,
-};
-
-static const char * lm_ggml_backend_amx_buffer_type_get_name(lm_ggml_backend_buffer_type_t buft) {
-    return "AMX";
-
-    LM_GGML_UNUSED(buft);
-}
-
-static lm_ggml_backend_buffer_t lm_ggml_backend_amx_buffer_type_alloc_buffer(lm_ggml_backend_buffer_type_t buft, size_t size) {
-    void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
-    if (data == NULL) {
-        fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
-        return NULL;
-    }
-
-    return lm_ggml_backend_buffer_init(buft, lm_ggml_backend_amx_buffer_interface, data, size);
-}
-
-static size_t lm_ggml_backend_amx_buffer_type_get_alignment(lm_ggml_backend_buffer_type_t buft) {
-    return TENSOR_ALIGNMENT;
-
-    LM_GGML_UNUSED(buft);
-}
-
-static size_t lm_ggml_backend_amx_buffer_type_get_alloc_size(lm_ggml_backend_buffer_type_t buft, const lm_ggml_tensor* tensor) {
-    return lm_ggml_backend_amx_get_alloc_size(tensor);
-
-    LM_GGML_UNUSED(buft);
-}
-
-static bool lm_ggml_backend_amx_buffer_type_is_host(lm_ggml_backend_buffer_type_t buft) {
-    return false;
-
-    LM_GGML_UNUSED(buft);
-}
-
-#define ARCH_GET_XCOMP_PERM 0x1022
-#define ARCH_REQ_XCOMP_PERM 0x1023
-#define XFEATURE_XTILECFG   17
-#define XFEATURE_XTILEDATA  18
-
-static bool lm_ggml_amx_init() {
-#if defined(__gnu_linux__)
-    if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
-        fprintf(stderr, "AMX is not ready to be used!\n");
-        return false;
-    }
-    return true;
-#elif defined(_WIN32)
-    return true;
-#endif
-}
-lm_ggml_backend_buffer_type_t lm_ggml_backend_amx_buffer_type() {
-    static struct lm_ggml_backend_buffer_type lm_ggml_backend_buffer_type_amx = {
-        /* .iface = */ {
-            /* .get_name       = */ lm_ggml_backend_amx_buffer_type_get_name,
-            /* .alloc_buffer   = */ lm_ggml_backend_amx_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ lm_ggml_backend_amx_buffer_type_get_alignment,
-            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
-            /* .get_alloc_size = */ lm_ggml_backend_amx_buffer_type_get_alloc_size,
-            /* .is_host        = */ lm_ggml_backend_amx_buffer_type_is_host,
-        },
-        /* .device  = */ lm_ggml_backend_reg_dev_get(lm_ggml_backend_cpu_reg(), 0),
-        /* .context = */ NULL,
-    };
-
-    if (!lm_ggml_amx_init()) {
-        return NULL;
-    }
-
-    return &lm_ggml_backend_buffer_type_amx;
-}
-
-bool lm_ggml_backend_amx_buft_is_amx(lm_ggml_backend_buffer_type_t buft) {
-    return buft->iface.get_name == lm_ggml_backend_amx_buffer_type_get_name;
-}
-
-bool lm_ggml_backend_amx_device_supports_op(const struct lm_ggml_tensor * op) {
-    // handle only 2d gemm for now
-    auto is_contiguous_2d = [](const struct lm_ggml_tensor * t) {
-        return lm_ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
-    };
-
-    switch (op->op) {
-        case LM_GGML_OP_NONE:
-        case LM_GGML_OP_RESHAPE:
-        case LM_GGML_OP_VIEW:
-        case LM_GGML_OP_PERMUTE:
-        case LM_GGML_OP_TRANSPOSE:
-            return true;
-
-        case LM_GGML_OP_MUL_MAT: {
-            const struct lm_ggml_tensor * src0 = op->src[0];
-            const struct lm_ggml_tensor * src1 = op->src[1];
-
-            const enum lm_ggml_type type = src0->type;
-            const int64_t ne0 = op->ne[0];
-
-            // amx kernels enables for Q4_0, Q4_1, Q8_0, F16
-            // Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256
-            bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == LM_GGML_TYPE_F16);
-
-            bool can_use_amx =
-                is_contiguous_2d(src0) &&          // src0 must be contiguous
-                is_contiguous_2d(src1) &&          // src1 must be contiguous
-                src1->type == LM_GGML_TYPE_F32 &&  // src1 must be float32
-                has_amx_kernels &&                 // with amx kernel impls
-                ne0 % (TILE_N * 2) == 0;           // out_features is 32x
-
-            return can_use_amx;
-        }
-        default:
-            return false;
-    }
-}
-
-#endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
package/cpp/amx/amx.h
DELETED
@@ -1,20 +0,0 @@
-#include "ggml-backend.h"
-#include "ggml-cpu-impl.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-
-lm_ggml_backend_buffer_type_t lm_ggml_backend_amx_buffer_type(void);
-bool lm_ggml_backend_amx_buft_is_amx(lm_ggml_backend_buffer_type_t buft);
-bool lm_ggml_backend_amx_device_supports_op(const struct lm_ggml_tensor * op);
-void lm_ggml_backend_amx_mul_mat(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
-size_t lm_ggml_backend_amx_desired_wsize(const struct lm_ggml_tensor * dst);
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
package/cpp/amx/common.h
DELETED
@@ -1,101 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-#include "ggml-cpu-impl.h"
-
-#include <algorithm>
-#include <memory>
-#include <type_traits>
-
-#if defined(_OPENMP)
-#include <omp.h>
-#endif
-
-#define TILE_M 16
-#define TILE_N 16
-#define TILE_K 32
-#define VNNI_BLK 4
-
-#define AMX_BLK_SIZE 32
-
-#define TMM0 0
-#define TMM1 1
-#define TMM2 2
-#define TMM3 3
-#define TMM4 4
-#define TMM5 5
-#define TMM6 6
-#define TMM7 7
-
-// parallel routines
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-inline T div_up(T x, T y) { return (x + y - 1) / y; }
-
-template <typename T>
-inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
-#if 0
-    // onednn partition pattern
-    T& n_my = n_end;
-    if (nth <= 1 || n == 0) {
-        n_start = 0;
-        n_my = n;
-    } else {
-        T n1 = div_up(n, nth);
-        T n2 = n1 - 1;
-        T T1 = n - n2 * nth;
-        n_my = ith < T1 ? n1 : n2;
-        n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
-    }
-    n_end += n_start;
-#else
-    // pytorch aten partition pattern
-    T n_my = div_up(n, nth);
-    n_start = ith * n_my;
-    n_end = std::min(n_start + n_my, n);
-#endif
-}
-
-template <typename func_t>
-inline void parallel_for(int nth, int n, const func_t& f) {
-#if defined(_OPENMP)
-#pragma omp parallel num_threads(nth)
-{
-    //int nth = omp_get_num_threads();
-    int ith = omp_get_thread_num();
-    int tbegin, tend;
-    balance211(n, nth, ith, tbegin, tend);
-    f(tbegin, tend);
-}
-#else
-    f(0, n);
-
-    LM_GGML_UNUSED(nth);
-#endif
-}
-
-template <typename func_t>
-inline void parallel_for_ggml(const lm_ggml_compute_params * params, int n, const func_t & f) {
-    int tbegin, tend;
-    balance211(n, params->nth, params->ith, tbegin, tend);
-    f(tbegin, tend);
-    lm_ggml_barrier(params->threadpool); // TODO: might not always be needed
-}
-
-// quantized types that have AMX support
-inline bool qtype_has_amx_kernels(const enum lm_ggml_type type) {
-    // TODO: fix padding for vnni format
-    return (type == LM_GGML_TYPE_Q4_0) ||
-        (type == LM_GGML_TYPE_Q4_1) ||
-        (type == LM_GGML_TYPE_Q8_0) ||
-        (type == LM_GGML_TYPE_Q4_K) ||
-        (type == LM_GGML_TYPE_Q5_K) ||
-        (type == LM_GGML_TYPE_Q6_K) ||
-        (type == LM_GGML_TYPE_IQ4_XS);
-}
-
-// ggml backend context
-struct lm_ggml_backend_amx_context {
-    int n_threads = LM_GGML_DEFAULT_N_THREADS;
-    std::unique_ptr<char[]> work_data;
-    size_t work_size = 0;
-};