@simulatte/doppler 0.1.7 → 0.1.9
- package/CHANGELOG.md +32 -0
- package/README.md +25 -6
- package/package.json +25 -38
- package/src/browser/browser-converter.js +5 -0
- package/src/client/doppler-api.browser.js +6 -0
- package/src/client/doppler-api.d.ts +3 -0
- package/src/client/doppler-api.js +11 -2
- package/src/client/doppler-registry.js +3 -5
- package/src/client/doppler-registry.json +2 -2
- package/src/config/kernel-path-loader.d.ts +5 -0
- package/src/config/kernel-path-loader.js +13 -0
- package/src/config/kernels/kernel-ref-digests.js +23 -21
- package/src/config/kernels/moe/mixtral.paths.json +46 -0
- package/src/config/kernels/registry.json +74 -0
- package/src/config/loader.js +9 -0
- package/src/config/merge-contract-check.js +7 -0
- package/src/config/platforms/loader.js +3 -1
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-nosubgroups.json +16 -16
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-online.json +8 -8
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-small-attn.json +61 -0
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32w-f32a-online.json +56 -0
- package/src/config/presets/kernel-paths/lfm2-q4k-dequant-f32a-nosubgroups.json +61 -0
- package/src/config/presets/kernel-paths/registry.json +21 -0
- package/src/config/presets/models/gemma2.json +2 -1
- package/src/config/presets/models/gemma3.json +4 -1
- package/src/config/presets/models/gemma4.json +61 -0
- package/src/config/presets/models/granite-docling.json +70 -0
- package/src/config/presets/models/lfm2.json +6 -1
- package/src/config/presets/models/qwen3.json +4 -3
- package/src/config/presets/models/qwen3_5.json +16 -0
- package/src/config/presets/models/qwen3_vl.json +40 -0
- package/src/config/presets/runtime/experiments/bench/gemma3-bench-q4k.json +2 -1
- package/src/config/presets/runtime/experiments/verify/lfm2-verify.json +46 -0
- package/src/config/presets/runtime/experiments/verify/translategemma-verify.json +39 -0
- package/src/config/presets/runtime/model/qwen3-5-layer-probe.json +52 -0
- package/src/config/presets/runtime/model/qwen3-5-linear-attn-debug.json +90 -0
- package/src/config/presets/runtime/modes/trace-layers.json +1 -0
- package/src/config/presets/runtime/tiers/gemma4-16gb.json +69 -0
- package/src/config/presets/runtime/tiers/gemma4-24gb.json +66 -0
- package/src/config/presets/runtime/tiers/gemma4-32gb.json +66 -0
- package/src/config/runtime.js +3 -0
- package/src/config/schema/conversion.schema.d.ts +1 -0
- package/src/config/schema/debug.schema.d.ts +40 -0
- package/src/config/schema/debug.schema.js +28 -0
- package/src/config/schema/index.js +2 -0
- package/src/config/schema/inference-defaults.schema.js +1 -1
- package/src/config/schema/kernel-path.schema.d.ts +1 -0
- package/src/config/schema/manifest.schema.d.ts +1 -1
- package/src/config/schema/manifest.schema.js +1 -1
- package/src/config/schema/memory-limits.schema.js +2 -2
- package/src/config/schema/storage.schema.js +2 -2
- package/src/converter/conversion-plan.js +11 -3
- package/src/converter/core.js +19 -8
- package/src/converter/manifest-inference.js +12 -22
- package/src/converter/parsers/transformer.js +4 -0
- package/src/converter/quantization-info.js +5 -1
- package/src/converter/quantizer.d.ts +5 -0
- package/src/converter/quantizer.js +34 -12
- package/src/converter/rope-config.js +8 -6
- package/src/converter/tokenizer-utils.d.ts +1 -0
- package/src/converter/tokenizer-utils.js +4 -1
- package/src/debug/reference/hf_qwen35_linear_attn_debug.py +268 -0
- package/src/distribution/shard-delivery.js +40 -1
- package/src/formats/rdrr/classification.js +32 -0
- package/src/formats/rdrr/parsing.d.ts +4 -0
- package/src/formats/rdrr/parsing.js +14 -1
- package/src/gpu/kernel-runtime.js +4 -2
- package/src/gpu/kernels/attention.js +2 -1
- package/src/gpu/kernels/dequant_f16_out.wgsl +4 -2
- package/src/gpu/kernels/dequant_f16_out_vec4.wgsl +5 -2
- package/src/gpu/kernels/dequant_shared.wgsl +4 -2
- package/src/gpu/kernels/dequant_shared_vec4.wgsl +4 -2
- package/src/gpu/kernels/dequant_subgroup.wgsl +6 -2
- package/src/gpu/kernels/gated-short-conv.d.ts +63 -0
- package/src/gpu/kernels/gated-short-conv.js +284 -0
- package/src/gpu/kernels/index.d.ts +8 -0
- package/src/gpu/kernels/index.js +6 -0
- package/src/gpu/kernels/linear-attention-core.js +37 -17
- package/src/gpu/kernels/matmul-selection.js +48 -4
- package/src/gpu/kernels/matmul.d.ts +5 -0
- package/src/gpu/kernels/matmul.js +71 -2
- package/src/gpu/kernels/matmul_gemv_subgroup.wgsl +77 -79
- package/src/gpu/kernels/rmsnorm.js +9 -2
- package/src/gpu/kernels/sample.js +1 -3
- package/src/gpu/kernels/sample.wgsl +39 -9
- package/src/gpu/kernels/sample_f16.wgsl +38 -8
- package/src/gpu/kernels/shader-cache.js +9 -4
- package/src/gpu/kernels/split_qg.d.ts +50 -0
- package/src/gpu/kernels/split_qg.js +46 -0
- package/src/gpu/kernels/split_qg.wgsl +58 -0
- package/src/gpu/kernels/split_qg_f16.wgsl +62 -0
- package/src/gpu/weight-buffer.d.ts +1 -1
- package/src/gpu/weight-buffer.js +1 -1
- package/src/inference/browser-harness.d.ts +2 -0
- package/src/inference/browser-harness.js +20 -1
- package/src/inference/kv-cache/base.js +3 -10
- package/src/inference/pipelines/diffusion/helpers.js +3 -0
- package/src/inference/pipelines/diffusion/pipeline.js +2 -1
- package/src/inference/pipelines/diffusion/text-encoder-gpu.js +10 -3
- package/src/inference/pipelines/text/attention/output-projection.d.ts +12 -0
- package/src/inference/pipelines/text/attention/output-projection.js +8 -0
- package/src/inference/pipelines/text/attention/projections.d.ts +13 -1
- package/src/inference/pipelines/text/attention/projections.js +54 -13
- package/src/inference/pipelines/text/attention/record.js +16 -6
- package/src/inference/pipelines/text/attention/run.js +59 -6
- package/src/inference/pipelines/text/config.d.ts +1 -0
- package/src/inference/pipelines/text/config.js +46 -4
- package/src/inference/pipelines/text/embed.js +26 -7
- package/src/inference/pipelines/text/execution-plan.js +5 -4
- package/src/inference/pipelines/text/execution-v0-runtime-builders.js +10 -3
- package/src/inference/pipelines/text/execution-v0.js +12 -1
- package/src/inference/pipelines/text/generator-helpers.js +1 -0
- package/src/inference/pipelines/text/generator-runtime.js +19 -0
- package/src/inference/pipelines/text/generator-steps.d.ts +15 -0
- package/src/inference/pipelines/text/generator-steps.js +71 -26
- package/src/inference/pipelines/text/generator.d.ts +5 -0
- package/src/inference/pipelines/text/generator.js +353 -166
- package/src/inference/pipelines/text/init.d.ts +15 -0
- package/src/inference/pipelines/text/init.js +35 -10
- package/src/inference/pipelines/text/layer.js +38 -8
- package/src/inference/pipelines/text/linear-attention.d.ts +5 -0
- package/src/inference/pipelines/text/linear-attention.js +33 -3
- package/src/inference/pipelines/text/logits/gpu.js +2 -2
- package/src/inference/pipelines/text/logits/index.d.ts +6 -1
- package/src/inference/pipelines/text/logits/index.js +3 -1
- package/src/inference/pipelines/text/model-load.js +3 -0
- package/src/inference/pipelines/text/moe-gpu.js +21 -3
- package/src/inference/pipelines/text/moe-shape-validator.d.ts +9 -0
- package/src/inference/pipelines/text/moe-shape-validator.js +31 -11
- package/src/inference/pipelines/text/ops.js +123 -53
- package/src/inference/pipelines/text/probes.js +1 -0
- package/src/inference/pipelines/text/sampling.js +52 -6
- package/src/inference/pipelines/text/state.js +2 -0
- package/src/inference/pipelines/text.d.ts +5 -0
- package/src/inference/pipelines/text.js +59 -1
- package/src/inference/pipelines/vision/encoder.js +386 -0
- package/src/inference/pipelines/vision/image-preprocess.js +151 -0
- package/src/inference/pipelines/vision/index.js +173 -0
- package/src/inference/pipelines/vision/ops.js +78 -0
- package/src/inference/pipelines/vision/patch-embed.js +151 -0
- package/src/inference/test-harness.js +11 -9
- package/src/loader/doppler-loader.d.ts +3 -0
- package/src/loader/doppler-loader.js +20 -3
- package/src/loader/experts/expert-cache.js +6 -2
- package/src/loader/experts/expert-loader.js +6 -2
- package/src/loader/final-weights-loader.js +2 -0
- package/src/loader/layer-loader.js +42 -3
- package/src/loader/manifest-config.js +3 -1
- package/src/loader/shard-cache.js +3 -2
- package/src/loader/tensors/tensor-loader.d.ts +3 -0
- package/src/loader/tensors/tensor-loader.js +130 -4
- package/src/rules/inference/dtype.rules.json +5 -0
- package/src/rules/inference/kernel-path.rules.json +2 -2
- package/src/rules/kernels/moe.rules.mixtral.json +75 -0
- package/src/rules/kernels/softmax.rules.json +2 -0
- package/src/rules/kernels/split-qg.rules.json +6 -0
- package/src/rules/rule-registry.d.ts +1 -0
- package/src/rules/rule-registry.js +4 -0
- package/src/storage/downloader.js +2 -1
- package/src/storage/quickstart-downloader.d.ts +3 -0
- package/src/storage/quickstart-downloader.js +27 -30
- package/src/storage/shard-manager.js +4 -3
- package/src/tooling/conversion-config-materializer.js +3 -5
- package/src/tooling/node-converter.js +28 -7
- package/src/tooling/node-source-runtime.js +65 -5
- package/src/tooling/node-webgpu.js +24 -7
- package/src/types/model.d.ts +5 -0
- package/src/utils/hf-resolve-url.d.ts +16 -0
- package/src/utils/hf-resolve-url.js +17 -0
- package/src/version.js +1 -1
- package/tools/doppler-cli.js +6 -1
- package/src/tooling/node-convert.d.ts +0 -54
package/src/config/platforms/loader.js

@@ -9,6 +9,8 @@ const platformCache = new Map();
 
 let platformsBaseUrl = null;
 
+const DEFAULT_PREFER_UNIFIED_MEMORY = false;
+
 const PLATFORM_FILES = [
   'apple-m3',
   'apple-m2',
@@ -131,7 +133,7 @@ export function getMemoryHints() {
 }
 
 export function prefersUnifiedMemory() {
-  return getMemoryHints()?.preferUnifiedMemory ??
+  return getMemoryHints()?.preferUnifiedMemory ?? DEFAULT_PREFER_UNIFIED_MEMORY;
 }
 
 export function getBufferAlignment() {
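Note: the hunk above gives the unified-memory fallback a named constant instead of an implicit one. A minimal sketch of how a caller might branch on this hint; chooseUploadStrategy is hypothetical and not part of the package:

// Hypothetical consumer of the platform memory hint (illustration only).
import { prefersUnifiedMemory } from './src/config/platforms/loader.js';

function chooseUploadStrategy() {
  // Unified-memory GPUs (e.g. Apple Silicon) can write mapped buffers in place;
  // discrete GPUs usually prefer a staging buffer plus an explicit copy.
  return prefersUnifiedMemory() ? 'mapped-write' : 'staging-copy';
}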
package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-nosubgroups.json

@@ -8,19 +8,19 @@
   "decode": {
     "steps": [
       { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "q_proj", "kernel": "
-      { "op": "k_proj", "kernel": "
-      { "op": "v_proj", "kernel": "
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
       { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "attention", "kernel": "attention_decode_chunked_f16kv.wgsl", "entry": "main" },
-      { "op": "o_proj", "kernel": "
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
       { "op": "attn_residual","kernel": "residual.wgsl", "entry": "main" },
       { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "gate_proj", "kernel": "
-      { "op": "up_proj", "kernel": "
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
       { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
-      { "op": "down_proj", "kernel": "
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
       { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
     ]
   },
@@ -28,19 +28,19 @@
   "prefill": {
     "steps": [
       { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "q_proj", "kernel": "
-      { "op": "k_proj", "kernel": "
-      { "op": "v_proj", "kernel": "
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
       { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "attention", "kernel": "attention_streaming_f16kv.wgsl", "entry": "main" },
-      { "op": "o_proj", "kernel": "
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
       { "op": "attn_residual","kernel": "residual.wgsl", "entry": "main" },
       { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "gate_proj", "kernel": "
-      { "op": "up_proj", "kernel": "
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
       { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
-      { "op": "down_proj", "kernel": "
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
       { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
     ]
   },
@@ -51,8 +51,8 @@
 
   "postLayer": [
     { "op": "final_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
-    { "op": "lm_head", "kernel": "matmul_f16w_f32a.wgsl",
-    { "op": "lm_head_prefill", "kernel": "
+    { "op": "lm_head", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "lm_head" },
+    { "op": "lm_head_prefill", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "lm_head" }
   ],
 
   "sampling": [
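Note: each rewritten step now names both the WGSL kernel and the weight tensor it binds, with {L} as a layer-index placeholder. A minimal sketch of the per-layer expansion this format implies; resolveStepWeights is a hypothetical helper, not the package's actual resolver:

// Hypothetical: expand a step's "{L}" weight placeholder for a given layer.
function resolveStepWeights(step, layerIndex) {
  return step.weights ? step.weights.replaceAll('{L}', String(layerIndex)) : null;
}

const step = {
  op: 'q_proj',
  kernel: 'matmul_f16w_f32a_tiled.wgsl',
  entry: 'main',
  weights: 'layer.{L}.self_attn.q_proj',
};
console.log(resolveStepWeights(step, 5)); // "layer.5.self_attn.q_proj"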
package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-online.json

@@ -28,19 +28,19 @@
   "prefill": {
     "steps": [
       { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "q_proj", "kernel": "
-      { "op": "k_proj", "kernel": "
-      { "op": "v_proj", "kernel": "
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
       { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
       { "op": "attention", "kernel": "attention_streaming_f16kv.wgsl", "entry": "main" },
-      { "op": "o_proj", "kernel": "
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
       { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
       { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
-      { "op": "gate_proj", "kernel": "
-      { "op": "up_proj", "kernel": "
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
       { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
-      { "op": "down_proj", "kernel": "
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
       { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
     ]
   },
@@ -52,7 +52,7 @@
   "postLayer": [
     { "op": "final_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
     { "op": "lm_head", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_multicol", "weights": "lm_head", "constants": { "MULTICOL_COLS_PER_WG": 64, "MULTICOL_THREADS_PER_COL": 4 } },
-    { "op": "lm_head_prefill", "kernel": "
+    { "op": "lm_head_prefill", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "lm_head" }
   ],
 
   "sampling": [
package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-small-attn.json

@@ -0,0 +1,61 @@
+{
+  "id": "gemma3-q4k-dequant-f32a-small-attn",
+  "name": "Gemma 3 Q4K Dequant (F32 activations, small-attn prefill)",
+  "description": "Q4K dequantized to F16 with F32 activations. Same as gemma3-q4k-dequant-f32a-online but uses attention_small_f16kv.wgsl for prefill (diagnostic variant).",
+  "activationDtype": "f32",
+  "kvDtype": "f16",
+
+  "decode": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_decode_online_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_vec4", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+
+  "prefill": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_small_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+
+  "preLayer": [
+    { "op": "embed", "kernel": "gather_f16.wgsl", "entry": "main", "weights": "embed_tokens" }
+  ],
+
+  "postLayer": [
+    { "op": "final_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+    { "op": "lm_head", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_multicol", "weights": "lm_head", "constants": { "MULTICOL_COLS_PER_WG": 64, "MULTICOL_THREADS_PER_COL": 4 } },
+    { "op": "lm_head_prefill", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "lm_head" }
+  ],
+
+  "sampling": [
+    { "op": "sample", "kernel": "sample.wgsl", "entry": "sample_single_pass" }
+  ]
+}
package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32w-f32a-online.json

@@ -0,0 +1,56 @@
+{
+  "id": "gemma3-q4k-dequant-f32w-f32a-online",
+  "name": "Gemma 3 Q4K Dequant (F32 projection weights, F32 activations, online decode)",
+  "description": "Q4K projection weights dequantized to F32 with F32 activations. Tied embeddings and LM head stay on the native F16 path. Decode uses online attention; prefill uses streaming attention.",
+  "activationDtype": "f32",
+  "kvDtype": "f16",
+  "decode": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_decode_online_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+  "prefill": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_streaming_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_f32.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+  "preLayer": [
+    { "op": "embed", "kernel": "gather_f16.wgsl", "entry": "main", "weights": "embed_tokens" }
+  ],
+  "postLayer": [
+    { "op": "final_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+    { "op": "lm_head", "kernel": "matmul_gemv_subgroup.wgsl", "entry": "main_multicol", "weights": "lm_head", "constants": { "MULTICOL_COLS_PER_WG": 64, "MULTICOL_THREADS_PER_COL": 4 } },
+    { "op": "lm_head_prefill", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "lm_head" }
+  ],
+  "sampling": [
+    { "op": "sample", "kernel": "sample.wgsl", "entry": "sample_single_pass" }
+  ]
+}
package/src/config/presets/kernel-paths/lfm2-q4k-dequant-f32a-nosubgroups.json

@@ -0,0 +1,61 @@
+{
+  "id": "lfm2-q4k-dequant-f32a-nosubgroups",
+  "name": "LFM2 Q4K Dequant (F32 activations, no subgroups)",
+  "description": "Subgroup-free LFM2 Q4K path: F32 activations with tiled prefill matmul and small-kernel prefill attention. Still requires shader-f16 kernels.",
+  "activationDtype": "f32",
+  "kvDtype": "f16",
+
+  "decode": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_decode_chunked_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+
+  "prefill": {
+    "steps": [
+      { "op": "input_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "q_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.q_proj" },
+      { "op": "k_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.k_proj" },
+      { "op": "v_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.v_proj" },
+      { "op": "rope_q", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "rope_k", "kernel": "rope.wgsl", "entry": "main" },
+      { "op": "attention", "kernel": "attention_small_f16kv.wgsl", "entry": "main" },
+      { "op": "o_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.self_attn.o_proj" },
+      { "op": "attn_residual", "kernel": "residual.wgsl", "entry": "main" },
+      { "op": "post_attn_norm","kernel": "rmsnorm.wgsl", "entry": "main" },
+      { "op": "gate_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.gate_proj" },
+      { "op": "up_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.up_proj" },
+      { "op": "activation", "kernel": "gelu.wgsl", "entry": "main", "constants": { "HAS_GATE": true } },
+      { "op": "down_proj", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "layer.{L}.mlp.down_proj" },
+      { "op": "ffn_residual", "kernel": "residual.wgsl", "entry": "main" }
+    ]
+  },
+
+  "preLayer": [
+    { "op": "embed", "kernel": "gather_f16.wgsl", "entry": "main", "weights": "embed_tokens" }
+  ],
+
+  "postLayer": [
+    { "op": "final_norm", "kernel": "rmsnorm.wgsl", "entry": "main" },
+    { "op": "lm_head", "kernel": "matmul_f16w_f32a.wgsl", "entry": "main", "weights": "lm_head" },
+    { "op": "lm_head_prefill", "kernel": "matmul_f16w_f32a_tiled.wgsl", "entry": "main", "weights": "lm_head" }
+  ],
+
+  "sampling": [
+    { "op": "sample", "kernel": "sample.wgsl", "entry": "sample_single_pass" }
+  ]
+}
package/src/config/presets/kernel-paths/registry.json

@@ -92,6 +92,20 @@
     "statusReason": "default",
     "notes": "Gemma 3 Q4K dequant default: subgroup GEMV + online attention + tuned lm_head multicol, F32 activations."
   },
+  {
+    "id": "gemma3-q4k-dequant-f32a-small-attn",
+    "file": "gemma3-q4k-dequant-f32a-small-attn.json",
+    "status": "experimental",
+    "statusReason": "diagnostic-probe",
+    "notes": "Diagnostic: same as gemma3-q4k-dequant-f32a-online but uses attention_small_f16kv.wgsl for prefill to isolate streaming attention bug."
+  },
+  {
+    "id": "gemma3-q4k-dequant-f32w-f32a-online",
+    "file": "gemma3-q4k-dequant-f32w-f32a-online.json",
+    "status": "experimental",
+    "statusReason": "accuracy-probe",
+    "notes": "Gemma 3 Q4K dequant path that keeps matmul weights in F32 and runs F32 matmul kernels for numeric-sensitivity debugging."
+  },
   {
     "id": "lfm2-q4k-dequant-f32a-online",
     "file": "lfm2-q4k-dequant-f32a-online.json",
@@ -99,6 +113,13 @@
     "statusReason": "default",
     "notes": "LFM2 Q4K default: subgroup GEMV decode with tiled fast-prefill path and F32 activations."
   },
+  {
+    "id": "lfm2-q4k-dequant-f32a-nosubgroups",
+    "file": "lfm2-q4k-dequant-f32a-nosubgroups.json",
+    "status": "canonical",
+    "statusReason": "subgroup-free",
+    "notes": "Subgroup-free LFM2 Q4K dequant path with F32 activations and tiled prefill. Still requires shader-f16 kernels."
+  },
   {
     "id": "embeddinggemma-f16-f32a",
     "file": "embeddinggemma-f16-f32a.json",
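Note: registry entries pair each preset file with a status/statusReason, which lets a runtime keep experimental probes addressable without ever selecting them by default. A sketch under that assumption; the registry's top-level shape is not shown in the diff, so a plain array is assumed:

// Entries as shown in the diff; the first entry's "status" is assumed canonical.
const entries = [
  { id: 'gemma3-q4k-dequant-f32a-online', status: 'canonical', statusReason: 'default' },
  { id: 'gemma3-q4k-dequant-f32a-small-attn', status: 'experimental', statusReason: 'diagnostic-probe' },
  { id: 'lfm2-q4k-dequant-f32a-nosubgroups', status: 'canonical', statusReason: 'subgroup-free' },
];

// Diagnostic probes stay reachable by id but are filtered out of defaults.
const defaultCandidates = entries.filter((e) => e.status === 'canonical');
const byId = (id) => entries.find((e) => e.id === id);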
package/src/config/presets/models/gemma2.json

@@ -9,6 +9,7 @@
 
   "inference": {
     "attention": {
+      "queryPreAttnScalar": 256,
       "slidingWindow": 4096,
       "attnLogitSoftcapping": 50.0,
       "queryKeyNorm": false
@@ -40,7 +41,7 @@
       "f32": "gemma2-f16-f32a"
     },
     "q4k": {
-      "f16": "gemma2-q4k-dequant-
+      "f16": "gemma2-q4k-dequant-f16a",
       "f32": "gemma2-q4k-dequant-f32a-nosubgroups"
     }
   }
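Note: queryPreAttnScalar pins the attention scale that Gemma-style models use instead of deriving it from the head dimension: queries are scaled by queryPreAttnScalar^-0.5, so with 256 the q·k logits are divided by 16 regardless of head size. A one-line sketch of the distinction:

// Attention scale: prefer the configured pre-attn scalar, fall back to headDim.
// With queryPreAttnScalar = 256, scale = 1 / sqrt(256) = 1 / 16.
const attnScale = (cfg) => 1 / Math.sqrt(cfg.queryPreAttnScalar ?? cfg.headDim);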
package/src/config/presets/models/gemma3.json

@@ -8,7 +8,9 @@
   },
   "inference": {
     "attention": {
+      "queryPreAttnScalar": 256,
       "attnLogitSoftcapping": null,
+      "slidingWindow": 512,
       "queryKeyNorm": true
     },
     "normalization": {
@@ -32,7 +34,8 @@
     },
     "rope": {
       "ropeTheta": 1000000,
-      "ropeLocalTheta": 10000
+      "ropeLocalTheta": 10000,
+      "ropeLocalScalingFactor": 1.0
     },
     "chatTemplate": {
       "type": "gemma",
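Note: Gemma 3 runs two RoPE bases, ropeTheta (1000000) on global-attention layers and ropeLocalTheta (10000) on sliding-window layers; the new ropeLocalScalingFactor of 1.0 applies no extra position scaling on the local path. A minimal sketch of the standard inverse-frequency table for either base:

// Standard RoPE inverse frequencies for a given base; evaluated once per base:
// theta = 1000000 for global layers, 10000 for local sliding-window layers.
function ropeInvFreq(theta, headDim) {
  return Float32Array.from({ length: headDim / 2 }, (_, i) => theta ** (-2 * i / headDim));
}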
package/src/config/presets/models/gemma4.json

@@ -0,0 +1,61 @@
+{
+  "id": "gemma4",
+  "name": "Gemma 4",
+  "extends": "gemma3",
+  "modelType": "mixtral",
+
+  "inference": {
+    "attention": {
+      "slidingWindow": 1024
+    },
+    "rope": {
+      "ropeTheta": 1000000,
+      "ropeLocalTheta": 10000,
+      "ropeScalingType": "yarn",
+      "ropeScalingFactor": 8.0,
+      "yarnBetaFast": 4.0,
+      "yarnBetaSlow": 1.0,
+      "yarnOriginalMaxPos": 32768
+    },
+    "moe": {
+      "kernelProfileId": "mixtral-moe-v1",
+      "numExperts": 8,
+      "topK": 2,
+      "numSharedExperts": 0,
+      "routerDtype": "f32",
+      "supportedActivationDtypes": ["f16", "f32"],
+      "preferredActivationDtype": "f32",
+      "tensorPattern": "mixtral"
+    },
+    "kernelPaths": {
+      "q4k": {
+        "default": "gemma3-q4k-dequant-f32a-online",
+        "f16": "gemma3-q4k-dequant-f16a-online",
+        "f16a": "gemma3-q4k-dequant-f16a-online",
+        "f32": "gemma3-q4k-dequant-f32a-online"
+      }
+    }
+  },
+
+  "tensorPatterns": {
+    "ffn": {
+      "gate": ["layers.{layer}.block_sparse_moe.experts.{expert}.w1.weight"],
+      "up": ["layers.{layer}.block_sparse_moe.experts.{expert}.w3.weight"],
+      "down": ["layers.{layer}.block_sparse_moe.experts.{expert}.w2.weight"]
+    }
+  },
+
+  "detection": {
+    "architecturePatterns": [
+      "gemma4",
+      "Gemma4ForCausalLM",
+      "Gemma4ForConditionalGeneration",
+      "gemma-4"
+    ],
+    "modelTypePatterns": [
+      "gemma4",
+      "gemma4_text",
+      "gemma4_moe"
+    ]
+  }
+}
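Note: the moe block describes Mixtral-style routing: an f32 router scores numExperts = 8 experts per token and the topK = 2 winners are mixed. A minimal sketch of that selection; the softmax-before-top-k order follows the usual Mixtral formulation and is an assumption here:

// Hypothetical token router for numExperts = 8, topK = 2, f32 logits.
function routeToken(routerLogits, topK = 2) {
  const max = Math.max(...routerLogits);                  // stable softmax
  const exps = routerLogits.map((x) => Math.exp(x - max));
  const total = exps.reduce((a, b) => a + b, 0);
  const ranked = exps
    .map((e, expert) => ({ expert, p: e / total }))
    .sort((a, b) => b.p - a.p)
    .slice(0, topK);
  const kept = ranked.reduce((a, e) => a + e.p, 0);       // renormalize top-k mass
  return ranked.map(({ expert, p }) => ({ expert, weight: p / kept }));
}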
package/src/config/presets/models/granite-docling.json

@@ -0,0 +1,70 @@
+{
+  "id": "granite-docling",
+  "name": "Granite-Docling (Document OCR VLM)",
+  "extends": "transformer",
+  "modelType": "ocr",
+
+  "_notes": "Stabilized successor to SmolDocling-256M-preview. Requires full multimodal pipeline: SigLIP vision encoder, Idefics3-style image-token merge, pixel-shuffle connector, SmolLM2 decoder, DocTags output parsing. This preset covers the decoder config only — vision encoder and connector are separate pipeline stages not yet implemented in Doppler.",
+
+  "architecture": {
+    "headDim": 64,
+    "ropeTheta": 10000,
+    "visionEncoder": {
+      "type": "siglip_b16",
+      "patchSize": 16,
+      "imageSize": 512,
+      "hiddenSize": 768,
+      "numLayers": 12,
+      "numHeads": 12,
+      "parameterCount": 93000000,
+      "_note": "SigLIP base patch-16/512 backbone. Requires dedicated vision encoder pipeline in Doppler."
+    },
+    "connector": {
+      "type": "mlp_pixel_shuffle",
+      "downsampleFactor": 2,
+      "_note": "Idefics3/SmolVLM-style projection. Maps vision tokens to decoder embedding space."
+    }
+  },
+
+  "inference": {
+    "attention": {
+      "queryKeyNorm": false,
+      "causal": true
+    },
+    "normalization": {
+      "rmsNormWeightOffset": false,
+      "rmsNormEps": 1e-5
+    },
+    "ffn": {
+      "activation": "silu"
+    },
+    "output": {
+      "scaleEmbeddings": false,
+      "tieWordEmbeddings": true
+    },
+    "chatTemplate": {
+      "enabled": false
+    },
+    "kernelPaths": {
+      "q4k": {
+        "f16": "granite-docling-q4k-dequant-f32a",
+        "f32": "granite-docling-q4k-dequant-f32a"
+      },
+      "f16": {
+        "f16": "granite-docling-f16-f32a",
+        "f32": "granite-docling-f16-f32a"
+      }
+    }
+  },
+
+  "tokenizer": {
+    "bosToken": "<|endoftext|>",
+    "eosTokens": ["<|endoftext|>", "<|im_end|>"],
+    "addBosToken": true
+  },
+
+  "detection": {
+    "architecturePatterns": ["granite-docling", "GraniteDocling", "smoldocling", "SmolDocling", "SmolVLM"],
+    "modelTypePatterns": ["granite-docling", "smoldocling", "smolvlm"]
+  }
+}
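Note: the mlp_pixel_shuffle connector with downsampleFactor 2 is the Idefics3/SmolVLM merge: each 2x2 block of vision tokens folds into the channel dimension (4x fewer tokens, 4x wider features) before the MLP projects into decoder space. A shape-only sketch under the preset's SigLIP numbers:

// Shape-only sketch of pixel-shuffle token merging (downsampleFactor d).
// [h, w, c] vision tokens -> [h/d, w/d, c*d*d]; an MLP then maps c*d*d -> decoder dim.
function pixelShuffleShape([h, w, c], d = 2) {
  if (h % d || w % d) throw new Error('token grid not divisible by downsampleFactor');
  return [h / d, w / d, c * d * d];
}
// SigLIP b16 at 512 px: 32x32 patches of 768 dims -> 16x16 tokens of 3072 dims.
console.log(pixelShuffleShape([32, 32, 768])); // [16, 16, 3072]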
package/src/config/presets/models/lfm2.json

@@ -39,11 +39,16 @@
       "period": null,
       "offset": null,
       "layerTypes": null
+    },
+    "chatTemplate": {
+      "type": "chatml",
+      "enabled": true
     }
   },
 
   "tokenizer": {
-    "
+    "bosTokenId": 1,
+    "addBosToken": true,
     "addEosToken": false
   },
 
package/src/config/presets/models/qwen3.json

@@ -10,7 +10,8 @@
   "inference": {
     "attention": {
       "slidingWindow": null,
-      "queryKeyNorm": true
+      "queryKeyNorm": true,
+      "attentionOutputGate": true
     },
     "output": {
       "scaleEmbeddings": false
@@ -39,8 +40,8 @@
   },
 
   "detection": {
-    "architecturePatterns": ["qwen3", "
-    "modelTypePatterns": ["
+    "architecturePatterns": ["qwen3", "Qwen3ForCausalLM", "Qwen2ForCausalLM"],
+    "modelTypePatterns": ["qwen3", "qwen2"],
     "configPatterns": {
       "model_type": "qwen2"
     }
package/src/config/presets/models/qwen3_5.json

@@ -0,0 +1,16 @@
+{
+  "id": "qwen3_5",
+  "name": "Qwen 3.5",
+  "extends": "qwen3",
+
+  "inference": {
+    "normalization": {
+      "rmsNormWeightOffset": true
+    }
+  },
+
+  "detection": {
+    "architecturePatterns": ["qwen3_5", "Qwen3_5ForCausalLM", "Qwen3_5ForConditionalGeneration"],
+    "modelTypePatterns": ["qwen3_5", "qwen3_5_text"]
+  }
+}
package/src/config/presets/models/qwen3_vl.json

@@ -0,0 +1,40 @@
+{
+  "id": "qwen3_vl",
+  "name": "Qwen 3 VL",
+  "extends": "qwen3",
+
+  "architecture": {
+    "ropeTheta": 5000000
+  },
+
+  "inference": {
+    "normalization": {
+      "rmsNormWeightOffset": false
+    },
+    "rope": {
+      "ropeTheta": 5000000,
+      "mropeInterleaved": true,
+      "mropeSection": [24, 20, 20],
+      "partialRotaryFactor": null
+    }
+  },
+
+  "vision": {
+    "patchSize": 16,
+    "spatialMergeSize": 2,
+    "temporalPatchSize": 2,
+    "eps": 1e-6,
+    "minPixels": 3136,
+    "maxPixels": 1003520,
+    "projectorType": "spatial_merge",
+    "normalization": {
+      "mean": [0.48145466, 0.4578275, 0.40821073],
+      "std": [0.26862954, 0.26130258, 0.27577711]
+    }
+  },
+
+  "detection": {
+    "architecturePatterns": ["qwen3_vl", "Qwen3VLForConditionalGeneration"],
+    "modelTypePatterns": ["qwen3_vl"]
+  }
+}
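Note: mropeSection [24, 20, 20] splits the 64 rotary frequency pairs of a 128-dim head across temporal, height, and width position axes, and mropeInterleaved: true indicates the axes alternate across frequencies rather than sitting in contiguous blocks. A sketch of the simpler contiguous mapping; the axis order and assignment follow Qwen2-VL-style M-RoPE and are assumptions here:

// Hypothetical: which position axis drives rotary frequency pair i (contiguous layout).
// [24, 20, 20] -> 24 temporal + 20 height + 20 width = 64 pairs = head_dim / 2.
function mropeAxisForPair(i, section = [24, 20, 20]) {
  const axes = ['temporal', 'height', 'width'];
  let end = 0;
  for (let a = 0; a < section.length; a++) {
    end += section[a];
    if (i < end) return axes[a];
  }
  throw new RangeError('pair index exceeds head_dim / 2');
}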